whitespace / register cleanup

Matthew Dillon 2001-07-04 19:00:13 +00:00
parent ab163f5fee
commit 54d9214595
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=79242
12 changed files with 81 additions and 80 deletions
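The hunks below all make the same two mechanical changes: the obsolete `register' storage-class specifier is dropped from old-style (K&R) parameter and local declarations, and keyword spacing is normalized to style(9) form (`for (...)' and `while (...)' rather than `for(...)' and `while(...)'). The single net line added (81 additions versus 80 deletions) comes from vm_page_queue_init(), whose opening brace moves onto its own line. As a rough sketch only, sum_old() and sum_new() below are made-up functions, not code from this commit; they show the before/after shape of the cleanup. Dropping `register' is purely cosmetic: it is just an optimization hint that modern compilers ignore, and because a `register' variable could never have its address taken, removing the hint cannot invalidate any existing code.

#include <stdio.h>

/* Before: K&R definition with `register' hints and cramped loop spacing. */
static int
sum_old(buf, len)
        register int *buf;
        int len;
{
        register int i, total;

        for(total = 0, i = 0;i < len;i++)
                total += buf[i];
        return (total);
}

/* After: same K&R parameter list, no `register', style(9) spacing. */
static int
sum_new(buf, len)
        int *buf;
        int len;
{
        int i, total;

        for (total = 0, i = 0; i < len; i++)
                total += buf[i];
        return (total);
}

int
main(void)
{
        int data[] = { 1, 2, 3, 4 };

        /* Both variants typically compile to identical object code. */
        printf("%d %d\n", sum_old(data, 4), sum_new(data, 4));
        return (0);
}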

View File

@ -1502,7 +1502,7 @@ swp_pager_sync_iodone(bp)
static void
swp_pager_async_iodone(bp)
register struct buf *bp;
struct buf *bp;
{
int s;
int i;

View File

@ -915,8 +915,8 @@ vm_fault_wire(map, start, end)
vm_offset_t start, end;
{
register vm_offset_t va;
register pmap_t pmap;
vm_offset_t va;
pmap_t pmap;
int rv;
pmap = vm_map_pmap(map);
@ -958,8 +958,8 @@ vm_fault_user_wire(map, start, end)
vm_offset_t start, end;
{
register vm_offset_t va;
register pmap_t pmap;
vm_offset_t va;
pmap_t pmap;
int rv;
GIANT_REQUIRED;
@ -1000,8 +1000,8 @@ vm_fault_unwire(map, start, end)
vm_offset_t start, end;
{
register vm_offset_t va, pa;
register pmap_t pmap;
vm_offset_t va, pa;
pmap_t pmap;
pmap = vm_map_pmap(map);

View File

@ -210,10 +210,10 @@ vsunlock(addr, len)
*/
void
vm_fork(p1, p2, flags)
register struct proc *p1, *p2;
struct proc *p1, *p2;
int flags;
{
register struct user *up;
struct user *up;
GIANT_REQUIRED;
@ -295,7 +295,7 @@ static void
vm_init_limits(udata)
void *udata;
{
register struct proc *p = udata;
struct proc *p = udata;
int rss_limit;
/*
@ -359,8 +359,8 @@ static void
scheduler(dummy)
void *dummy;
{
register struct proc *p;
register int pri;
struct proc *p;
int pri;
struct proc *pp;
int ppri;
@ -452,7 +452,7 @@ void
swapout_procs(action)
int action;
{
register struct proc *p;
struct proc *p;
struct proc *outp, *outp2;
int outpri, outpri2;
int didswap = 0;
@ -569,7 +569,7 @@ int action;
static void
swapout(p)
register struct proc *p;
struct proc *p;
{
PROC_LOCK_ASSERT(p, MA_OWNED);

View File

@ -77,8 +77,8 @@ static fixpt_t cexp[3] = {
static void
loadav(struct loadavg *avg)
{
register int i, nrun;
register struct proc *p;
int i, nrun;
struct proc *p;
sx_slock(&allproc_lock);
for (nrun = 0, p = LIST_FIRST(&allproc); p != 0; p = LIST_NEXT(p, p_list)) {

View File

@ -189,10 +189,10 @@ struct mmap_args {
int
mmap(p, uap)
struct proc *p;
register struct mmap_args *uap;
struct mmap_args *uap;
{
register struct filedesc *fdp = p->p_fd;
register struct file *fp = NULL;
struct filedesc *fdp = p->p_fd;
struct file *fp = NULL;
struct vnode *vp;
vm_offset_t addr;
vm_size_t size, pageoff;
@ -447,7 +447,7 @@ struct ommap_args {
int
ommap(p, uap)
struct proc *p;
register struct ommap_args *uap;
struct ommap_args *uap;
{
struct mmap_args nargs;
static const char cvtbsdprot[8] = {
@ -577,8 +577,8 @@ struct munmap_args {
#endif
int
munmap(p, uap)
register struct proc *p;
register struct munmap_args *uap;
struct proc *p;
struct munmap_args *uap;
{
vm_offset_t addr;
vm_size_t size, pageoff;
@ -649,7 +649,7 @@ mprotect(p, uap)
{
vm_offset_t addr;
vm_size_t size, pageoff;
register vm_prot_t prot;
vm_prot_t prot;
int ret;
addr = (vm_offset_t) uap->addr;
@ -694,7 +694,7 @@ minherit(p, uap)
{
vm_offset_t addr;
vm_size_t size, pageoff;
register vm_inherit_t inherit;
vm_inherit_t inherit;
int ret;
addr = (vm_offset_t)uap->addr;
@ -792,7 +792,7 @@ mincore(p, uap)
char *vec;
int error;
int vecindex, lastvecindex;
register vm_map_entry_t current;
vm_map_entry_t current;
vm_map_entry_t entry;
int mincoreinfo;
unsigned int timestamp;
@ -830,9 +830,9 @@ mincore(p, uap)
* up the pages elsewhere.
*/
lastvecindex = -1;
for(current = entry;
(current != &map->header) && (current->start < end);
current = current->next) {
for (current = entry;
(current != &map->header) && (current->start < end);
current = current->next) {
/*
* ignore submaps (for now) or null objects
@ -854,7 +854,7 @@ mincore(p, uap)
/*
* scan this entry one page at a time
*/
while(addr < cend) {
while (addr < cend) {
/*
* Check pmap first, it is likely faster, also
* it can provide info as to whether we are the
@ -904,7 +904,7 @@ mincore(p, uap)
* If we have skipped map entries, we need to make sure that
* the byte vector is zeroed for those skipped entries.
*/
while((lastvecindex + 1) < vecindex) {
while ((lastvecindex + 1) < vecindex) {
error = subyte( vec + lastvecindex, 0);
if (error) {
mtx_unlock(&Giant);
@ -945,7 +945,7 @@ mincore(p, uap)
* Zero the last entries in the byte vector.
*/
vecindex = OFF_TO_IDX(end - first_addr);
while((lastvecindex + 1) < vecindex) {
while ((lastvecindex + 1) < vecindex) {
error = subyte( vec + lastvecindex, 0);
if (error) {
mtx_unlock(&Giant);

View File

@ -573,7 +573,7 @@ vm_object_page_clean(object, start, end, flags)
rescan:
curgeneration = object->generation;
for(p = TAILQ_FIRST(&object->memq); p; p = np) {
for (p = TAILQ_FIRST(&object->memq); p; p = np) {
np = TAILQ_NEXT(p, listq);
pi = p->pindex;
@ -610,7 +610,7 @@ vm_object_page_clean(object, start, end, flags)
}
maxf = 0;
for(i=1;i<vm_pageout_page_count;i++) {
for (i = 1; i < vm_pageout_page_count; i++) {
if ((tp = vm_page_lookup(object, pi + i)) != NULL) {
if ((tp->flags & PG_BUSY) ||
(tp->flags & PG_CLEANCHK) == 0 ||
@ -635,7 +635,7 @@ vm_object_page_clean(object, start, end, flags)
maxb = 0;
chkb = vm_pageout_page_count - maxf;
if (chkb) {
for(i = 1; i < chkb;i++) {
for (i = 1; i < chkb; i++) {
if ((tp = vm_page_lookup(object, pi - i)) != NULL) {
if ((tp->flags & PG_BUSY) ||
(tp->flags & PG_CLEANCHK) == 0 ||
@ -658,14 +658,14 @@ vm_object_page_clean(object, start, end, flags)
}
}
for(i=0;i<maxb;i++) {
for (i = 0; i < maxb; i++) {
int index = (maxb - i) - 1;
ma[index] = mab[i];
vm_page_flag_clear(ma[index], PG_CLEANCHK);
}
vm_page_flag_clear(p, PG_CLEANCHK);
ma[maxb] = p;
for(i=0;i<maxf;i++) {
for (i = 0 ; i < maxf; i++) {
int index = (maxb + i) + 1;
ma[index] = maf[i];
vm_page_flag_clear(ma[index], PG_CLEANCHK);
@ -674,7 +674,7 @@ vm_object_page_clean(object, start, end, flags)
splx(s);
vm_pageout_flush(ma, runlen, pagerflags);
for (i = 0; i<runlen; i++) {
for (i = 0; i < runlen; i++) {
if (ma[i]->valid & ma[i]->dirty) {
vm_page_protect(ma[i], VM_PROT_READ);
vm_page_flag_set(ma[i], PG_CLEANCHK);
@ -1637,7 +1637,7 @@ _vm_object_in_map(map, object, entry)
tmpe = tmpe->next;
}
} else if ((obj = entry->object.vm_object) != NULL) {
for(; obj; obj=obj->backing_object)
for (; obj; obj = obj->backing_object)
if( obj == object) {
return 1;
}
@ -1794,7 +1794,7 @@ DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
osize = object->size;
if (osize > 128)
osize = 128;
for(idx=0;idx<osize;idx++) {
for (idx = 0; idx < osize; idx++) {
m = vm_page_lookup(object, idx);
if (m == NULL) {
if (rcount) {

View File

@ -102,19 +102,20 @@ static volatile int vm_page_bucket_generation;
struct vpgqueues vm_page_queues[PQ_COUNT];
static void
vm_page_queue_init(void) {
vm_page_queue_init(void)
{
int i;
for(i=0;i<PQ_L2_SIZE;i++) {
for (i = 0; i < PQ_L2_SIZE; i++) {
vm_page_queues[PQ_FREE+i].cnt = &cnt.v_free_count;
}
vm_page_queues[PQ_INACTIVE].cnt = &cnt.v_inactive_count;
vm_page_queues[PQ_ACTIVE].cnt = &cnt.v_active_count;
for(i=0;i<PQ_L2_SIZE;i++) {
for (i = 0; i < PQ_L2_SIZE; i++) {
vm_page_queues[PQ_CACHE+i].cnt = &cnt.v_cache_count;
}
for(i=0;i<PQ_COUNT;i++) {
vm_page_queues[PQ_INACTIVE].cnt = &cnt.v_inactive_count;
vm_page_queues[PQ_ACTIVE].cnt = &cnt.v_active_count;
for (i = 0; i < PQ_COUNT; i++) {
TAILQ_INIT(&vm_page_queues[i].pl);
}
}
@ -181,14 +182,14 @@ vm_add_new_page(pa)
vm_offset_t
vm_page_startup(starta, enda, vaddr)
register vm_offset_t starta;
vm_offset_t starta;
vm_offset_t enda;
vm_offset_t vaddr;
{
register vm_offset_t mapped;
register struct vm_page **bucket;
vm_offset_t mapped;
struct vm_page **bucket;
vm_size_t npages, page_range;
register vm_offset_t new_end;
vm_offset_t new_end;
int i;
vm_offset_t pa;
int nblocks;
@ -357,11 +358,11 @@ vm_page_hash(object, pindex)
void
vm_page_insert(m, object, pindex)
register vm_page_t m;
register vm_object_t object;
register vm_pindex_t pindex;
vm_page_t m;
vm_object_t object;
vm_pindex_t pindex;
{
register struct vm_page **bucket;
struct vm_page **bucket;
GIANT_REQUIRED;
@ -497,11 +498,11 @@ vm_page_remove(m)
vm_page_t
vm_page_lookup(object, pindex)
register vm_object_t object;
register vm_pindex_t pindex;
vm_object_t object;
vm_pindex_t pindex;
{
register vm_page_t m;
register struct vm_page **bucket;
vm_page_t m;
struct vm_page **bucket;
int generation;
/*
@ -549,8 +550,8 @@ vm_page_lookup(object, pindex)
void
vm_page_rename(m, new_object, new_pindex)
register vm_page_t m;
register vm_object_t new_object;
vm_page_t m;
vm_object_t new_object;
vm_pindex_t new_pindex;
{
int s;
@ -748,7 +749,7 @@ vm_page_alloc(object, pindex, page_req)
vm_pindex_t pindex;
int page_req;
{
register vm_page_t m = NULL;
vm_page_t m = NULL;
int s;
GIANT_REQUIRED;
@ -934,7 +935,7 @@ vm_await()
*/
void
vm_page_activate(m)
register vm_page_t m;
vm_page_t m;
{
int s;
@ -1154,7 +1155,7 @@ vm_page_unmanage(vm_page_t m)
*/
void
vm_page_wire(m)
register vm_page_t m;
vm_page_t m;
{
int s;
@ -1204,7 +1205,7 @@ vm_page_wire(m)
*/
void
vm_page_unwire(m, activate)
register vm_page_t m;
vm_page_t m;
int activate;
{
int s;
@ -1335,7 +1336,7 @@ vm_page_try_to_free(m)
*/
void
vm_page_cache(m)
register vm_page_t m;
vm_page_t m;
{
int s;
@ -1972,13 +1973,13 @@ DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
{
int i;
db_printf("PQ_FREE:");
for(i=0;i<PQ_L2_SIZE;i++) {
for (i = 0; i < PQ_L2_SIZE; i++) {
db_printf(" %d", vm_page_queues[PQ_FREE + i].lcnt);
}
db_printf("\n");
db_printf("PQ_CACHE:");
for(i=0;i<PQ_L2_SIZE;i++) {
for (i = 0; i < PQ_L2_SIZE; i++) {
db_printf(" %d", vm_page_queues[PQ_CACHE + i].lcnt);
}
db_printf("\n");

View File

@ -220,7 +220,7 @@ static int
vm_pageout_clean(m)
vm_page_t m;
{
register vm_object_t object;
vm_object_t object;
vm_page_t mc[2*vm_pageout_page_count];
int pageout_count;
int ib, is, page_base;
@ -362,7 +362,7 @@ vm_pageout_flush(mc, count, flags)
int count;
int flags;
{
register vm_object_t object;
vm_object_t object;
int pageout_status[count];
int numpagedout = 0;
int i;
@ -458,7 +458,7 @@ vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
vm_pindex_t desired;
int map_remove_only;
{
register vm_page_t p, next;
vm_page_t p, next;
int rcount;
int remove_mode;
int s;

View File

@ -332,10 +332,10 @@ vm_pager_unmap_page(kva)
vm_object_t
vm_pager_object_lookup(pg_list, handle)
register struct pagerlst *pg_list;
struct pagerlst *pg_list;
void *handle;
{
register vm_object_t object;
vm_object_t object;
TAILQ_FOREACH(object, pg_list, pager_object_list)
if (object->handle == handle)

View File

@ -91,7 +91,7 @@ swapdev_strategy(ap)
} */ *ap;
{
int s, sz, off, seg, index;
register struct swdevt *sp;
struct swdevt *sp;
struct vnode *vp;
struct buf *bp;
@ -189,7 +189,7 @@ swapon(p, uap)
struct proc *p;
struct swapon_args *uap;
{
register struct vnode *vp;
struct vnode *vp;
struct nameidata nd;
int error;
@ -242,9 +242,9 @@ swaponvp(p, vp, dev, nblks)
u_long nblks;
{
int index;
register struct swdevt *sp;
register swblk_t vsbase;
register long blk;
struct swdevt *sp;
swblk_t vsbase;
long blk;
swblk_t dvbase;
int error;
u_long aligned_nblks;

View File

@ -71,7 +71,7 @@ obreak(p, uap)
struct proc *p;
struct obreak_args *uap;
{
register struct vmspace *vm = p->p_vmspace;
struct vmspace *vm = p->p_vmspace;
vm_offset_t new, old, base;
int rv;
int error = 0;

View File

@ -171,7 +171,7 @@ static void
vnode_pager_dealloc(object)
vm_object_t object;
{
register struct vnode *vp = object->handle;
struct vnode *vp = object->handle;
GIANT_REQUIRED;
if (vp == NULL)
@ -531,7 +531,7 @@ vnode_pager_input_old(object, m)
error = VOP_READ(vp, &auio, 0, curproc->p_ucred);
if (!error) {
register int count = size - auio.uio_resid;
int count = size - auio.uio_resid;
if (count == 0)
error = EINVAL;