vm: clean up empty lines in .c and .h files
parent b64b31338f
commit c3aa3bf97c
@@ -154,4 +154,3 @@ default_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
 	/* An OBJT_DEFAULT object has no backing store. */
 	return (FALSE);
 }
-
@@ -158,7 +158,6 @@ SYSCTL_UINT(_vm_memguard, OID_AUTO, frequency, CTLFLAG_RWTUN,
 SYSCTL_ULONG(_vm_memguard, OID_AUTO, frequency_hits, CTLFLAG_RD,
     &memguard_frequency_hits, 0, "# times MemGuard randomly chose");
 
-
 /*
  * Return a fudged value to be used for vm_kmem_size for allocating
  * the kernel_arena.
@@ -37,7 +37,6 @@ __FBSDID("$FreeBSD$");
 
 #include <vm/redzone.h>
 
-
 static SYSCTL_NODE(_vm, OID_AUTO, redzone, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
     "RedZone data");
 static u_long redzone_extra_mem = 0;
@@ -134,7 +134,7 @@ sg_pager_dealloc(vm_object_t object)
 		TAILQ_REMOVE(&object->un_pager.sgp.sgp_pglist, m, plinks.q);
 		vm_page_putfake(m);
 	}
-
+
 	sg = object->handle;
 	sglist_free(sg);
 	object->handle = NULL;
@@ -865,7 +865,6 @@ swp_pager_strategy(struct buf *bp)
 	panic("Swapdev not found");
 }
 
-
 /*
  * SWP_PAGER_FREESWAPSPACE() - free raw swap space
  *
@@ -2744,7 +2743,6 @@ static struct g_class g_swap_class = {
 
 DECLARE_GEOM_CLASS(g_swap_class, g_class);
 
-
 static void
 swapgeom_close_ev(void *arg, int flags)
 {
@@ -3008,7 +3006,6 @@ swapdev_close(struct thread *td, struct swdevt *sp)
 	vrele(sp->sw_vp);
 }
 
-
 static int
 swaponvp(struct thread *td, struct vnode *vp, u_long nblks)
 {
@@ -154,7 +154,6 @@ typedef void (*uma_release)(void *arg, void **store, int count);
  *
  */
 
-
 /* Function proto types */
 
 /*
@@ -1892,7 +1892,6 @@ pcpu_page_free(void *mem, vm_size_t size, uint8_t flags)
 	kva_free(sva, size);
 }
 
-
 /*
  * Zero fill initializer
  *
@@ -4976,7 +4975,6 @@ uma_vm_zone_stats(struct uma_type_header *uth, uma_zone_t z, struct sbuf *sbuf,
 	uma_cache_t cache;
 	int i;
 
-
 	for (i = 0; i < vm_ndomains; i++) {
 		zdom = ZDOM_GET(z, i);
 		uth->uth_zone_free += zdom->uzd_nitems;
@@ -307,14 +307,14 @@ cache_uz_flags(uma_cache_t cache)
 
 	return (cache->uc_freebucket.ucb_spare);
 }
-
+
 static inline uint32_t
 cache_uz_size(uma_cache_t cache)
 {
 
 	return (cache->uc_allocbucket.ucb_spare);
 }
-
+
 /*
  * Per-domain slab lists. Embedded in the kegs.
  */
@@ -163,4 +163,3 @@ void swapper(void);
 #endif
 
 #endif /* VM_H */
-
@@ -872,7 +872,6 @@ vm_fault_cow(struct faultstate *fs)
 	    (is_first_object_locked = VM_OBJECT_TRYWLOCK(fs->first_object)) &&
 	    fs->object == fs->first_object->backing_object &&
 	    VM_OBJECT_TRYWLOCK(fs->object)) {
-
 		/*
 		 * Remove but keep xbusy for replace. fs->m is moved into
 		 * fs->first_object and left busy while fs->first_m is
@@ -1011,7 +1010,6 @@ vm_fault_allocate(struct faultstate *fs)
 	int alloc_req;
 	int rv;
 
-
 	if ((fs->object->flags & OBJ_SIZEVNLOCK) != 0) {
 		rv = vm_fault_lock_vnode(fs, true);
 		MPASS(rv == KERN_SUCCESS || rv == KERN_RESOURCE_SHORTAGE);
@@ -2940,7 +2940,6 @@ vm_map_madvise(
 	return (0);
 }
 
-
 /*
  * vm_map_inherit:
  *
@@ -3235,7 +3234,6 @@ vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags)
 	return (rv);
 }
 
-
 /*
  * vm_map_wire_locked:
  *
@@ -3824,7 +3822,6 @@ vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
 	return (TRUE);
 }
 
-
 /*
  *
  * vm_map_copy_swap_object:
@@ -124,7 +124,7 @@ SYSCTL_UINT(_vm, OID_AUTO, v_free_severe,
 static int
 sysctl_vm_loadavg(SYSCTL_HANDLER_ARGS)
 {
-
+
 #ifdef SCTL_MASK32
 	u_int32_t la[4];
 
@@ -148,7 +148,6 @@ ogetpagesize(struct thread *td, struct ogetpagesize_args *uap)
 }
 #endif /* COMPAT_43 */
 
-
 /*
  * Memory Map (mmap) system call. Note that the file offset
  * and address are allowed to be NOT page aligned, though if
@@ -257,7 +256,7 @@ kern_mmap_req(struct thread *td, const struct mmap_req *mrp)
 	 * Ignore old flags that used to be defined but did not do anything.
 	 */
 	flags &= ~(MAP_RESERVED0020 | MAP_RESERVED0040);
-
+
 	/*
 	 * Enforce the constraints.
 	 * Mapping of length 0 is only allowed for old binaries.
@@ -498,7 +497,6 @@ ommap(struct thread *td, struct ommap_args *uap)
 }
 #endif /* COMPAT_43 */
 
-
 #ifndef _SYS_SYSPROTO_H_
 struct msync_args {
 	void *addr;
@@ -846,7 +844,6 @@ RestartScan:
 	 */
 	lastvecindex = -1;
 	while (entry->start < end) {
-
 		/*
 		 * check for contiguity
 		 */
@@ -278,7 +278,7 @@ vm_object_init(void)
 {
 	TAILQ_INIT(&vm_object_list);
 	mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF);
-
+
 	rw_init(&kernel_object->lock, "kernel vm object");
 	_vm_object_allocate(OBJT_PHYS, atop(VM_MAX_KERNEL_ADDRESS -
 	    VM_MIN_KERNEL_ADDRESS), OBJ_UNMANAGED, kernel_object, NULL);
@@ -556,7 +556,6 @@ vm_object_deallocate_vnode(vm_object_t object)
 	vrele(vp);
 }
 
-
 /*
  * We dropped a reference on an object and discovered that it had a
  * single remaining shadow. This is a sibling of the reference we
@@ -2269,7 +2268,6 @@ vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
 	 * Account for the charge.
 	 */
 	if (prev_object->cred != NULL) {
-
 		/*
 		 * If prev_object was charged, then this mapping,
 		 * although not charged now, may become writable
@@ -2435,7 +2433,6 @@ vm_object_vnode(vm_object_t object)
 	return (vp);
 }
 
-
 /*
  * Busy the vm object. This prevents new pages belonging to the object from
 * becoming busy. Existing pages persist as busy. Callers are responsible
@@ -130,7 +130,6 @@ vm_pager_put_pages(
 	int flags,
 	int *rtvals
 ) {
-
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	(*pagertab[object->type]->pgo_putpages)
 	    (object, m, count, flags, rtvals);
@@ -173,7 +172,6 @@ vm_pager_populate(vm_object_t object, vm_pindex_t pidx, int fault_type,
 	    fault_type, max_prot, first, last));
 }
 
-
 /*
  * vm_pager_page_unswapped
  *
@@ -109,7 +109,6 @@ void vm_phys_early_startup(void);
 int vm_phys_avail_largest(void);
 vm_paddr_t vm_phys_avail_size(int i);
 
-
 /*
  *
 * vm_phys_domain:
@@ -217,7 +217,6 @@ vm_radix_node_store(smrnode_t *p, struct vm_radix_node *v,
     enum vm_radix_access access)
 {
 
-
 	switch (access) {
 	case UNSERIALIZED:
 		smr_unserialized_store(p, v, true);
@@ -228,7 +228,6 @@ vnode_destroy_vobject(struct vnode *vp)
 	KASSERT(vp->v_object == NULL, ("vp %p obj %p", vp, vp->v_object));
 }
 
-
 /*
  * Allocate (or lookup) pager for a vnode.
  * Handle is a vnode pointer.