MFC
commit cbe7c0e167 (parent 9d00dd1afe)
@@ -220,15 +220,14 @@ you can use the
option to force the kernel to use the serial port as its
console device.
The serial port driver
.Xr uart 4
.Xr sio 4
(but not
.Xr uart 4 )
has a flag (0x20) to override this option.
If that flag is set, the serial port will always be used as the console,
regardless of the
.Fl h
option described here.
See the man page for
.Xr uart 4
for more details.
.It Fl m
mute the console to suppress all console input and output during the
boot.
@@ -392,7 +392,6 @@ static uma_zone_t l2table_zone;
static vm_offset_t pmap_kernel_l2dtable_kva;
static vm_offset_t pmap_kernel_l2ptp_kva;
static vm_paddr_t pmap_kernel_l2ptp_phys;
static struct vm_object pvzone_obj;
static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0;
static struct rwlock pvh_global_lock;

@@ -1164,7 +1163,7 @@ pmap_init(void)
NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
uma_zone_reserve_kva(pvzone, pv_entry_max);
pv_entry_high_water = 9 * (pv_entry_max / 10);

/*

@@ -397,7 +397,6 @@ static uma_zone_t l2table_zone;
static vm_offset_t pmap_kernel_l2dtable_kva;
static vm_offset_t pmap_kernel_l2ptp_kva;
static vm_paddr_t pmap_kernel_l2ptp_phys;
static struct vm_object pvzone_obj;
static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0;
static struct rwlock pvh_global_lock;

@@ -1828,7 +1827,7 @@ pmap_init(void)
NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
uma_zone_reserve_kva(pvzone, pv_entry_max);
pv_entry_high_water = 9 * (pv_entry_max / 10);

/*
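For reference, here is how the pv-entry zone setup reads once the change is applied, assembled from the pmap_init() context shown above. This is a sketch, not the verbatim file: the uma_zcreate() call, struct pv_entry, and PMAP_SHPGPERPROC are taken from the usual FreeBSD pmap code and may differ slightly per architecture.

static uma_zone_t pvzone;
static int pv_entry_max, pv_entry_high_water;

static void
pmap_init_pv_zone(void)	/* hypothetical helper mirroring pmap_init() */
{
	int shpgperproc = PMAP_SHPGPERPROC;

	pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
	pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
	/* Reserve KVA up front; no static struct vm_object is needed anymore. */
	uma_zone_reserve_kva(pvzone, pv_entry_max);
	pv_entry_high_water = 9 * (pv_entry_max / 10);
}

Note that the static struct vm_object pvzone_obj and the uma_zone_set_obj() call from the old code are gone; the zone now only needs the item limit.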
@@ -153,6 +153,36 @@ u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg)
return t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);
}

/*
* t4_report_fw_error - report firmware error
* @adap: the adapter
*
* The adapter firmware can indicate error conditions to the host.
* This routine prints out the reason for the firmware error (as
* reported by the firmware).
*/
static void t4_report_fw_error(struct adapter *adap)
{
static const char *reason[] = {
"Crash", /* PCIE_FW_EVAL_CRASH */
"During Device Preparation", /* PCIE_FW_EVAL_PREP */
"During Device Configuration", /* PCIE_FW_EVAL_CONF */
"During Device Initialization", /* PCIE_FW_EVAL_INIT */
"Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
"Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
"Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
"Reserved", /* reserved */
};
u32 pcie_fw;

pcie_fw = t4_read_reg(adap, A_PCIE_FW);
if (!(pcie_fw & F_PCIE_FW_ERR))
CH_ERR(adap, "Firmware error report called with no error\n");
else
CH_ERR(adap, "Firmware reports adapter error: %s\n",
reason[G_PCIE_FW_EVAL(pcie_fw)]);
}

/*
* Get the reply to a mailbox command and store it in @rpl in big-endian order.
*/
@@ -267,8 +297,15 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
}
}

/*
* We timed out waiting for a reply to our mailbox command. Report
* the error and also check to see if the firmware reported any
* errors ...
*/
CH_ERR(adap, "command %#x in mailbox %d timed out\n",
*(const u8 *)cmd, mbox);
if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
t4_report_fw_error(adap);
return -ETIMEDOUT;
}

@@ -2033,9 +2070,11 @@ static void cim_intr_handler(struct adapter *adapter)
{ F_TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
{ 0 }
};

int fat;

if (t4_read_reg(adapter, A_PCIE_FW) & F_PCIE_FW_ERR)
t4_report_fw_error(adapter);

fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
cim_intr_info) +
t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
@@ -4103,12 +4142,16 @@ retry:
/*
* Issue the HELLO command to the firmware. If it's not successful
* but indicates that we got a "busy" or "timeout" condition, retry
* the HELLO until we exhaust our retry limit.
* the HELLO until we exhaust our retry limit. If we do exceed our
* retry limit, check to see if the firmware left us any error
* information and report that if so ...
*/
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
if (ret != FW_SUCCESS) {
if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
goto retry;
if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
t4_report_fw_error(adap);
return ret;
}
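A note on the decode in t4_report_fw_error(): G_PCIE_FW_EVAL() pulls the evaluation field out of the A_PCIE_FW register value, and the eight-entry reason[] table covers every value a three-bit field can take, so the indexing cannot run past the array. The following shift-and-mask sketch shows the general shape of such a field accessor; the _SKETCH constants are made-up stand-ins, while the real S_/M_ definitions live in the driver's register headers.

/* Hypothetical field layout, for illustration only. */
#define S_PCIE_FW_EVAL_SKETCH	24
#define M_PCIE_FW_EVAL_SKETCH	0x7U

static unsigned int
pcie_fw_eval_sketch(uint32_t pcie_fw)
{
	/* Yields 0..7, matching the eight entries of reason[]. */
	return ((pcie_fw >> S_PCIE_FW_EVAL_SKETCH) & M_PCIE_FW_EVAL_SKETCH);
}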
@@ -4140,8 +4140,6 @@ isp_target_thread(ispsoftc_t *isp, int chan)
return;
}

ccb = xpt_alloc_ccb();

ISP_LOCK(isp);
status = cam_periph_alloc(isptargctor, NULL, isptargdtor, isptargstart, "isptarg", CAM_PERIPH_BIO, wpath, NULL, 0, softc);
if (status != CAM_REQ_CMP) {
@@ -217,7 +217,6 @@ static struct rwlock_padalign pvh_global_lock;

/* Data for the pv entry allocation mechanism */
static uma_zone_t pvzone;
static struct vm_object pvzone_obj;
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;

#define PV_ENTRY_ZONE_MIN 2048 /* min pv entries in uma zone */
@@ -1343,7 +1342,7 @@ mmu_booke_init(mmu_t mmu)
TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
pv_entry_high_water = 9 * (pv_entry_max / 10);

uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
uma_zone_reserve_kva(pvzone, pv_entry_max);

/* Pre-fill pvzone with initial number of pv entries. */
uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN);
@@ -343,7 +343,6 @@ SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
static struct mtx sw_alloc_mtx; /* protect list manipulation */
static struct pagerlst swap_pager_object_list[NOBJLISTS];
static uma_zone_t swap_zone;
static struct vm_object swap_zone_obj;

/*
* pagerops for OBJT_SWAP - "swap pager". Some ops are also global procedure
@@ -554,7 +553,7 @@ swap_pager_swap_init(void)
if (swap_zone == NULL)
panic("failed to create swap_zone.");
do {
if (uma_zone_set_obj(swap_zone, &swap_zone_obj, n))
if (uma_zone_reserve_kva(swap_zone, n))
break;
/*
* if the allocation failed, try a zone two thirds the
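The truncated comment above belongs to the fallback loop in swap_pager_swap_init(): if the KVA reservation fails, the requested zone size is cut back and the reservation retried until it either succeeds or the size reaches zero. A sketch of that loop as it reads after the conversion; the shrink step is the "two thirds" the comment refers to, written here as an approximation of the real code.

	do {
		if (uma_zone_reserve_kva(swap_zone, n))
			break;
		/*
		 * If the allocation failed, try a zone two thirds the
		 * size of the previous attempt.
		 */
		n -= ((n + 2) / 3);
	} while (n > 0);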
sys/vm/uma.h
@@ -432,24 +432,23 @@ void uma_reclaim(void);
void uma_set_align(int align);

/*
* Switches the backing object of a zone
* Reserves the maximum KVA space required by the zone and configures the zone
* to use a VM_ALLOC_NOOBJ-based backend allocator.
*
* Arguments:
* zone The zone to update.
* obj The VM object to use for future allocations.
* size The size of the object to allocate.
* nitems The upper limit on the number of items that can be allocated.
*
* Returns:
* 0 if kva space can not be allocated
* 0 if KVA space can not be allocated
* 1 if successful
*
* Discussion:
* A NULL object can be used and uma will allocate one for you. Setting
* the size will limit the amount of memory allocated to this zone.
*
* When the machine supports a direct map and the zone's items are smaller
* than a page, the zone will use the direct map instead of allocating KVA
* space.
*/
struct vm_object;
int uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int size);
int uma_zone_reserve_kva(uma_zone_t zone, int nitems);
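Taken together with the call-site hunks above, converting an existing consumer is mechanical: the dedicated struct vm_object that used to back the zone goes away and only the item limit is passed. A minimal before/after sketch with illustrative names (examplezone, example_max); both calls return 0 when the KVA reservation fails and 1 on success, as the comment block documents.

/* Before: a dedicated VM object backed the zone. */
static struct vm_object examplezone_obj;	/* illustrative */

static void
example_init_old(uma_zone_t examplezone, int example_max)
{
	if (uma_zone_set_obj(examplezone, &examplezone_obj, example_max) == 0)
		panic("examplezone: could not reserve KVA");
}

/* After: the VM object is gone; only the limit remains. */
static void
example_init_new(uma_zone_t examplezone, int example_max)
{
	if (uma_zone_reserve_kva(examplezone, example_max) == 0)
		panic("examplezone: could not reserve KVA");
}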
/*
* Sets a high limit on the number of items allowed in a zone
@@ -521,7 +520,7 @@ void uma_zone_set_zinit(uma_zone_t zone, uma_init zinit);
void uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini);

/*
* Replaces the standard page_alloc or obj_alloc functions for this zone
* Replaces the standard backend allocator for this zone.
*
* Arguments:
* zone The zone whose backend allocator is being changed.
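The uma_zone_set_allocf() comment above pairs with the uma_alloc prototypes visible in the uma_core.c hunk below (a uma_zone_t, a size in bytes, a slab-flags byte to fill in, and a wait flag). As a rough illustration, a custom backend allocator has this shape; my_backend_alloc is a made-up name, and the kmem_malloc()/UMA_SLAB_KMEM combination simply mimics what the stock page_alloc() backend does in this era of the tree.

static void *
my_backend_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
{

	*pflag = UMA_SLAB_KMEM;
	return ((void *)kmem_malloc(kmem_map, bytes, wait));
}

A zone would opt in with uma_zone_set_allocf(zone, my_backend_alloc) right after uma_zcreate().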
@@ -79,6 +79,7 @@ __FBSDID("$FreeBSD$");
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
@@ -213,7 +214,7 @@ enum zfreeskip { SKIP_NONE, SKIP_DTOR, SKIP_FINI };

/* Prototypes.. */

static void *obj_alloc(uma_zone_t, int, u_int8_t *, int);
static void *noobj_alloc(uma_zone_t, int, u_int8_t *, int);
static void *page_alloc(uma_zone_t, int, u_int8_t *, int);
static void *startup_alloc(uma_zone_t, int, u_int8_t *, int);
static void page_free(void *, int, u_int8_t);
@@ -1030,50 +1031,53 @@ page_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
* NULL if M_NOWAIT is set.
*/
static void *
obj_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
noobj_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
vm_object_t object;
TAILQ_HEAD(, vm_page) alloctail;
u_long npages;
vm_offset_t retkva, zkva;
vm_page_t p;
int pages, startpages;
vm_page_t p, p_next;
uma_keg_t keg;

TAILQ_INIT(&alloctail);
keg = zone_first_keg(zone);
object = keg->uk_obj;
retkva = 0;

/*
* This looks a little weird since we're getting one page at a time.
*/
VM_OBJECT_LOCK(object);
p = TAILQ_LAST(&object->memq, pglist);
pages = p != NULL ? p->pindex + 1 : 0;
startpages = pages;
zkva = keg->uk_kva + pages * PAGE_SIZE;
for (; bytes > 0; bytes -= PAGE_SIZE) {
p = vm_page_alloc(object, pages,
VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED);
if (p == NULL) {
if (pages != startpages)
pmap_qremove(retkva, pages - startpages);
while (pages != startpages) {
pages--;
p = TAILQ_LAST(&object->memq, pglist);
vm_page_unwire(p, 0);
vm_page_free(p);
}
retkva = 0;
goto done;
npages = howmany(bytes, PAGE_SIZE);
while (npages > 0) {
p = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT |
VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);
if (p != NULL) {
/*
* Since the page does not belong to an object, its
* listq is unused.
*/
TAILQ_INSERT_TAIL(&alloctail, p, listq);
npages--;
continue;
}
pmap_qenter(zkva, &p, 1);
if (retkva == 0)
retkva = zkva;
zkva += PAGE_SIZE;
pages += 1;
if (wait & M_WAITOK) {
VM_WAIT;
continue;
}

/*
* Page allocation failed, free intermediate pages and
* exit.
*/
TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
vm_page_unwire(p, 0);
vm_page_free(p);
}
return (NULL);
}
done:
VM_OBJECT_UNLOCK(object);
*flags = UMA_SLAB_PRIV;
zkva = keg->uk_kva +
atomic_fetchadd_long(&keg->uk_offset, round_page(bytes));
retkva = zkva;
TAILQ_FOREACH(p, &alloctail, listq) {
pmap_qenter(zkva, &p, 1);
zkva += PAGE_SIZE;
}

return ((void *)retkva);
}
@@ -3012,7 +3016,7 @@ uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)

/* See uma.h */
int
uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int count)
uma_zone_reserve_kva(uma_zone_t zone, int count)
{
uma_keg_t keg;
vm_offset_t kva;
@@ -3024,21 +3028,25 @@ uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int count)
if (pages * keg->uk_ipers < count)
pages++;

kva = kmem_alloc_nofault(kernel_map, pages * UMA_SLAB_SIZE);

if (kva == 0)
return (0);
if (obj == NULL)
obj = vm_object_allocate(OBJT_PHYS, pages);
else {
VM_OBJECT_LOCK_INIT(obj, "uma object");
_vm_object_allocate(OBJT_PHYS, pages, obj);
}
#ifdef UMA_MD_SMALL_ALLOC
if (keg->uk_ppera > 1) {
#else
if (1) {
#endif
kva = kmem_alloc_nofault(kernel_map, pages * UMA_SLAB_SIZE);
if (kva == 0)
return (0);
} else
kva = 0;
ZONE_LOCK(zone);
keg->uk_kva = kva;
keg->uk_obj = obj;
keg->uk_offset = 0;
keg->uk_maxpages = pages;
keg->uk_allocf = obj_alloc;
#ifdef UMA_MD_SMALL_ALLOC
keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
#else
keg->uk_allocf = noobj_alloc;
#endif
keg->uk_flags |= UMA_ZONE_NOFREE | UMA_ZFLAG_PRIVALLOC;
ZONE_UNLOCK(zone);
return (1);
@@ -221,8 +221,8 @@ struct uma_keg {
uma_alloc uk_allocf; /* Allocation function */
uma_free uk_freef; /* Free routine */

struct vm_object *uk_obj; /* Zone specific object */
vm_offset_t uk_kva; /* Base kva for zones with objs */
u_long uk_offset; /* Next free offset from base KVA */
vm_offset_t uk_kva; /* Zone base KVA */
uma_zone_t uk_slabzone; /* Slab zone backing us, if OFFPAGE */

u_int16_t uk_pgoff; /* Offset to uma_slab struct */
@@ -125,7 +125,6 @@ static uma_zone_t mapentzone;
static uma_zone_t kmapentzone;
static uma_zone_t mapzone;
static uma_zone_t vmspace_zone;
static struct vm_object kmapentobj;
static int vmspace_zinit(void *mem, int size, int flags);
static void vmspace_zfini(void *mem, int size);
static int vm_map_zinit(void *mem, int ize, int flags);
@@ -303,7 +302,7 @@ vmspace_alloc(min, max)
void
vm_init2(void)
{
uma_zone_set_obj(kmapentzone, &kmapentobj, lmin(cnt.v_page_count,
uma_zone_reserve_kva(kmapentzone, lmin(cnt.v_page_count,
(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE) / 8 +
maxproc * 2 + maxfiles);
vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
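To put a rough number on the kmapentzone reservation above: the item limit works out to min(physical pages, kernel VA pages) / 8 + 2 * maxproc + maxfiles. On a hypothetical machine with 4 GiB of RAM (about 1048576 4 KiB pages, and a kernel address space at least that large), maxproc = 10000 and maxfiles = 65536, that comes to 1048576 / 8 + 20000 + 65536 = 216608 kernel map entries, so roughly that many entries' worth of KVA is set aside up front by uma_zone_reserve_kva().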
@@ -194,7 +194,7 @@ vm_object_zinit(void *mem, int size, int flags)

object = (vm_object_t)mem;
bzero(&object->mtx, sizeof(object->mtx));
VM_OBJECT_LOCK_INIT(object, "standard object");
mtx_init(&object->mtx, "vm object", NULL, MTX_DEF | MTX_DUPOK);

/* These are true for any object that has been freed */
object->paging_in_progress = 0;
@@ -203,7 +203,7 @@ vm_object_zinit(void *mem, int size, int flags)
return (0);
}

void
static void
_vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
{

@@ -265,7 +265,7 @@ vm_object_init(void)
TAILQ_INIT(&vm_object_list);
mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF);

VM_OBJECT_LOCK_INIT(kernel_object, "kernel object");
mtx_init(&kernel_object->mtx, "vm object", "kernel object", MTX_DEF);
_vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
kernel_object);
#if VM_NRESERVLEVEL > 0
@@ -273,7 +273,7 @@ vm_object_init(void)
kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
#endif

VM_OBJECT_LOCK_INIT(kmem_object, "kmem object");
mtx_init(&kmem_object->mtx, "vm object", "kmem object", MTX_DEF);
_vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
kmem_object);
#if VM_NRESERVLEVEL > 0
@@ -208,9 +208,6 @@ extern struct vm_object kmem_object_store;
#define VM_OBJECT_LOCK(object) mtx_lock(&(object)->mtx)
#define VM_OBJECT_LOCK_ASSERT(object, type) \
mtx_assert(&(object)->mtx, (type))
#define VM_OBJECT_LOCK_INIT(object, type) \
mtx_init(&(object)->mtx, "vm object", \
(type), MTX_DEF | MTX_DUPOK)
#define VM_OBJECT_LOCKED(object) mtx_owned(&(object)->mtx)
#define VM_OBJECT_SLEEP(object, wchan, pri, wmesg, timo) \
msleep((wchan), &(object)->mtx, (pri), \
@@ -236,7 +233,6 @@ void vm_object_pip_wakeupn(vm_object_t object, short i);
void vm_object_pip_wait(vm_object_t object, char *waitid);

vm_object_t vm_object_allocate (objtype_t, vm_pindex_t);
void _vm_object_allocate (objtype_t, vm_pindex_t, vm_object_t);
boolean_t vm_object_coalesce(vm_object_t, vm_ooffset_t, vm_size_t, vm_size_t,
boolean_t);
void vm_object_collapse (vm_object_t);