Make MAXPHYS tunable. Bump MAXPHYS to 1M.
Replace the MAXPHYS constant with a runtime variable, maxphys. It is initialized from MAXPHYS by default, but can also be adjusted with the tunable kern.maxphys.

Make the b_pages[] array in struct buf flexible. Size b_pages[] for buffer cache buffers exactly to atop(maxbcachebuf) (currently it is sized to atop(MAXPHYS)), while b_pages[] for pbufs is sized to atop(maxphys) + 1. The +1 for pbufs allows several pbuf consumers, among them vmapbuf(), to use unaligned buffers still sized to maxphys, especially when such buffers come from userspace (*). Overall, we save a significant amount of otherwise wasted memory in b_pages[] for buffer cache buffers, while bumping MAXPHYS to the desired high value.

Eliminate all direct uses of the MAXPHYS constant in kernel and driver sources, except the place that initializes maxphys. Some random (and arguably weird) uses of MAXPHYS, e.g. in the linuxolator, are converted directly. Some drivers that use MAXPHYS to size embedded structures get a private MAXPHYS-like constant; their conversion is out of scope for this work.

Changes to cam/, dev/ahci, dev/ata, dev/mpr, dev/mpt, dev/mvs, and dev/siis were either submitted by, or based on changes by, mav.

Suggested by:	mav (*)
Reviewed by:	imp, mav, mckusick, scottl (intermediate versions)
Tested by:	pho
Sponsored by:	The FreeBSD Foundation
Differential revision:	https://reviews.freebsd.org/D27225
parent: 1b9c78611d
commit: cd85379104
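The initialization of maxphys itself is not among the hunks below. As a rough sketch of the shape the commit message describes (a runtime variable seeded from the MAXPHYS build option and overridable through the kern.maxphys loader tunable): everything here other than the maxphys, MAXPHYS, DFLTPHYS and kern.maxphys names is an illustrative assumption, not the committed code.

	/* Hypothetical sketch only; not part of this diff. */
	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/kernel.h>
	#include <sys/sysctl.h>

	u_long maxphys = MAXPHYS;		/* build-time default, now 1M */

	static void
	maxphys_init_sketch(void *dummy __unused)
	{
		/* loader.conf override, e.g. kern.maxphys="2097152" */
		TUNABLE_ULONG_FETCH("kern.maxphys", &maxphys);
		maxphys = round_page(maxphys);	/* keep it page-aligned */
		if (maxphys < DFLTPHYS)
			maxphys = DFLTPHYS;	/* never below the old floor */
	}
	SYSINIT(maxphys_sketch, SI_SUB_TUNABLES, SI_ORDER_ANY,
	    maxphys_init_sketch, NULL);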
@@ -3447,8 +3447,8 @@ adasetgeom(struct ada_softc *softc, struct ccb_getdev *cgd)
 	maxio = softc->cpi.maxio;	/* Honor max I/O size of SIM */
 	if (maxio == 0)
 		maxio = DFLTPHYS;	/* traditional default */
-	else if (maxio > MAXPHYS)
-		maxio = MAXPHYS;	/* for safety */
+	else if (maxio > maxphys)
+		maxio = maxphys;	/* for safety */
 	if (softc->flags & ADA_FLAG_CAN_48BIT)
 		maxio = min(maxio, 65536 * softc->params.secsize);
 	else				/* 28bit ATA command limit */
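The same clamping pattern recurs across the CAM periph drivers touched by this commit (ada above; nda, cd, da, pass, sa, sg and targ below): honor the SIM-reported maxio, fall back to DFLTPHYS when the SIM reports nothing, and never exceed the runtime maxphys. A condensed sketch of that logic; the helper name is an illustrative assumption, and each driver actually open-codes these checks in its register path.

	static u_long
	periph_clamp_maxio(u_long sim_maxio)
	{
		if (sim_maxio == 0)
			return (DFLTPHYS);	/* traditional default */
		if (sim_maxio > maxphys)
			return (maxphys);	/* for safety */
		return (sim_maxio);		/* real value */
	}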
@@ -368,7 +368,7 @@ cam_compat_translate_dev_match_0x18(union ccb *ccb)

 	/* Remap the CCB into kernel address space */
 	bzero(&mapinfo, sizeof(mapinfo));
-	cam_periph_mapmem(ccb, &mapinfo, MAXPHYS);
+	cam_periph_mapmem(ccb, &mapinfo, maxphys);

 	dm = ccb->cdm.matches;
 	/* Translate in-place: old fields are smaller */
@@ -772,7 +772,7 @@ camperiphfree(struct cam_periph *periph)
  * Map user virtual pointers into kernel virtual address space, so we can
  * access the memory. This is now a generic function that centralizes most
  * of the sanity checks on the data flags, if any.
- * This also only works for up to MAXPHYS memory. Since we use
+ * This also only works for up to maxphys memory. Since we use
  * buffers to map stuff in and out, we're limited to the buffer size.
  */
 int
@@ -788,8 +788,8 @@ cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo,
 	bzero(mapinfo, sizeof(*mapinfo));
 	if (maxmap == 0)
 		maxmap = DFLTPHYS;	/* traditional default */
-	else if (maxmap > MAXPHYS)
-		maxmap = MAXPHYS;	/* for safety */
+	else if (maxmap > maxphys)
+		maxmap = maxphys;	/* for safety */
 	switch(ccb->ccb_h.func_code) {
 	case XPT_DEV_MATCH:
 		if (ccb->cdm.match_buf_len == 0) {
@@ -813,9 +813,9 @@ cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo,
 		}
 		/*
 		 * This request will not go to the hardware, no reason
-		 * to be so strict. vmapbuf() is able to map up to MAXPHYS.
+		 * to be so strict. vmapbuf() is able to map up to maxphys.
 		 */
-		maxmap = MAXPHYS;
+		maxmap = maxphys;
 		break;
 	case XPT_SCSI_IO:
 	case XPT_CONT_TARGET_IO:
@@ -881,9 +881,9 @@ cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo,

 		/*
 		 * This request will not go to the hardware, no reason
-		 * to be so strict. vmapbuf() is able to map up to MAXPHYS.
+		 * to be so strict. vmapbuf() is able to map up to maxphys.
 		 */
-		maxmap = MAXPHYS;
+		maxmap = maxphys;
 		break;
 	default:
 		return(EINVAL);
@@ -911,7 +911,7 @@ cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo,
 		 * boundary.
 		 */
 		misaligned[i] = (lengths[i] +
-		    (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK) > MAXPHYS);
+		    (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK) > maxphys);
 	}

 	/*
@@ -553,7 +553,7 @@ xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *
 		 * Map the pattern and match buffers into kernel
 		 * virtual address space.
 		 */
-		error = cam_periph_mapmem(inccb, &mapinfo, MAXPHYS);
+		error = cam_periph_mapmem(inccb, &mapinfo, maxphys);

 		if (error) {
 			inccb->ccb_h.path = old_path;
@@ -102,9 +102,11 @@ __FBSDID("$FreeBSD$");
  */
 #define CTLBLK_HALF_IO_SIZE	(512 * 1024)
 #define CTLBLK_MAX_IO_SIZE	(CTLBLK_HALF_IO_SIZE * 2)
-#define CTLBLK_MAX_SEG		MIN(CTLBLK_HALF_IO_SIZE, MAXPHYS)
-#define CTLBLK_HALF_SEGS	MAX(CTLBLK_HALF_IO_SIZE / CTLBLK_MAX_SEG, 1)
+#define CTLBLK_MIN_SEG		(128 * 1024)
+#define CTLBLK_MAX_SEG		MIN(CTLBLK_HALF_IO_SIZE, maxphys)
+#define CTLBLK_HALF_SEGS	MAX(CTLBLK_HALF_IO_SIZE / CTLBLK_MIN_SEG, 1)
 #define CTLBLK_MAX_SEGS		(CTLBLK_HALF_SEGS * 2)
+#define CTLBLK_NUM_SEGS		(CTLBLK_MAX_IO_SIZE / CTLBLK_MAX_SEG)

 #ifdef CTLBLK_DEBUG
 #define DPRINTF(fmt, args...) \
@@ -189,10 +191,8 @@ struct ctl_be_block_softc {
 	int num_luns;
 	SLIST_HEAD(, ctl_be_block_lun) lun_list;
 	uma_zone_t beio_zone;
-	uma_zone_t buf_zone;
-#if (CTLBLK_MAX_SEG > 131072)
-	uma_zone_t buf128_zone;
-#endif
+	uma_zone_t bufmin_zone;
+	uma_zone_t bufmax_zone;
 };

 static struct ctl_be_block_softc backend_block_softc;
@@ -307,12 +307,13 @@ ctl_alloc_seg(struct ctl_be_block_softc *softc, struct ctl_sg_entry *sg,
     size_t len)
 {

-#if (CTLBLK_MAX_SEG > 131072)
-	if (len <= 131072)
-		sg->addr = uma_zalloc(softc->buf128_zone, M_WAITOK);
-	else
-#endif
-	sg->addr = uma_zalloc(softc->buf_zone, M_WAITOK);
+	if (len <= CTLBLK_MIN_SEG) {
+		sg->addr = uma_zalloc(softc->bufmin_zone, M_WAITOK);
+	} else {
+		KASSERT(len <= CTLBLK_MAX_SEG,
+		    ("Too large alloc %zu > %lu", len, CTLBLK_MAX_SEG));
+		sg->addr = uma_zalloc(softc->bufmax_zone, M_WAITOK);
+	}
 	sg->len = len;
 }

@@ -320,12 +321,13 @@ static void
 ctl_free_seg(struct ctl_be_block_softc *softc, struct ctl_sg_entry *sg)
 {

-#if (CTLBLK_MAX_SEG > 131072)
-	if (sg->len <= 131072)
-		uma_zfree(softc->buf128_zone, sg->addr);
-	else
-#endif
-	uma_zfree(softc->buf_zone, sg->addr);
+	if (sg->len <= CTLBLK_MIN_SEG) {
+		uma_zfree(softc->bufmin_zone, sg->addr);
+	} else {
+		KASSERT(sg->len <= CTLBLK_MAX_SEG,
+		    ("Too large free %zu > %lu", sg->len, CTLBLK_MAX_SEG));
+		uma_zfree(softc->bufmax_zone, sg->addr);
+	}
 }

 static struct ctl_be_block_io *
@@ -1344,7 +1346,7 @@ ctl_be_block_cw_dispatch_ws(struct ctl_be_block_lun *be_lun,
 	else
 		pbo = 0;
 	len_left = (uint64_t)lbalen->len * cbe_lun->blocksize;
-	for (i = 0, lba = 0; i < CTLBLK_MAX_SEGS && len_left > 0; i++) {
+	for (i = 0, lba = 0; i < CTLBLK_NUM_SEGS && len_left > 0; i++) {
 		/*
 		 * Setup the S/G entry for this chunk.
 		 */
@@ -1631,7 +1633,7 @@ ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun,
 		 * Setup the S/G entry for this chunk.
 		 */
 		ctl_alloc_seg(softc, &beio->sg_segs[i],
-		    min(CTLBLK_MAX_SEG, len_left));
+		    MIN(CTLBLK_MAX_SEG, len_left));

 		DPRINTF("segment %d addr %p len %zd\n", i,
 		    beio->sg_segs[i].addr, beio->sg_segs[i].len);
@@ -2802,12 +2804,11 @@ ctl_be_block_init(void)
 	mtx_init(&softc->lock, "ctlblock", NULL, MTX_DEF);
 	softc->beio_zone = uma_zcreate("beio", sizeof(struct ctl_be_block_io),
 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
-	softc->buf_zone = uma_zcreate("ctlblock", CTLBLK_MAX_SEG,
+	softc->bufmin_zone = uma_zcreate("ctlblockmin", CTLBLK_MIN_SEG,
 	    NULL, NULL, NULL, NULL, /*align*/ 0, /*flags*/0);
-#if (CTLBLK_MAX_SEG > 131072)
-	softc->buf128_zone = uma_zcreate("ctlblock128", 131072,
-	    NULL, NULL, NULL, NULL, /*align*/ 0, /*flags*/0);
-#endif
+	if (CTLBLK_MIN_SEG < CTLBLK_MAX_SEG)
+		softc->bufmax_zone = uma_zcreate("ctlblockmax", CTLBLK_MAX_SEG,
+		    NULL, NULL, NULL, NULL, /*align*/ 0, /*flags*/0);
 	SLIST_INIT(&softc->lun_list);
 	return (0);
 }
@@ -2832,10 +2833,9 @@ ctl_be_block_shutdown(void)
 		mtx_lock(&softc->lock);
 	}
 	mtx_unlock(&softc->lock);
-	uma_zdestroy(softc->buf_zone);
-#if (CTLBLK_MAX_SEG > 131072)
-	uma_zdestroy(softc->buf128_zone);
-#endif
+	uma_zdestroy(softc->bufmin_zone);
+	if (CTLBLK_MIN_SEG < CTLBLK_MAX_SEG)
+		uma_zdestroy(softc->bufmax_zone);
 	uma_zdestroy(softc->beio_zone);
 	mtx_destroy(&softc->lock);
 	sx_destroy(&softc->modify_lock);
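A side note on the removed #if (CTLBLK_MAX_SEG > 131072) guards: CTLBLK_MAX_SEG now expands to MIN(CTLBLK_HALF_IO_SIZE, maxphys), and maxphys is a variable, so the preprocessor can no longer compare it against a constant. The decision therefore moves to run time, as in this minimal sketch (all names other than maxphys are illustrative assumptions):

	#include <sys/param.h>

	extern u_long maxphys;			/* runtime limit, was MAXPHYS */

	#define EXAMPLE_MIN_SEG	(128 * 1024)
	#define EXAMPLE_MAX_SEG	MIN(512 * 1024, maxphys)

	static int
	example_needs_big_zone(void)
	{
		/* Evaluated at run time; a #if on EXAMPLE_MAX_SEG cannot be. */
		return (EXAMPLE_MIN_SEG < EXAMPLE_MAX_SEG);
	}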
@ -1592,7 +1592,7 @@ sdda_add_part(struct cam_periph *periph, u_int type, const char *name,
|
||||
part->disk->d_name = part->name;
|
||||
part->disk->d_drv1 = part;
|
||||
part->disk->d_maxsize =
|
||||
MIN(MAXPHYS, sdda_get_max_data(periph,
|
||||
MIN(maxphys, sdda_get_max_data(periph,
|
||||
(union ccb *)&cpi) * mmc_get_sector_size(periph));
|
||||
part->disk->d_unit = cnt;
|
||||
part->disk->d_flags = 0;
|
||||
|
@ -906,8 +906,8 @@ ndaregister(struct cam_periph *periph, void *arg)
|
||||
maxio = cpi.maxio; /* Honor max I/O size of SIM */
|
||||
if (maxio == 0)
|
||||
maxio = DFLTPHYS; /* traditional default */
|
||||
else if (maxio > MAXPHYS)
|
||||
maxio = MAXPHYS; /* for safety */
|
||||
else if (maxio > maxphys)
|
||||
maxio = maxphys; /* for safety */
|
||||
disk->d_maxsize = maxio;
|
||||
flbas_fmt = (nsd->flbas >> NVME_NS_DATA_FLBAS_FORMAT_SHIFT) &
|
||||
NVME_NS_DATA_FLBAS_FORMAT_MASK;
|
||||
|
@ -696,8 +696,8 @@ cdregister(struct cam_periph *periph, void *arg)
|
||||
softc->disk->d_drv1 = periph;
|
||||
if (cpi.maxio == 0)
|
||||
softc->disk->d_maxsize = DFLTPHYS; /* traditional default */
|
||||
else if (cpi.maxio > MAXPHYS)
|
||||
softc->disk->d_maxsize = MAXPHYS; /* for safety */
|
||||
else if (cpi.maxio > maxphys)
|
||||
softc->disk->d_maxsize = maxphys; /* for safety */
|
||||
else
|
||||
softc->disk->d_maxsize = cpi.maxio;
|
||||
softc->disk->d_flags = 0;
|
||||
|
@ -2921,8 +2921,8 @@ daregister(struct cam_periph *periph, void *arg)
|
||||
softc->disk->d_drv1 = periph;
|
||||
if (cpi.maxio == 0)
|
||||
softc->maxio = DFLTPHYS; /* traditional default */
|
||||
else if (cpi.maxio > MAXPHYS)
|
||||
softc->maxio = MAXPHYS; /* for safety */
|
||||
else if (cpi.maxio > maxphys)
|
||||
softc->maxio = maxphys; /* for safety */
|
||||
else
|
||||
softc->maxio = cpi.maxio;
|
||||
if (softc->quirks & DA_Q_128KB)
|
||||
@ -4819,7 +4819,7 @@ dadone_proberc(struct cam_periph *periph, union ccb *done_ccb)
|
||||
if (maxsector == 0)
|
||||
maxsector = -1;
|
||||
}
|
||||
if (block_size >= MAXPHYS) {
|
||||
if (block_size >= maxphys) {
|
||||
xpt_print(periph->path,
|
||||
"unsupportable block size %ju\n",
|
||||
(uintmax_t) block_size);
|
||||
|
@ -583,15 +583,15 @@ passregister(struct cam_periph *periph, void *arg)
|
||||
periph->periph_name, periph->unit_number);
|
||||
snprintf(softc->io_zone_name, sizeof(softc->io_zone_name), "%s%dIO",
|
||||
periph->periph_name, periph->unit_number);
|
||||
softc->io_zone_size = MAXPHYS;
|
||||
softc->io_zone_size = maxphys;
|
||||
knlist_init_mtx(&softc->read_select.si_note, cam_periph_mtx(periph));
|
||||
|
||||
xpt_path_inq(&cpi, periph->path);
|
||||
|
||||
if (cpi.maxio == 0)
|
||||
softc->maxio = DFLTPHYS; /* traditional default */
|
||||
else if (cpi.maxio > MAXPHYS)
|
||||
softc->maxio = MAXPHYS; /* for safety */
|
||||
else if (cpi.maxio > maxphys)
|
||||
softc->maxio = maxphys; /* for safety */
|
||||
else
|
||||
softc->maxio = cpi.maxio; /* real value */
|
||||
|
||||
@ -1507,7 +1507,7 @@ passmemsetup(struct cam_periph *periph, struct pass_io_req *io_req)
|
||||
|
||||
/*
|
||||
* We allocate buffers in io_zone_size increments for an
|
||||
* S/G list. This will generally be MAXPHYS.
|
||||
* S/G list. This will generally be maxphys.
|
||||
*/
|
||||
if (lengths[0] <= softc->io_zone_size)
|
||||
num_segs_needed = 1;
|
||||
|
@ -2447,12 +2447,12 @@ saregister(struct cam_periph *periph, void *arg)
|
||||
|
||||
/*
|
||||
* If maxio isn't set, we fall back to DFLTPHYS. Otherwise we take
|
||||
* the smaller of cpi.maxio or MAXPHYS.
|
||||
* the smaller of cpi.maxio or maxphys.
|
||||
*/
|
||||
if (cpi.maxio == 0)
|
||||
softc->maxio = DFLTPHYS;
|
||||
else if (cpi.maxio > MAXPHYS)
|
||||
softc->maxio = MAXPHYS;
|
||||
else if (cpi.maxio > maxphys)
|
||||
softc->maxio = maxphys;
|
||||
else
|
||||
softc->maxio = cpi.maxio;
|
||||
|
||||
|
@ -327,8 +327,8 @@ sgregister(struct cam_periph *periph, void *arg)
|
||||
|
||||
if (cpi.maxio == 0)
|
||||
softc->maxio = DFLTPHYS; /* traditional default */
|
||||
else if (cpi.maxio > MAXPHYS)
|
||||
softc->maxio = MAXPHYS; /* for safety */
|
||||
else if (cpi.maxio > maxphys)
|
||||
softc->maxio = maxphys; /* for safety */
|
||||
else
|
||||
softc->maxio = cpi.maxio; /* real value */
|
||||
|
||||
|
@ -404,8 +404,8 @@ targenable(struct targ_softc *softc, struct cam_path *path, int grp6_len,
|
||||
}
|
||||
if (cpi.maxio == 0)
|
||||
softc->maxio = DFLTPHYS; /* traditional default */
|
||||
else if (cpi.maxio > MAXPHYS)
|
||||
softc->maxio = MAXPHYS; /* for safety */
|
||||
else if (cpi.maxio > maxphys)
|
||||
softc->maxio = maxphys; /* for safety */
|
||||
else
|
||||
softc->maxio = cpi.maxio; /* real value */
|
||||
|
||||
|
@ -1928,8 +1928,8 @@ linprocfs_doauxv(PFS_FILL_ARGS)
|
||||
buflen = resid;
|
||||
if (buflen > IOSIZE_MAX)
|
||||
return (EINVAL);
|
||||
if (buflen > MAXPHYS)
|
||||
buflen = MAXPHYS;
|
||||
if (buflen > maxphys)
|
||||
buflen = maxphys;
|
||||
if (resid <= 0)
|
||||
return (0);
|
||||
|
||||
|
@ -2152,7 +2152,7 @@ linux_ifconf(struct thread *td, struct ifconf *uifc)
|
||||
if (error != 0)
|
||||
return (error);
|
||||
|
||||
max_len = MAXPHYS - 1;
|
||||
max_len = maxphys - 1;
|
||||
|
||||
CURVNET_SET(TD_TO_VNET(td));
|
||||
/* handle the 'request buffer size' case */
|
||||
|
@@ -602,7 +602,7 @@ INVARIANTS	opt_global.h
 KASSERT_PANIC_OPTIONAL	opt_global.h
 MAXCPU	opt_global.h
 MAXMEMDOM	opt_global.h
-MAXPHYS	opt_global.h
+MAXPHYS	opt_maxphys.h
 MCLSHIFT	opt_global.h
 MUTEX_NOINLINE	opt_global.h
 LOCK_PROFILING	opt_global.h
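Moving the MAXPHYS option from opt_global.h to opt_maxphys.h means a config-supplied value no longer touches every compilation unit; only code that explicitly includes the option header sees it, and everything else consults the runtime maxphys. A hedged sketch of how the option is still consumed after this change (the config value shown is illustrative, and the fallback behavior is an assumption):

	/*
	 * Kernel configuration (sketch):
	 *     options  MAXPHYS=(1024*1024)
	 */
	#include "opt_maxphys.h"	/* only files that need the option include this */
	#include <sys/param.h>		/* believed to supply a default MAXPHYS when unset */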
@ -379,7 +379,7 @@ vdev_geom_io(struct g_consumer *cp, int *cmds, void **datas, off_t *offsets,
|
||||
int i, n_bios, j;
|
||||
size_t bios_size;
|
||||
|
||||
maxio = MAXPHYS - (MAXPHYS % cp->provider->sectorsize);
|
||||
maxio = maxphys - (maxphys % cp->provider->sectorsize);
|
||||
n_bios = 0;
|
||||
|
||||
/* How many bios are required for all commands ? */
|
||||
|
@ -1191,7 +1191,7 @@ zvol_rename_minor(zvol_state_t *zv, const char *newname)
|
||||
args.mda_si_drv2 = zv;
|
||||
if (make_dev_s(&args, &dev, "%s/%s", ZVOL_DRIVER, newname)
|
||||
== 0) {
|
||||
dev->si_iosize_max = MAXPHYS;
|
||||
dev->si_iosize_max = maxphys;
|
||||
zsd->zsd_cdev = dev;
|
||||
}
|
||||
}
|
||||
@ -1327,7 +1327,7 @@ zvol_create_minor_impl(const char *name)
|
||||
dmu_objset_disown(os, B_TRUE, FTAG);
|
||||
goto out_giant;
|
||||
}
|
||||
dev->si_iosize_max = MAXPHYS;
|
||||
dev->si_iosize_max = maxphys;
|
||||
zsd->zsd_cdev = dev;
|
||||
}
|
||||
(void) strlcpy(zv->zv_name, name, MAXPATHLEN);
|
||||
|
@ -1124,8 +1124,7 @@ ahci_dmainit(device_t dev)
|
||||
error = bus_dma_tag_create(bus_get_dma_tag(dev), 2, 0,
|
||||
BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
|
||||
NULL, NULL,
|
||||
AHCI_SG_ENTRIES * PAGE_SIZE * ch->numslots,
|
||||
AHCI_SG_ENTRIES, AHCI_PRD_MAX,
|
||||
AHCI_SG_ENTRIES * PAGE_SIZE, AHCI_SG_ENTRIES, AHCI_PRD_MAX,
|
||||
0, busdma_lock_mutex, &ch->mtx, &ch->dma.data_tag);
|
||||
if (error != 0)
|
||||
goto error;
|
||||
@ -1187,6 +1186,7 @@ ahci_slotsalloc(device_t dev)
|
||||
slot->ch = ch;
|
||||
slot->slot = i;
|
||||
slot->state = AHCI_SLOT_EMPTY;
|
||||
slot->ct_offset = AHCI_CT_OFFSET + AHCI_CT_SIZE * i;
|
||||
slot->ccb = NULL;
|
||||
callout_init_mtx(&slot->timeout, &ch->mtx, 0);
|
||||
|
||||
@ -1642,8 +1642,7 @@ ahci_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
|
||||
}
|
||||
KASSERT(nsegs <= AHCI_SG_ENTRIES, ("too many DMA segment entries\n"));
|
||||
/* Get a piece of the workspace for this request */
|
||||
ctp = (struct ahci_cmd_tab *)
|
||||
(ch->dma.work + AHCI_CT_OFFSET + (AHCI_CT_SIZE * slot->slot));
|
||||
ctp = (struct ahci_cmd_tab *)(ch->dma.work + slot->ct_offset);
|
||||
/* Fill S/G table */
|
||||
prd = &ctp->prd_tab[0];
|
||||
for (i = 0; i < nsegs; i++) {
|
||||
@ -1672,8 +1671,7 @@ ahci_execute_transaction(struct ahci_slot *slot)
|
||||
uint16_t cmd_flags;
|
||||
|
||||
/* Get a piece of the workspace for this request */
|
||||
ctp = (struct ahci_cmd_tab *)
|
||||
(ch->dma.work + AHCI_CT_OFFSET + (AHCI_CT_SIZE * slot->slot));
|
||||
ctp = (struct ahci_cmd_tab *)(ch->dma.work + slot->ct_offset);
|
||||
/* Setup the FIS for this request */
|
||||
if (!(fis_size = ahci_setup_fis(ch, ctp, ccb, slot->slot))) {
|
||||
device_printf(ch->dev, "Setting up SATA FIS failed\n");
|
||||
@ -1710,8 +1708,7 @@ ahci_execute_transaction(struct ahci_slot *slot)
|
||||
softreset = 0;
|
||||
clp->bytecount = 0;
|
||||
clp->cmd_flags = htole16(cmd_flags);
|
||||
clp->cmd_table_phys = htole64(ch->dma.work_bus + AHCI_CT_OFFSET +
|
||||
(AHCI_CT_SIZE * slot->slot));
|
||||
clp->cmd_table_phys = htole64(ch->dma.work_bus + slot->ct_offset);
|
||||
bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map,
|
||||
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
|
||||
bus_dmamap_sync(ch->dma.rfis_tag, ch->dma.rfis_map,
|
||||
@ -2868,7 +2865,7 @@ ahciaction(struct cam_sim *sim, union ccb *ccb)
|
||||
cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
|
||||
cpi->protocol = PROTO_ATA;
|
||||
cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
|
||||
cpi->maxio = MAXPHYS;
|
||||
cpi->maxio = ctob(AHCI_SG_ENTRIES - 1);
|
||||
/* ATI SB600 can't handle 256 sectors with FPDMA (NCQ). */
|
||||
if (ch->quirks & AHCI_Q_MAXIO_64K)
|
||||
cpi->maxio = min(cpi->maxio, 128 * 512);
|
||||
|
@@ -310,13 +310,8 @@
 #define AHCI_P_DEVSLP_DM	0x0e000000
 #define AHCI_P_DEVSLP_DM_SHIFT	25

-/* Just to be sure, if building as module. */
-#if MAXPHYS < 512 * 1024
-#undef MAXPHYS
-#define MAXPHYS	512 * 1024
-#endif
 /* Pessimistic prognosis on number of required S/G entries */
-#define AHCI_SG_ENTRIES	(roundup(btoc(MAXPHYS) + 1, 8))
+#define AHCI_SG_ENTRIES	MIN(roundup(btoc(maxphys) + 1, 8), 65528)
 /* Command list. 32 commands. First, 1Kbyte aligned. */
 #define AHCI_CL_OFFSET	0
 #define AHCI_CL_SIZE	32
@@ -344,7 +339,7 @@ struct ahci_cmd_tab {
 	u_int8_t		cfis[64];
 	u_int8_t		acmd[32];
 	u_int8_t		reserved[32];
-	struct ahci_dma_prd	prd_tab[AHCI_SG_ENTRIES];
+	struct ahci_dma_prd	prd_tab[];
 } __packed;

 struct ahci_cmd_list {
@@ -394,6 +389,7 @@ struct ahci_slot {
 	struct ahci_channel	*ch;		/* Channel */
 	u_int8_t		slot;		/* Number of this slot */
 	enum ahci_slot_states	state;		/* Slot state */
+	u_int			ct_offset;	/* cmd_tab offset */
 	union ccb		*ccb;		/* CCB occupying slot */
 	struct ata_dmaslot	dma;		/* DMA data of this slot */
 	struct callout		timeout;	/* Execution timeout */
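With prd_tab[] turned into a flexible array member, sizeof(struct ahci_cmd_tab) covers only the fixed header, and the per-slot command-table offset depends on the runtime AHCI_SG_ENTRIES; the earlier hunks therefore carry it in slot->ct_offset. A sketch of how such a per-command size could be derived; the helper name is an assumption, not code from this commit:

	static size_t
	ahci_ct_size_sketch(void)
	{
		/* Fixed header (cfis/acmd/reserved) plus one PRD per S/G entry. */
		return (sizeof(struct ahci_cmd_tab) +
		    AHCI_SG_ENTRIES * sizeof(struct ahci_dma_prd));
	}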
@ -641,7 +641,7 @@ ahciemaction(struct cam_sim *sim, union ccb *ccb)
|
||||
cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
|
||||
cpi->protocol = PROTO_ATA;
|
||||
cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
|
||||
cpi->maxio = MAXPHYS;
|
||||
cpi->maxio = maxphys;
|
||||
cpi->hba_vendor = pci_get_vendor(parent);
|
||||
cpi->hba_device = pci_get_device(parent);
|
||||
cpi->hba_subvendor = pci_get_subvendor(parent);
|
||||
|
@ -139,7 +139,7 @@ ata_attach(device_t dev)
|
||||
if (ch->flags & ATA_SATA)
|
||||
ch->user[i].bytecount = 8192;
|
||||
else
|
||||
ch->user[i].bytecount = MAXPHYS;
|
||||
ch->user[i].bytecount = 65536;
|
||||
ch->user[i].caps = 0;
|
||||
ch->curr[i] = ch->user[i];
|
||||
if (ch->flags & ATA_SATA) {
|
||||
|
@ -152,7 +152,7 @@
|
||||
#define ATA_SACTIVE 16
|
||||
|
||||
/* DMA register defines */
|
||||
#define ATA_DMA_ENTRIES 256
|
||||
#define ATA_DMA_ENTRIES MAX(17, btoc(maxphys) + 1)
|
||||
#define ATA_DMA_EOT 0x80000000
|
||||
|
||||
#define ATA_BMCMD_PORT 17
|
||||
|
@ -87,7 +87,7 @@ ata_dmainit(device_t dev)
|
||||
if (ch->dma.segsize == 0)
|
||||
ch->dma.segsize = 65536;
|
||||
if (ch->dma.max_iosize == 0)
|
||||
ch->dma.max_iosize = MIN((ATA_DMA_ENTRIES - 1) * PAGE_SIZE, MAXPHYS);
|
||||
ch->dma.max_iosize = (ATA_DMA_ENTRIES - 1) * PAGE_SIZE;
|
||||
if (ch->dma.max_address == 0)
|
||||
ch->dma.max_address = BUS_SPACE_MAXADDR_32BIT;
|
||||
if (ch->dma.dma_slots == 0)
|
||||
|
@ -71,7 +71,7 @@
|
||||
* because of CAM_SCSI2_MAXLUN in cam_xpt.c
|
||||
*/
|
||||
#define SBP_NUM_LUNS 64
|
||||
#define SBP_MAXPHYS MIN(MAXPHYS, (512*1024) /* 512KB */)
|
||||
#define SBP_MAXPHYS (128 * 1024)
|
||||
#define SBP_DMA_SIZE PAGE_SIZE
|
||||
#define SBP_LOGIN_SIZE sizeof(struct sbp_login_res)
|
||||
#define SBP_QUEUE_LEN ((SBP_DMA_SIZE - SBP_LOGIN_SIZE) / sizeof(struct sbp_ocb))
|
||||
|
@ -721,9 +721,9 @@ cqspi_attach(device_t dev)
|
||||
return (ENXIO);
|
||||
}
|
||||
|
||||
xdma_prep_sg(sc->xchan_tx, TX_QUEUE_SIZE, MAXPHYS, 8, 16, 0,
|
||||
xdma_prep_sg(sc->xchan_tx, TX_QUEUE_SIZE, maxphys, 8, 16, 0,
|
||||
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR);
|
||||
xdma_prep_sg(sc->xchan_rx, TX_QUEUE_SIZE, MAXPHYS, 8, 16, 0,
|
||||
xdma_prep_sg(sc->xchan_rx, TX_QUEUE_SIZE, maxphys, 8, 16, 0,
|
||||
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR);
|
||||
|
||||
cqspi_init(sc);
|
||||
|
@ -157,7 +157,7 @@ extern "C" {
|
||||
* posted to hardware always contain pairs of elements (with second
|
||||
* element set to zeroes if not needed).
|
||||
*/
|
||||
#define __MAXPHYS_ELEMENTS ((MAXPHYS / PAGE_SIZE) + 1)
|
||||
#define __MAXPHYS_ELEMENTS ((128 * 1024 / PAGE_SIZE) + 1)
|
||||
#define SCI_MAX_SCATTER_GATHER_ELEMENTS ((__MAXPHYS_ELEMENTS + 1) & ~0x1)
|
||||
#endif
|
||||
|
||||
|
@ -2407,7 +2407,7 @@ iscsi_action(struct cam_sim *sim, union ccb *ccb)
|
||||
cpi->transport_version = 0;
|
||||
cpi->protocol = PROTO_SCSI;
|
||||
cpi->protocol_version = SCSI_REV_SPC3;
|
||||
cpi->maxio = MAXPHYS;
|
||||
cpi->maxio = maxphys;
|
||||
cpi->ccb_h.status = CAM_REQ_CMP;
|
||||
break;
|
||||
}
|
||||
|
@ -960,9 +960,10 @@ mdstart_vnode(struct md_s *sc, struct bio *bp)
|
||||
piov = auio.uio_iov;
|
||||
} else if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
|
||||
pb = uma_zalloc(md_pbuf_zone, M_WAITOK);
|
||||
MPASS((pb->b_flags & B_MAXPHYS) != 0);
|
||||
bp->bio_resid = len;
|
||||
unmapped_step:
|
||||
npages = atop(min(MAXPHYS, round_page(len + (ma_offs &
|
||||
npages = atop(min(maxphys, round_page(len + (ma_offs &
|
||||
PAGE_MASK))));
|
||||
iolen = min(ptoa(npages) - (ma_offs & PAGE_MASK), len);
|
||||
KASSERT(iolen > 0, ("zero iolen"));
|
||||
@ -1684,7 +1685,7 @@ kern_mdattach_locked(struct thread *td, struct md_req *mdr)
|
||||
sectsize = DEV_BSIZE;
|
||||
else
|
||||
sectsize = mdr->md_sectorsize;
|
||||
if (sectsize > MAXPHYS || mdr->md_mediasize < sectsize)
|
||||
if (sectsize > maxphys || mdr->md_mediasize < sectsize)
|
||||
return (EINVAL);
|
||||
if (mdr->md_options & MD_AUTOUNIT)
|
||||
sc = mdnew(-1, &error, mdr->md_type);
|
||||
|
@ -457,7 +457,7 @@ mfi_attach(struct mfi_softc *sc)
|
||||
/*
|
||||
* Get information needed for sizing the contiguous memory for the
|
||||
* frame pool. Size down the sgl parameter since we know that
|
||||
* we will never need more than what's required for MAXPHYS.
|
||||
* we will never need more than what's required for MFI_MAXPHYS.
|
||||
* It would be nice if these constants were available at runtime
|
||||
* instead of compile time.
|
||||
*/
|
||||
|
@ -436,14 +436,14 @@ mpr_resize_queues(struct mpr_softc *sc)
|
||||
|
||||
/*
|
||||
* If I/O size limitation requested then use it and pass up to CAM.
|
||||
* If not, use MAXPHYS as an optimization hint, but report HW limit.
|
||||
* If not, use maxphys as an optimization hint, but report HW limit.
|
||||
*/
|
||||
if (sc->max_io_pages > 0) {
|
||||
maxio = min(maxio, sc->max_io_pages * PAGE_SIZE);
|
||||
sc->maxio = maxio;
|
||||
} else {
|
||||
sc->maxio = maxio;
|
||||
maxio = min(maxio, MAXPHYS);
|
||||
maxio = min(maxio, maxphys);
|
||||
}
|
||||
|
||||
sc->num_chains = (maxio / PAGE_SIZE + sges_per_frame - 2) /
|
||||
|
@ -418,14 +418,14 @@ mps_resize_queues(struct mps_softc *sc)
|
||||
|
||||
/*
|
||||
* If I/O size limitation requested, then use it and pass up to CAM.
|
||||
* If not, use MAXPHYS as an optimization hint, but report HW limit.
|
||||
* If not, use maxphys as an optimization hint, but report HW limit.
|
||||
*/
|
||||
if (sc->max_io_pages > 0) {
|
||||
maxio = min(maxio, sc->max_io_pages * PAGE_SIZE);
|
||||
sc->maxio = maxio;
|
||||
} else {
|
||||
sc->maxio = maxio;
|
||||
maxio = min(maxio, MAXPHYS);
|
||||
maxio = min(maxio, maxphys);
|
||||
}
|
||||
|
||||
sc->num_chains = (maxio / PAGE_SIZE + sges_per_frame - 2) /
|
||||
|
@ -2691,7 +2691,7 @@ mpt_configure_ioc(struct mpt_softc *mpt, int tn, int needreset)
|
||||
/*
|
||||
* Use this as the basis for reporting the maximum I/O size to CAM.
|
||||
*/
|
||||
mpt->max_cam_seg_cnt = min(mpt->max_seg_cnt, (MAXPHYS / PAGE_SIZE) + 1);
|
||||
mpt->max_cam_seg_cnt = min(mpt->max_seg_cnt, btoc(maxphys) + 1);
|
||||
|
||||
/* XXX Lame Locking! */
|
||||
MPT_UNLOCK(mpt);
|
||||
|
@ -668,7 +668,7 @@ struct mpt_softc {
|
||||
bus_addr_t request_phys; /* BusAddr of request memory */
|
||||
|
||||
uint32_t max_seg_cnt; /* calculated after IOC facts */
|
||||
uint32_t max_cam_seg_cnt;/* calculated from MAXPHYS*/
|
||||
uint32_t max_cam_seg_cnt;/* calculated from maxphys */
|
||||
|
||||
/*
|
||||
* Hardware management
|
||||
|
@ -1922,9 +1922,9 @@ mrsas_alloc_mem(struct mrsas_softc *sc)
|
||||
BUS_SPACE_MAXADDR, /* lowaddr */
|
||||
BUS_SPACE_MAXADDR, /* highaddr */
|
||||
NULL, NULL, /* filter, filterarg */
|
||||
MAXPHYS, /* maxsize */
|
||||
maxphys, /* maxsize */
|
||||
sc->max_num_sge, /* nsegments */
|
||||
MAXPHYS, /* maxsegsize */
|
||||
maxphys, /* maxsegsize */
|
||||
0, /* flags */
|
||||
NULL, NULL, /* lockfunc, lockarg */
|
||||
&sc->mrsas_parent_tag /* tag */
|
||||
@ -2154,9 +2154,9 @@ mrsas_alloc_mem(struct mrsas_softc *sc)
|
||||
BUS_SPACE_MAXADDR,
|
||||
BUS_SPACE_MAXADDR,
|
||||
NULL, NULL,
|
||||
MAXPHYS,
|
||||
maxphys,
|
||||
sc->max_num_sge, /* nsegments */
|
||||
MAXPHYS,
|
||||
maxphys,
|
||||
BUS_DMA_ALLOCNOW,
|
||||
busdma_lock_mutex,
|
||||
&sc->io_lock,
|
||||
|
@ -370,8 +370,7 @@ mvs_dmainit(device_t dev)
|
||||
if (bus_dma_tag_create(bus_get_dma_tag(dev), 2, MVS_EPRD_MAX,
|
||||
BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
|
||||
NULL, NULL,
|
||||
MVS_SG_ENTRIES * PAGE_SIZE * MVS_MAX_SLOTS,
|
||||
MVS_SG_ENTRIES, MVS_EPRD_MAX,
|
||||
MVS_SG_ENTRIES * PAGE_SIZE, MVS_SG_ENTRIES, MVS_EPRD_MAX,
|
||||
0, busdma_lock_mutex, &ch->mtx, &ch->dma.data_tag)) {
|
||||
goto error;
|
||||
}
|
||||
@ -438,6 +437,7 @@ mvs_slotsalloc(device_t dev)
|
||||
slot->dev = dev;
|
||||
slot->slot = i;
|
||||
slot->state = MVS_SLOT_EMPTY;
|
||||
slot->eprd_offset = MVS_EPRD_OFFSET + MVS_EPRD_SIZE * i;
|
||||
slot->ccb = NULL;
|
||||
callout_init_mtx(&slot->timeout, &ch->mtx, 0);
|
||||
|
||||
@ -1286,8 +1286,7 @@ mvs_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
|
||||
} else {
|
||||
slot->dma.addr = 0;
|
||||
/* Get a piece of the workspace for this EPRD */
|
||||
eprd = (struct mvs_eprd *)
|
||||
(ch->dma.workrq + MVS_EPRD_OFFSET + (MVS_EPRD_SIZE * slot->slot));
|
||||
eprd = (struct mvs_eprd *)(ch->dma.workrq + slot->eprd_offset);
|
||||
/* Fill S/G table */
|
||||
for (i = 0; i < nsegs; i++) {
|
||||
eprd[i].prdbal = htole32(segs[i].ds_addr);
|
||||
@ -1405,8 +1404,7 @@ mvs_legacy_execute_transaction(struct mvs_slot *slot)
|
||||
DELAY(10);
|
||||
if (ch->basic_dma) {
|
||||
/* Start basic DMA. */
|
||||
eprd = ch->dma.workrq_bus + MVS_EPRD_OFFSET +
|
||||
(MVS_EPRD_SIZE * slot->slot);
|
||||
eprd = ch->dma.workrq_bus + slot->eprd_offset;
|
||||
ATA_OUTL(ch->r_mem, DMA_DTLBA, eprd);
|
||||
ATA_OUTL(ch->r_mem, DMA_DTHBA, (eprd >> 16) >> 16);
|
||||
ATA_OUTL(ch->r_mem, DMA_C, DMA_C_START |
|
||||
@ -1433,7 +1431,7 @@ mvs_execute_transaction(struct mvs_slot *slot)
|
||||
int i;
|
||||
|
||||
/* Get address of the prepared EPRD */
|
||||
eprd = ch->dma.workrq_bus + MVS_EPRD_OFFSET + (MVS_EPRD_SIZE * slot->slot);
|
||||
eprd = ch->dma.workrq_bus + slot->eprd_offset;
|
||||
/* Prepare CRQB. Gen IIe uses different CRQB format. */
|
||||
if (ch->quirks & MVS_Q_GENIIE) {
|
||||
crqb2e = (struct mvs_crqb_gen2e *)
|
||||
@ -2423,7 +2421,7 @@ mvsaction(struct cam_sim *sim, union ccb *ccb)
|
||||
cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
|
||||
cpi->protocol = PROTO_ATA;
|
||||
cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
|
||||
cpi->maxio = MAXPHYS;
|
||||
cpi->maxio = maxphys;
|
||||
if ((ch->quirks & MVS_Q_SOC) == 0) {
|
||||
cpi->hba_vendor = pci_get_vendor(parent);
|
||||
cpi->hba_device = pci_get_device(parent);
|
||||
|
@ -392,7 +392,7 @@
|
||||
#define MVS_MAX_SLOTS 32
|
||||
|
||||
/* Pessimistic prognosis on number of required S/G entries */
|
||||
#define MVS_SG_ENTRIES (btoc(MAXPHYS) + 1)
|
||||
#define MVS_SG_ENTRIES (btoc(maxphys) + 1)
|
||||
|
||||
/* EDMA Command Request Block (CRQB) Data */
|
||||
struct mvs_crqb {
|
||||
@ -505,6 +505,7 @@ struct mvs_slot {
|
||||
int slot; /* Number of this slot */
|
||||
int tag; /* Used command tag */
|
||||
enum mvs_slot_states state; /* Slot state */
|
||||
u_int eprd_offset; /* EPRD offset */
|
||||
union ccb *ccb; /* CCB occupying slot */
|
||||
struct ata_dmaslot dma; /* DMA data of this slot */
|
||||
struct callout timeout; /* Execution timeout */
|
||||
|
@@ -60,7 +60,7 @@
 #define NVME_GLOBAL_NAMESPACE_TAG	((uint32_t)0xFFFFFFFF)

 /* Cap nvme to 1MB transfers driver explodes with larger sizes */
-#define NVME_MAX_XFER_SIZE	(MAXPHYS < (1<<20) ? MAXPHYS : (1<<20))
+#define NVME_MAX_XFER_SIZE	(maxphys < (1<<20) ? maxphys : (1<<20))

 /* Register field definitions */
 #define NVME_CAP_LO_REG_MQES_SHIFT	(0)
@@ -1248,13 +1248,13 @@ nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
 	if (pt->len > 0) {
 		/*
 		 * vmapbuf calls vm_fault_quick_hold_pages which only maps full
-		 * pages. Ensure this request has fewer than MAXPHYS bytes when
+		 * pages. Ensure this request has fewer than maxphys bytes when
 		 * extended to full pages.
 		 */
 		addr = (vm_offset_t)pt->buf;
 		end = round_page(addr + pt->len);
 		addr = trunc_page(addr);
-		if (end - addr > MAXPHYS)
+		if (end - addr > maxphys)
 			return EIO;

 		if (pt->len > ctrlr->max_xfer_size) {
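The comment in the hunk above spells out the constraint: vmapbuf() maps whole pages, so a passthrough buffer must be rejected if it would exceed maxphys once its start and end are extended to page boundaries. The same check as a standalone helper, shown for illustration only (the function name is an assumption):

	#include <sys/types.h>
	#include <sys/param.h>

	static bool
	pt_fits_in_maxphys(const void *buf, size_t len)
	{
		vm_offset_t start, end;

		start = trunc_page((vm_offset_t)buf);
		end = round_page((vm_offset_t)buf + len);
		return (end - start <= maxphys);
	}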
@ -62,7 +62,7 @@ EW 09-17-2004 1.0.0 Constant definitions
|
||||
#define AGTIAPI_MAX_DEVICE_7H 256 /*Max devices per channel in 7H */
|
||||
#define AGTIAPI_MAX_DEVICE_8H 512 /*Max devices per channel in 8H*/
|
||||
#define AGTIAPI_MAX_CAM_Q_DEPTH 1024
|
||||
#define AGTIAPI_NSEGS (MAXPHYS / PAGE_SIZE)
|
||||
#define AGTIAPI_NSEGS (maxphys / PAGE_SIZE)
|
||||
/*
|
||||
** Adapter specific defines
|
||||
*/
|
||||
|
@ -1623,8 +1623,8 @@ int agtiapi_alloc_requests( struct agtiapi_softc *pmcsc )
|
||||
|
||||
nsegs = AGTIAPI_NSEGS;
|
||||
rsize = AGTIAPI_MAX_DMA_SEGS; // 128
|
||||
AGTIAPI_PRINTK( "agtiapi_alloc_requests: MAXPHYS 0x%x PAGE_SIZE 0x%x \n",
|
||||
MAXPHYS, PAGE_SIZE );
|
||||
AGTIAPI_PRINTK( "agtiapi_alloc_requests: maxphys 0x%lx PAGE_SIZE 0x%x \n",
|
||||
maxphys, PAGE_SIZE );
|
||||
AGTIAPI_PRINTK( "agtiapi_alloc_requests: nsegs %d rsize %d \n",
|
||||
nsegs, rsize ); // 32, 128
|
||||
// This is for csio->data_ptr
|
||||
|
@ -722,19 +722,19 @@ sdhci_dma_alloc(struct sdhci_slot *slot)
|
||||
int err;
|
||||
|
||||
if (!(slot->quirks & SDHCI_QUIRK_BROKEN_SDMA_BOUNDARY)) {
|
||||
if (MAXPHYS <= 1024 * 4)
|
||||
if (maxphys <= 1024 * 4)
|
||||
slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_4K;
|
||||
else if (MAXPHYS <= 1024 * 8)
|
||||
else if (maxphys <= 1024 * 8)
|
||||
slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_8K;
|
||||
else if (MAXPHYS <= 1024 * 16)
|
||||
else if (maxphys <= 1024 * 16)
|
||||
slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_16K;
|
||||
else if (MAXPHYS <= 1024 * 32)
|
||||
else if (maxphys <= 1024 * 32)
|
||||
slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_32K;
|
||||
else if (MAXPHYS <= 1024 * 64)
|
||||
else if (maxphys <= 1024 * 64)
|
||||
slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_64K;
|
||||
else if (MAXPHYS <= 1024 * 128)
|
||||
else if (maxphys <= 1024 * 128)
|
||||
slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_128K;
|
||||
else if (MAXPHYS <= 1024 * 256)
|
||||
else if (maxphys <= 1024 * 256)
|
||||
slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_256K;
|
||||
else
|
||||
slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_512K;
|
||||
@ -2534,7 +2534,7 @@ sdhci_cam_action(struct cam_sim *sim, union ccb *ccb)
|
||||
|
||||
switch (ccb->ccb_h.func_code) {
|
||||
case XPT_PATH_INQ:
|
||||
mmc_path_inq(&ccb->cpi, "Deglitch Networks", sim, MAXPHYS);
|
||||
mmc_path_inq(&ccb->cpi, "Deglitch Networks", sim, maxphys);
|
||||
break;
|
||||
|
||||
case XPT_GET_TRAN_SETTINGS:
|
||||
|
@ -688,8 +688,7 @@ siis_dmainit(device_t dev)
|
||||
if (bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
|
||||
BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
|
||||
NULL, NULL,
|
||||
SIIS_SG_ENTRIES * PAGE_SIZE * SIIS_MAX_SLOTS,
|
||||
SIIS_SG_ENTRIES, 0xFFFFFFFF,
|
||||
SIIS_SG_ENTRIES * PAGE_SIZE, SIIS_SG_ENTRIES, 0xFFFFFFFF,
|
||||
0, busdma_lock_mutex, &ch->mtx, &ch->dma.data_tag)) {
|
||||
goto error;
|
||||
}
|
||||
@ -745,6 +744,7 @@ siis_slotsalloc(device_t dev)
|
||||
slot->dev = dev;
|
||||
slot->slot = i;
|
||||
slot->state = SIIS_SLOT_EMPTY;
|
||||
slot->prb_offset = SIIS_PRB_SIZE * i;
|
||||
slot->ccb = NULL;
|
||||
callout_init_mtx(&slot->timeout, &ch->mtx, 0);
|
||||
|
||||
@ -1034,8 +1034,7 @@ siis_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
|
||||
slot->dma.nsegs = nsegs;
|
||||
if (nsegs != 0) {
|
||||
/* Get a piece of the workspace for this request */
|
||||
ctp = (struct siis_cmd *)(ch->dma.work + SIIS_CT_OFFSET +
|
||||
(SIIS_CT_SIZE * slot->slot));
|
||||
ctp = (struct siis_cmd *)(ch->dma.work + slot->prb_offset);
|
||||
/* Fill S/G table */
|
||||
if (slot->ccb->ccb_h.func_code == XPT_ATA_IO)
|
||||
prd = &ctp->u.ata.prd[0];
|
||||
@ -1066,8 +1065,7 @@ siis_execute_transaction(struct siis_slot *slot)
|
||||
|
||||
mtx_assert(&ch->mtx, MA_OWNED);
|
||||
/* Get a piece of the workspace for this request */
|
||||
ctp = (struct siis_cmd *)
|
||||
(ch->dma.work + SIIS_CT_OFFSET + (SIIS_CT_SIZE * slot->slot));
|
||||
ctp = (struct siis_cmd *)(ch->dma.work + slot->prb_offset);
|
||||
ctp->control = 0;
|
||||
ctp->protocol_override = 0;
|
||||
ctp->transfer_count = 0;
|
||||
@ -1117,8 +1115,7 @@ siis_execute_transaction(struct siis_slot *slot)
|
||||
/* Issue command to the controller. */
|
||||
slot->state = SIIS_SLOT_RUNNING;
|
||||
ch->rslots |= (1 << slot->slot);
|
||||
prb_bus = ch->dma.work_bus +
|
||||
SIIS_CT_OFFSET + (SIIS_CT_SIZE * slot->slot);
|
||||
prb_bus = ch->dma.work_bus + slot->prb_offset;
|
||||
ATA_OUTL(ch->r_mem, SIIS_P_CACTL(slot->slot), prb_bus);
|
||||
ATA_OUTL(ch->r_mem, SIIS_P_CACTH(slot->slot), prb_bus >> 32);
|
||||
/* Start command execution timeout */
|
||||
@ -1967,7 +1964,7 @@ siisaction(struct cam_sim *sim, union ccb *ccb)
|
||||
cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
|
||||
cpi->protocol = PROTO_ATA;
|
||||
cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
|
||||
cpi->maxio = MAXPHYS;
|
||||
cpi->maxio = maxphys;
|
||||
cpi->hba_vendor = pci_get_vendor(parent);
|
||||
cpi->hba_device = pci_get_device(parent);
|
||||
cpi->hba_subvendor = pci_get_subvendor(parent);
|
||||
|
@ -263,18 +263,12 @@
|
||||
#define SIIS_OFFSET 0x100
|
||||
#define SIIS_STEP 0x80
|
||||
|
||||
/* Just to be sure, if building as module. */
|
||||
#if MAXPHYS < 512 * 1024
|
||||
#undef MAXPHYS
|
||||
#define MAXPHYS 512 * 1024
|
||||
#endif
|
||||
/* Pessimistic prognosis on number of required S/G entries */
|
||||
#define SIIS_SG_ENTRIES (roundup(btoc(MAXPHYS), 4) + 1)
|
||||
/* Command tables. Up to 32 commands, Each, 128byte aligned. */
|
||||
#define SIIS_CT_OFFSET 0
|
||||
#define SIIS_CT_SIZE (32 + 16 + SIIS_SG_ENTRIES * 16)
|
||||
#define SIIS_SG_ENTRIES (roundup(btoc(maxphys), 4) + 1)
|
||||
/* Port Request Block + S/G entries. 128byte aligned. */
|
||||
#define SIIS_PRB_SIZE (32 + 16 + SIIS_SG_ENTRIES * 16)
|
||||
/* Total main work area. */
|
||||
#define SIIS_WORK_SIZE (SIIS_CT_OFFSET + SIIS_CT_SIZE * SIIS_MAX_SLOTS)
|
||||
#define SIIS_WORK_SIZE (SIIS_PRB_SIZE * SIIS_MAX_SLOTS)
|
||||
|
||||
struct siis_dma_prd {
|
||||
u_int64_t dba;
|
||||
@ -287,12 +281,12 @@ struct siis_dma_prd {
|
||||
} __packed;
|
||||
|
||||
struct siis_cmd_ata {
|
||||
struct siis_dma_prd prd[1 + SIIS_SG_ENTRIES];
|
||||
struct siis_dma_prd prd[2];
|
||||
} __packed;
|
||||
|
||||
struct siis_cmd_atapi {
|
||||
u_int8_t ccb[16];
|
||||
struct siis_dma_prd prd[SIIS_SG_ENTRIES];
|
||||
struct siis_dma_prd prd[1];
|
||||
} __packed;
|
||||
|
||||
struct siis_cmd {
|
||||
@ -349,6 +343,7 @@ struct siis_slot {
|
||||
device_t dev; /* Device handle */
|
||||
u_int8_t slot; /* Number of this slot */
|
||||
enum siis_slot_states state; /* Slot state */
|
||||
u_int prb_offset; /* PRB offset */
|
||||
union ccb *ccb; /* CCB occupying slot */
|
||||
struct ata_dmaslot dma; /* DMA data of this slot */
|
||||
struct callout timeout; /* Execution timeout */
|
||||
|
@ -95,9 +95,9 @@
|
||||
* Max number of scatter/gather entries for an I/O.
|
||||
* Each entry costs 8 bytes in the internal CCB data structure.
|
||||
* We use at most 33 segments but also no more than required for handling
|
||||
* MAXPHYS.
|
||||
* legacy MAXPHYS == 128 * 1024.
|
||||
*/
|
||||
#define SYM_CONF_MAX_SG (MIN(33, (MAXPHYS / PAGE_SIZE) + 1))
|
||||
#define SYM_CONF_MAX_SG (MIN(33, (128 * 1024 / PAGE_SIZE) + 1))
|
||||
|
||||
/*
|
||||
* Max number of targets.
|
||||
|
@ -2334,7 +2334,7 @@ umass_cam_action(struct cam_sim *sim, union ccb *ccb)
|
||||
case USB_SPEED_SUPER:
|
||||
cpi->base_transfer_speed =
|
||||
UMASS_SUPER_TRANSFER_SPEED;
|
||||
cpi->maxio = MAXPHYS;
|
||||
cpi->maxio = maxphys;
|
||||
break;
|
||||
case USB_SPEED_HIGH:
|
||||
cpi->base_transfer_speed =
|
||||
|
@ -323,7 +323,7 @@ vtblk_attach(device_t dev)
|
||||
* than the maximum supported transfer size.
|
||||
*/
|
||||
if (virtio_with_feature(dev, VIRTIO_BLK_F_SIZE_MAX)) {
|
||||
if (blkcfg.size_max < MAXPHYS) {
|
||||
if (blkcfg.size_max < maxphys) {
|
||||
error = ENOTSUP;
|
||||
device_printf(dev, "host requires unsupported "
|
||||
"maximum segment size feature\n");
|
||||
@ -623,7 +623,7 @@ vtblk_maximum_segments(struct vtblk_softc *sc,
|
||||
nsegs = VTBLK_MIN_SEGMENTS;
|
||||
|
||||
if (virtio_with_feature(dev, VIRTIO_BLK_F_SEG_MAX)) {
|
||||
nsegs += MIN(blkcfg->seg_max, MAXPHYS / PAGE_SIZE + 1);
|
||||
nsegs += MIN(blkcfg->seg_max, maxphys / PAGE_SIZE + 1);
|
||||
if (sc->vtblk_flags & VTBLK_FLAG_INDIRECT)
|
||||
nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);
|
||||
} else
|
||||
@ -713,8 +713,8 @@ vtblk_alloc_disk(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg)
|
||||
* no pages are contiguous. This may impose an artificially low
|
||||
* maximum I/O size. But in practice, since QEMU advertises 128
|
||||
* segments, this gives us a maximum IO size of 125 * PAGE_SIZE,
|
||||
* which is typically greater than MAXPHYS. Eventually we should
|
||||
* just advertise MAXPHYS and split buffers that are too big.
|
||||
* which is typically greater than maxphys. Eventually we should
|
||||
* just advertise maxphys and split buffers that are too big.
|
||||
*
|
||||
* Note we must subtract one additional segment in case of non
|
||||
* page aligned buffers.
|
||||
|
@ -450,7 +450,7 @@ vtscsi_maximum_segments(struct vtscsi_softc *sc, int seg_max)
|
||||
nsegs = VTSCSI_MIN_SEGMENTS;
|
||||
|
||||
if (seg_max > 0) {
|
||||
nsegs += MIN(seg_max, MAXPHYS / PAGE_SIZE + 1);
|
||||
nsegs += MIN(seg_max, maxphys / PAGE_SIZE + 1);
|
||||
if (sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT)
|
||||
nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);
|
||||
} else
|
||||
|
@ -143,9 +143,10 @@ static MALLOC_DEFINE(M_XENBLOCKBACK, "xbbd", "Xen Block Back Driver Data");
|
||||
/**
|
||||
* The maximum mapped region size per request we will allow in a negotiated
|
||||
* block-front/back communication channel.
|
||||
* Use old default of MAXPHYS == 128K.
|
||||
*/
|
||||
#define XBB_MAX_REQUEST_SIZE \
|
||||
MIN(MAXPHYS, BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE)
|
||||
MIN(128 * 1024, BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE)
|
||||
|
||||
/**
|
||||
* The maximum number of segments (within a request header and accompanying
|
||||
|
@ -1306,8 +1306,8 @@ xbd_connect(struct xbd_softc *sc)
|
||||
sc->xbd_max_request_segments = 0;
|
||||
if (sc->xbd_max_request_segments > XBD_MAX_INDIRECT_SEGMENTS)
|
||||
sc->xbd_max_request_segments = XBD_MAX_INDIRECT_SEGMENTS;
|
||||
if (sc->xbd_max_request_segments > XBD_SIZE_TO_SEGS(MAXPHYS))
|
||||
sc->xbd_max_request_segments = XBD_SIZE_TO_SEGS(MAXPHYS);
|
||||
if (sc->xbd_max_request_segments > XBD_SIZE_TO_SEGS(maxphys))
|
||||
sc->xbd_max_request_segments = XBD_SIZE_TO_SEGS(maxphys);
|
||||
sc->xbd_max_request_indirectpages =
|
||||
XBD_INDIRECT_SEGS_TO_PAGES(sc->xbd_max_request_segments);
|
||||
if (sc->xbd_max_request_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST)
|
||||
|
@ -238,8 +238,8 @@ iso_mountfs(devvp, mp)
|
||||
goto out;
|
||||
if (devvp->v_rdev->si_iosize_max != 0)
|
||||
mp->mnt_iosize_max = devvp->v_rdev->si_iosize_max;
|
||||
if (mp->mnt_iosize_max > MAXPHYS)
|
||||
mp->mnt_iosize_max = MAXPHYS;
|
||||
if (mp->mnt_iosize_max > maxphys)
|
||||
mp->mnt_iosize_max = maxphys;
|
||||
|
||||
bo = &devvp->v_bufobj;
|
||||
|
||||
|
@ -876,8 +876,8 @@ ext2_mountfs(struct vnode *devvp, struct mount *mp)
|
||||
bo->bo_ops = g_vfs_bufops;
|
||||
if (devvp->v_rdev->si_iosize_max != 0)
|
||||
mp->mnt_iosize_max = devvp->v_rdev->si_iosize_max;
|
||||
if (mp->mnt_iosize_max > MAXPHYS)
|
||||
mp->mnt_iosize_max = MAXPHYS;
|
||||
if (mp->mnt_iosize_max > maxphys)
|
||||
mp->mnt_iosize_max = maxphys;
|
||||
|
||||
bp = NULL;
|
||||
ump = NULL;
|
||||
@ -922,7 +922,7 @@ ext2_mountfs(struct vnode *devvp, struct mount *mp)
|
||||
* in ext2fs doesn't have these variables, so we can calculate
|
||||
* them here.
|
||||
*/
|
||||
e2fs_maxcontig = MAX(1, MAXPHYS / ump->um_e2fs->e2fs_bsize);
|
||||
e2fs_maxcontig = MAX(1, maxphys / ump->um_e2fs->e2fs_bsize);
|
||||
ump->um_e2fs->e2fs_contigsumsize = MIN(e2fs_maxcontig, EXT2_MAXCONTIG);
|
||||
if (ump->um_e2fs->e2fs_contigsumsize > 0) {
|
||||
size = ump->um_e2fs->e2fs_gcount * sizeof(int32_t);
|
||||
|
@ -441,7 +441,7 @@ fuse_vfsop_mount(struct mount *mp)
|
||||
}
|
||||
memset(mp->mnt_stat.f_mntfromname, 0, MNAMELEN);
|
||||
strlcpy(mp->mnt_stat.f_mntfromname, fspec, MNAMELEN);
|
||||
mp->mnt_iosize_max = MAXPHYS;
|
||||
mp->mnt_iosize_max = maxphys;
|
||||
|
||||
/* Now handshaking with daemon */
|
||||
fuse_internal_send_init(data, td);
|
||||
|
@ -437,8 +437,8 @@ mountmsdosfs(struct vnode *devvp, struct mount *mp)
|
||||
VOP_UNLOCK(devvp);
|
||||
if (dev->si_iosize_max != 0)
|
||||
mp->mnt_iosize_max = dev->si_iosize_max;
|
||||
if (mp->mnt_iosize_max > MAXPHYS)
|
||||
mp->mnt_iosize_max = MAXPHYS;
|
||||
if (mp->mnt_iosize_max > maxphys)
|
||||
mp->mnt_iosize_max = maxphys;
|
||||
|
||||
/*
|
||||
* Read the boot sector of the filesystem, and then check the
|
||||
|
@ -338,8 +338,8 @@ udf_mountfs(struct vnode *devvp, struct mount *mp)
|
||||
|
||||
if (devvp->v_rdev->si_iosize_max != 0)
|
||||
mp->mnt_iosize_max = devvp->v_rdev->si_iosize_max;
|
||||
if (mp->mnt_iosize_max > MAXPHYS)
|
||||
mp->mnt_iosize_max = MAXPHYS;
|
||||
if (mp->mnt_iosize_max > maxphys)
|
||||
mp->mnt_iosize_max = maxphys;
|
||||
|
||||
/* XXX: should be M_WAITOK */
|
||||
udfmp = malloc(sizeof(struct udf_mnt), M_UDFMOUNT,
|
||||
|
sys/geom/cache/g_cache.c
@@ -494,7 +494,7 @@ g_cache_create(struct g_class *mp, struct g_provider *pp,

 	/* Block size restrictions. */
 	bshift = ffs(md->md_bsize) - 1;
-	if (md->md_bsize == 0 || md->md_bsize > MAXPHYS ||
+	if (md->md_bsize == 0 || md->md_bsize > maxphys ||
 	    md->md_bsize != 1 << bshift ||
 	    (md->md_bsize % pp->sectorsize) != 0) {
 		G_CACHE_DEBUG(0, "Invalid blocksize for provider %s.", pp->name);
@ -352,17 +352,17 @@ g_eli_auth_write_done(struct cryptop *crp)
|
||||
|
||||
/*
|
||||
* We write more than what is requested, so we have to be ready to write
|
||||
* more than MAXPHYS.
|
||||
* more than maxphys.
|
||||
*/
|
||||
cbp2 = NULL;
|
||||
if (cbp->bio_length > MAXPHYS) {
|
||||
if (cbp->bio_length > maxphys) {
|
||||
cbp2 = g_duplicate_bio(bp);
|
||||
cbp2->bio_length = cbp->bio_length - MAXPHYS;
|
||||
cbp2->bio_data = cbp->bio_data + MAXPHYS;
|
||||
cbp2->bio_offset = cbp->bio_offset + MAXPHYS;
|
||||
cbp2->bio_length = cbp->bio_length - maxphys;
|
||||
cbp2->bio_data = cbp->bio_data + maxphys;
|
||||
cbp2->bio_offset = cbp->bio_offset + maxphys;
|
||||
cbp2->bio_to = cp->provider;
|
||||
cbp2->bio_done = g_eli_write_done;
|
||||
cbp->bio_length = MAXPHYS;
|
||||
cbp->bio_length = maxphys;
|
||||
}
|
||||
/*
|
||||
* Send encrypted data to the provider.
|
||||
@ -413,17 +413,17 @@ g_eli_auth_read(struct g_eli_softc *sc, struct bio *bp)
|
||||
|
||||
/*
|
||||
* We read more than what is requested, so we have to be ready to read
|
||||
* more than MAXPHYS.
|
||||
* more than maxphys.
|
||||
*/
|
||||
cbp2 = NULL;
|
||||
if (cbp->bio_length > MAXPHYS) {
|
||||
if (cbp->bio_length > maxphys) {
|
||||
cbp2 = g_duplicate_bio(bp);
|
||||
cbp2->bio_length = cbp->bio_length - MAXPHYS;
|
||||
cbp2->bio_data = cbp->bio_data + MAXPHYS;
|
||||
cbp2->bio_offset = cbp->bio_offset + MAXPHYS;
|
||||
cbp2->bio_length = cbp->bio_length - maxphys;
|
||||
cbp2->bio_data = cbp->bio_data + maxphys;
|
||||
cbp2->bio_offset = cbp->bio_offset + maxphys;
|
||||
cbp2->bio_to = cp->provider;
|
||||
cbp2->bio_done = g_eli_read_done;
|
||||
cbp->bio_length = MAXPHYS;
|
||||
cbp->bio_length = maxphys;
|
||||
}
|
||||
/*
|
||||
* Read encrypted data from provider.
|
||||
|
@ -377,7 +377,7 @@ g_dev_taste(struct g_class *mp, struct g_provider *pp, int insist __unused)
|
||||
}
|
||||
dev = sc->sc_dev;
|
||||
dev->si_flags |= SI_UNMAPPED;
|
||||
dev->si_iosize_max = MAXPHYS;
|
||||
dev->si_iosize_max = maxphys;
|
||||
error = init_dumpdev(dev);
|
||||
if (error != 0)
|
||||
printf("%s: init_dumpdev() failed (gp->name=%s, error=%d)\n",
|
||||
@ -713,14 +713,14 @@ g_dev_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread
|
||||
|
||||
if (zone_args->zone_cmd == DISK_ZONE_REPORT_ZONES) {
|
||||
rep = &zone_args->zone_params.report;
|
||||
#define MAXENTRIES (MAXPHYS / sizeof(struct disk_zone_rep_entry))
|
||||
#define MAXENTRIES (maxphys / sizeof(struct disk_zone_rep_entry))
|
||||
if (rep->entries_allocated > MAXENTRIES)
|
||||
rep->entries_allocated = MAXENTRIES;
|
||||
alloc_size = rep->entries_allocated *
|
||||
sizeof(struct disk_zone_rep_entry);
|
||||
if (alloc_size != 0)
|
||||
new_entries = g_malloc(alloc_size,
|
||||
M_WAITOK| M_ZERO);
|
||||
M_WAITOK | M_ZERO);
|
||||
old_entries = rep->entries;
|
||||
rep->entries = new_entries;
|
||||
}
|
||||
|
@ -882,7 +882,7 @@ g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error)
|
||||
int errorc;
|
||||
|
||||
KASSERT(length > 0 && length >= cp->provider->sectorsize &&
|
||||
length <= MAXPHYS, ("g_read_data(): invalid length %jd",
|
||||
length <= maxphys, ("g_read_data(): invalid length %jd",
|
||||
(intmax_t)length));
|
||||
|
||||
bp = g_alloc_bio();
|
||||
@ -937,7 +937,7 @@ g_write_data(struct g_consumer *cp, off_t offset, void *ptr, off_t length)
|
||||
int error;
|
||||
|
||||
KASSERT(length > 0 && length >= cp->provider->sectorsize &&
|
||||
length <= MAXPHYS, ("g_write_data(): invalid length %jd",
|
||||
length <= maxphys, ("g_write_data(): invalid length %jd",
|
||||
(intmax_t)length));
|
||||
|
||||
bp = g_alloc_bio();
|
||||
|
@ -1053,7 +1053,7 @@ g_journal_optimize(struct bio *head)
|
||||
continue;
|
||||
}
|
||||
/* Be sure we don't end up with too big bio. */
|
||||
if (pbp->bio_length + cbp->bio_length > MAXPHYS) {
|
||||
if (pbp->bio_length + cbp->bio_length > maxphys) {
|
||||
pbp = cbp;
|
||||
continue;
|
||||
}
|
||||
|
@ -215,7 +215,7 @@ struct g_journal_entry {
|
||||
#define GJ_RECORD_HEADER_MAGIC "GJRHDR"
|
||||
#define GJ_RECORD_HEADER_NENTRIES (20)
|
||||
#define GJ_RECORD_MAX_SIZE(sc) \
|
||||
((sc)->sc_jprovider->sectorsize + GJ_RECORD_HEADER_NENTRIES * MAXPHYS)
|
||||
((sc)->sc_jprovider->sectorsize + GJ_RECORD_HEADER_NENTRIES * maxphys)
|
||||
#define GJ_VALIDATE_OFFSET(offset, sc) do { \
|
||||
if ((offset) + GJ_RECORD_MAX_SIZE(sc) >= (sc)->sc_jend) { \
|
||||
(offset) = (sc)->sc_jstart; \
|
||||
|
@ -2070,7 +2070,7 @@ g_mirror_sync_reinit(const struct g_mirror_disk *disk, struct bio *bp,
|
||||
bp->bio_to = disk->d_softc->sc_provider;
|
||||
bp->bio_caller1 = (void *)(uintptr_t)idx;
|
||||
bp->bio_offset = offset;
|
||||
bp->bio_length = MIN(MAXPHYS,
|
||||
bp->bio_length = MIN(maxphys,
|
||||
disk->d_softc->sc_mediasize - bp->bio_offset);
|
||||
}
|
||||
|
||||
@ -2128,7 +2128,7 @@ g_mirror_sync_start(struct g_mirror_disk *disk)
|
||||
bp = g_alloc_bio();
|
||||
sync->ds_bios[i] = bp;
|
||||
|
||||
bp->bio_data = malloc(MAXPHYS, M_MIRROR, M_WAITOK);
|
||||
bp->bio_data = malloc(maxphys, M_MIRROR, M_WAITOK);
|
||||
bp->bio_caller1 = (void *)(uintptr_t)i;
|
||||
g_mirror_sync_reinit(disk, bp, sync->ds_offset);
|
||||
sync->ds_offset += bp->bio_length;
|
||||
|
@ -381,7 +381,7 @@ g_nop_create(struct gctl_req *req, struct g_class *mp, struct g_provider *pp,
|
||||
gctl_error(req, "Invalid secsize for provider %s.", pp->name);
|
||||
return (EINVAL);
|
||||
}
|
||||
if (secsize > MAXPHYS) {
|
||||
if (secsize > maxphys) {
|
||||
gctl_error(req, "secsize is too big.");
|
||||
return (EINVAL);
|
||||
}
|
||||
|
@ -582,10 +582,10 @@ g_part_apm_write(struct g_part_table *basetable, struct g_consumer *cp)
|
||||
baseentry = LIST_NEXT(baseentry, gpe_entry);
|
||||
}
|
||||
|
||||
for (index = 0; index < tblsz; index += MAXPHYS / pp->sectorsize) {
|
||||
for (index = 0; index < tblsz; index += maxphys / pp->sectorsize) {
|
||||
error = g_write_data(cp, (1 + index) * pp->sectorsize,
|
||||
buf + index * pp->sectorsize,
|
||||
(tblsz - index > MAXPHYS / pp->sectorsize) ? MAXPHYS:
|
||||
(tblsz - index > maxphys / pp->sectorsize) ? maxphys:
|
||||
(tblsz - index) * pp->sectorsize);
|
||||
if (error) {
|
||||
g_free(buf);
|
||||
|
@ -552,8 +552,8 @@ gpt_read_tbl(struct g_part_gpt_table *table, struct g_consumer *cp,
|
||||
tblsz = hdr->hdr_entries * hdr->hdr_entsz;
|
||||
sectors = howmany(tblsz, pp->sectorsize);
|
||||
buf = g_malloc(sectors * pp->sectorsize, M_WAITOK | M_ZERO);
|
||||
for (idx = 0; idx < sectors; idx += MAXPHYS / pp->sectorsize) {
|
||||
size = (sectors - idx > MAXPHYS / pp->sectorsize) ? MAXPHYS:
|
||||
for (idx = 0; idx < sectors; idx += maxphys / pp->sectorsize) {
|
||||
size = (sectors - idx > maxphys / pp->sectorsize) ? maxphys:
|
||||
(sectors - idx) * pp->sectorsize;
|
||||
p = g_read_data(cp, (table->lba[elt] + idx) * pp->sectorsize,
|
||||
size, &error);
|
||||
@ -1237,11 +1237,11 @@ g_part_gpt_write(struct g_part_table *basetable, struct g_consumer *cp)
|
||||
crc = crc32(buf, table->hdr->hdr_size);
|
||||
le32enc(buf + 16, crc);
|
||||
|
||||
for (index = 0; index < tblsz; index += MAXPHYS / pp->sectorsize) {
|
||||
for (index = 0; index < tblsz; index += maxphys / pp->sectorsize) {
|
||||
error = g_write_data(cp,
|
||||
(table->lba[GPT_ELT_PRITBL] + index) * pp->sectorsize,
|
||||
buf + (index + 1) * pp->sectorsize,
|
||||
(tblsz - index > MAXPHYS / pp->sectorsize) ? MAXPHYS:
|
||||
(tblsz - index > maxphys / pp->sectorsize) ? maxphys :
|
||||
(tblsz - index) * pp->sectorsize);
|
||||
if (error)
|
||||
goto out;
|
||||
@ -1259,11 +1259,11 @@ g_part_gpt_write(struct g_part_table *basetable, struct g_consumer *cp)
|
||||
crc = crc32(buf, table->hdr->hdr_size);
|
||||
le32enc(buf + 16, crc);
|
||||
|
||||
for (index = 0; index < tblsz; index += MAXPHYS / pp->sectorsize) {
|
||||
for (index = 0; index < tblsz; index += maxphys / pp->sectorsize) {
|
||||
error = g_write_data(cp,
|
||||
(table->lba[GPT_ELT_SECTBL] + index) * pp->sectorsize,
|
||||
buf + (index + 1) * pp->sectorsize,
|
||||
(tblsz - index > MAXPHYS / pp->sectorsize) ? MAXPHYS:
|
||||
(tblsz - index > maxphys / pp->sectorsize) ? maxphys :
|
||||
(tblsz - index) * pp->sectorsize);
|
||||
if (error)
|
||||
goto out;
|
||||
|
@ -1020,10 +1020,10 @@ ldm_vmdb_parse(struct ldm_db *db, struct g_consumer *cp)
|
||||
pp = cp->provider;
|
||||
size = howmany(db->dh.last_seq * db->dh.size, pp->sectorsize);
|
||||
size -= 1; /* one sector takes vmdb header */
|
||||
for (n = 0; n < size; n += MAXPHYS / pp->sectorsize) {
|
||||
for (n = 0; n < size; n += maxphys / pp->sectorsize) {
|
||||
offset = db->ph.db_offset + db->th.conf_offset + n + 1;
|
||||
sectors = (size - n) > (MAXPHYS / pp->sectorsize) ?
|
||||
MAXPHYS / pp->sectorsize: size - n;
|
||||
sectors = (size - n) > (maxphys / pp->sectorsize) ?
|
||||
maxphys / pp->sectorsize : size - n;
|
||||
/* read VBLKs */
|
||||
buf = g_read_data(cp, offset * pp->sectorsize,
|
||||
sectors * pp->sectorsize, &error);
|
||||
|
@ -1160,12 +1160,12 @@ ddf_meta_read(struct g_consumer *cp, struct ddf_meta *meta)
|
||||
(GET16(meta, hdr->Configuration_Record_Length) * ss - 512) / 12));
|
||||
}
|
||||
|
||||
if (GET32(meta, hdr->cd_length) * ss >= MAXPHYS ||
|
||||
GET32(meta, hdr->pdr_length) * ss >= MAXPHYS ||
|
||||
GET32(meta, hdr->vdr_length) * ss >= MAXPHYS ||
|
||||
GET32(meta, hdr->cr_length) * ss >= MAXPHYS ||
|
||||
GET32(meta, hdr->pdd_length) * ss >= MAXPHYS ||
|
||||
GET32(meta, hdr->bbmlog_length) * ss >= MAXPHYS) {
|
||||
if (GET32(meta, hdr->cd_length) * ss >= maxphys ||
|
||||
GET32(meta, hdr->pdr_length) * ss >= maxphys ||
|
||||
GET32(meta, hdr->vdr_length) * ss >= maxphys ||
|
||||
GET32(meta, hdr->cr_length) * ss >= maxphys ||
|
||||
GET32(meta, hdr->pdd_length) * ss >= maxphys ||
|
||||
GET32(meta, hdr->bbmlog_length) * ss >= maxphys) {
|
||||
G_RAID_DEBUG(1, "%s: Blocksize is too big.", pp->name);
|
||||
goto hdrerror;
|
||||
}
|
||||
|
@ -344,7 +344,7 @@ promise_meta_read(struct g_consumer *cp, struct promise_raid_conf **metaarr)
|
||||
pp = cp->provider;
|
||||
subdisks = 0;
|
||||
|
||||
if (pp->sectorsize * 4 > MAXPHYS) {
|
||||
if (pp->sectorsize * 4 > maxphys) {
|
||||
G_RAID_DEBUG(1, "%s: Blocksize is too big.", pp->name);
|
||||
return (subdisks);
|
||||
}
|
||||
|
@ -1723,7 +1723,7 @@ g_raid3_sync_request(struct bio *bp)
|
||||
g_reset_bio(bp);
|
||||
bp->bio_cmd = BIO_READ;
|
||||
bp->bio_offset = sync->ds_offset * (sc->sc_ndisks - 1);
|
||||
bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset);
|
||||
bp->bio_length = MIN(maxphys, sc->sc_mediasize - bp->bio_offset);
|
||||
sync->ds_offset += bp->bio_length / (sc->sc_ndisks - 1);
|
||||
bp->bio_done = g_raid3_sync_done;
|
||||
bp->bio_data = data;
|
||||
@ -1752,7 +1752,7 @@ g_raid3_sync_request(struct bio *bp)
|
||||
if (boffset < moffset)
|
||||
moffset = boffset;
|
||||
}
|
||||
if (sync->ds_offset_done + (MAXPHYS * 100) < moffset) {
|
||||
if (sync->ds_offset_done + maxphys * 100 < moffset) {
|
||||
/* Update offset_done on every 100 blocks. */
|
||||
sync->ds_offset_done = moffset;
|
||||
g_raid3_update_metadata(disk);
|
||||
@ -2241,10 +2241,10 @@ g_raid3_sync_start(struct g_raid3_softc *sc)
|
||||
disk->d_sync.ds_bios[n] = bp;
|
||||
bp->bio_parent = NULL;
|
||||
bp->bio_cmd = BIO_READ;
|
||||
bp->bio_data = malloc(MAXPHYS, M_RAID3, M_WAITOK);
|
||||
bp->bio_data = malloc(maxphys, M_RAID3, M_WAITOK);
|
||||
bp->bio_cflags = 0;
|
||||
bp->bio_offset = disk->d_sync.ds_offset * (sc->sc_ndisks - 1);
|
||||
bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset);
|
||||
bp->bio_length = MIN(maxphys, sc->sc_mediasize - bp->bio_offset);
|
||||
disk->d_sync.ds_offset += bp->bio_length / (sc->sc_ndisks - 1);
|
||||
bp->bio_done = g_raid3_sync_done;
|
||||
bp->bio_from = disk->d_sync.ds_consumer;
|
||||
@ -2909,7 +2909,7 @@ g_raid3_read_metadata(struct g_consumer *cp, struct g_raid3_metadata *md)
|
||||
cp->provider->name);
|
||||
return (error);
|
||||
}
|
||||
if (md->md_sectorsize > MAXPHYS) {
|
||||
if (md->md_sectorsize > maxphys) {
|
||||
G_RAID3_DEBUG(0, "The blocksize is too big.");
|
||||
return (EINVAL);
|
||||
}
|
||||
|
@ -73,11 +73,12 @@ struct g_class g_shsec_class = {
|
||||
SYSCTL_DECL(_kern_geom);
|
||||
static SYSCTL_NODE(_kern_geom, OID_AUTO, shsec, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
|
||||
"GEOM_SHSEC stuff");
|
||||
static u_int g_shsec_debug = 0;
|
||||
static u_int g_shsec_debug;
|
||||
SYSCTL_UINT(_kern_geom_shsec, OID_AUTO, debug, CTLFLAG_RWTUN, &g_shsec_debug, 0,
|
||||
"Debug level");
|
||||
static u_int g_shsec_maxmem = MAXPHYS * 100;
|
||||
SYSCTL_UINT(_kern_geom_shsec, OID_AUTO, maxmem, CTLFLAG_RDTUN, &g_shsec_maxmem,
|
||||
static u_long g_shsec_maxmem;
|
||||
SYSCTL_ULONG(_kern_geom_shsec, OID_AUTO, maxmem,
|
||||
CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &g_shsec_maxmem,
|
||||
0, "Maximum memory that can be allocated for I/O (in bytes)");
|
||||
static u_int g_shsec_alloc_failed = 0;
|
||||
SYSCTL_UINT(_kern_geom_shsec, OID_AUTO, alloc_failed, CTLFLAG_RD,
|
||||
@ -113,10 +114,12 @@ static void
|
||||
g_shsec_init(struct g_class *mp __unused)
|
||||
{
|
||||
|
||||
g_shsec_zone = uma_zcreate("g_shsec_zone", MAXPHYS, NULL, NULL, NULL,
|
||||
g_shsec_maxmem = maxphys * 100;
|
||||
TUNABLE_ULONG_FETCH("kern.geom.shsec.maxmem,", &g_shsec_maxmem);
|
||||
g_shsec_zone = uma_zcreate("g_shsec_zone", maxphys, NULL, NULL, NULL,
|
||||
NULL, 0, 0);
|
||||
g_shsec_maxmem -= g_shsec_maxmem % MAXPHYS;
|
||||
uma_zone_set_max(g_shsec_zone, g_shsec_maxmem / MAXPHYS);
|
||||
g_shsec_maxmem -= g_shsec_maxmem % maxphys;
|
||||
uma_zone_set_max(g_shsec_zone, g_shsec_maxmem / maxphys);
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -92,9 +92,10 @@ SYSCTL_PROC(_kern_geom_stripe, OID_AUTO, fast,
|
||||
CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, NULL, 0,
|
||||
g_sysctl_stripe_fast, "I",
|
||||
"Fast, but memory-consuming, mode");
|
||||
static u_int g_stripe_maxmem = MAXPHYS * 100;
|
||||
SYSCTL_UINT(_kern_geom_stripe, OID_AUTO, maxmem, CTLFLAG_RDTUN, &g_stripe_maxmem,
|
||||
0, "Maximum memory that can be allocated in \"fast\" mode (in bytes)");
|
||||
static u_long g_stripe_maxmem;
|
||||
SYSCTL_ULONG(_kern_geom_stripe, OID_AUTO, maxmem,
|
||||
CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &g_stripe_maxmem, 0,
|
||||
"Maximum memory that can be allocated in \"fast\" mode (in bytes)");
|
||||
static u_int g_stripe_fast_failed = 0;
|
||||
SYSCTL_UINT(_kern_geom_stripe, OID_AUTO, fast_failed, CTLFLAG_RD,
|
||||
&g_stripe_fast_failed, 0, "How many times \"fast\" mode failed");
|
||||
@ -129,10 +130,12 @@ static void
|
||||
g_stripe_init(struct g_class *mp __unused)
|
||||
{
|
||||
|
||||
g_stripe_zone = uma_zcreate("g_stripe_zone", MAXPHYS, NULL, NULL,
|
||||
g_stripe_maxmem = maxphys * 100;
|
||||
TUNABLE_ULONG_FETCH("kern.geom.stripe.maxmem,", &g_stripe_maxmem);
|
||||
g_stripe_zone = uma_zcreate("g_stripe_zone", maxphys, NULL, NULL,
|
||||
NULL, NULL, 0, 0);
|
||||
g_stripe_maxmem -= g_stripe_maxmem % MAXPHYS;
|
||||
uma_zone_set_max(g_stripe_zone, g_stripe_maxmem / MAXPHYS);
|
||||
g_stripe_maxmem -= g_stripe_maxmem % maxphys;
|
||||
uma_zone_set_max(g_stripe_zone, g_stripe_maxmem / maxphys);
|
||||
}
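Both GEOM classes now derive their zone sizing from the runtime maxphys and pick up their maxmem limits with an explicit TUNABLE_ULONG_FETCH(), since the sysctls became CTLFLAG_RDTUN | CTLFLAG_NOFETCH. A hypothetical /boot/loader.conf fragment exercising the two tunables (names taken from the fetch calls above, values purely illustrative):

kern.geom.shsec.maxmem="209715200"
kern.geom.stripe.maxmem="209715200"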
|
||||
|
||||
static void
|
||||
@ -633,7 +636,7 @@ g_stripe_start(struct bio *bp)
|
||||
* Do use "fast" mode when:
|
||||
* 1. "Fast" mode is ON.
|
||||
* and
|
||||
* 2. Request size is less than or equal to MAXPHYS,
|
||||
* 2. Request size is less than or equal to maxphys,
|
||||
* which should always be true.
|
||||
* and
|
||||
* 3. Request size is bigger than stripesize * ndisks. If it isn't,
|
||||
@ -644,7 +647,7 @@ g_stripe_start(struct bio *bp)
|
||||
* and
|
||||
* 5. It is not a BIO_DELETE.
|
||||
*/
|
||||
if (g_stripe_fast && bp->bio_length <= MAXPHYS &&
|
||||
if (g_stripe_fast && bp->bio_length <= maxphys &&
|
||||
bp->bio_length >= stripesize * sc->sc_ndisks &&
|
||||
(bp->bio_flags & BIO_UNMAPPED) == 0 &&
|
||||
bp->bio_cmd != BIO_DELETE) {
|
||||
|
@ -136,7 +136,7 @@ SYSCTL_UINT(_kern_geom_uzip, OID_AUTO, debug_block, CTLFLAG_RWTUN,
|
||||
/*
|
||||
* Maximum allowed valid block size (to prevent foot-shooting)
|
||||
*/
|
||||
#define MAX_BLKSZ (MAXPHYS)
|
||||
#define MAX_BLKSZ (maxphys)
|
||||
|
||||
static char CLOOP_MAGIC_START[] = "#!/bin/sh\n";
|
||||
|
||||
@ -292,7 +292,7 @@ g_uzip_request(struct g_geom *gp, struct bio *bp)
|
||||
bp2->bio_offset = TOFF_2_BOFF(sc, pp, start_blk);
|
||||
while (1) {
|
||||
bp2->bio_length = TLEN_2_BLEN(sc, pp, bp2, end_blk - 1);
|
||||
if (bp2->bio_length <= MAXPHYS) {
|
||||
if (bp2->bio_length <= maxphys) {
|
||||
break;
|
||||
}
|
||||
if (end_blk == (start_blk + 1)) {
|
||||
@ -791,7 +791,7 @@ g_uzip_taste(struct g_class *mp, struct g_provider *pp, int flags)
|
||||
goto e4;
|
||||
}
|
||||
if (sc->blksz > MAX_BLKSZ) {
|
||||
printf("%s: block size (%u) should not be larger than %d.\n",
|
||||
printf("%s: block size (%u) should not be larger than %lu.\n",
|
||||
gp->name, sc->blksz, MAX_BLKSZ);
|
||||
}
|
||||
total_offsets = sc->nblocks + 1;
|
||||
|
@ -106,7 +106,7 @@
|
||||
|
||||
/* Synchronization/initialization request sizes. */
|
||||
#define GV_MIN_SYNCSIZE 512
|
||||
#define GV_MAX_SYNCSIZE MAXPHYS
|
||||
#define GV_MAX_SYNCSIZE maxphys
|
||||
#define GV_DFLT_SYNCSIZE 65536
|
||||
|
||||
/* Flags for BIOs, as they are processed within vinum. */
|
||||
|
@ -148,8 +148,8 @@ g_virstor_init(struct g_class *mp __unused)
|
||||
{
|
||||
|
||||
/* Catch map struct size mismatch at compile time; Map entries must
|
||||
* fit into MAXPHYS exactly, with no wasted space. */
|
||||
CTASSERT(VIRSTOR_MAP_BLOCK_ENTRIES*VIRSTOR_MAP_ENTRY_SIZE == MAXPHYS);
|
||||
* fit into maxphys exactly, with no wasted space. */
|
||||
MPASS(VIRSTOR_MAP_BLOCK_ENTRIES * VIRSTOR_MAP_ENTRY_SIZE == maxphys);
|
||||
|
||||
/* Init UMA zones, TAILQ's, other global vars */
|
||||
}
|
||||
@ -1245,7 +1245,7 @@ virstor_check_and_run(struct g_virstor_softc *sc)
|
||||
struct g_virstor_map_entry *mapbuf;
|
||||
size_t bs;
|
||||
|
||||
bs = MIN(MAXPHYS, sc->map_size - count);
|
||||
bs = MIN(maxphys, sc->map_size - count);
|
||||
if (bs % sc->sectorsize != 0) {
|
||||
/* Check for alignment errors */
|
||||
bs = rounddown(bs, sc->sectorsize);
|
||||
|
@ -41,8 +41,8 @@ struct virstor_map_entry {
|
||||
};
|
||||
|
||||
#define VIRSTOR_MAP_ENTRY_SIZE (sizeof(struct virstor_map_entry))
|
||||
#define VIRSTOR_MAP_BLOCK_ENTRIES (MAXPHYS / VIRSTOR_MAP_ENTRY_SIZE)
|
||||
/* Struct size is guarded by CTASSERT in main source */
|
||||
#define VIRSTOR_MAP_BLOCK_ENTRIES (maxphys / VIRSTOR_MAP_ENTRY_SIZE)
|
||||
/* Struct size is guarded by MPASS in main source */
|
||||
|
||||
#ifdef _KERNEL
|
||||
|
||||
|
@ -146,8 +146,29 @@ char kernelname[MAXPATHLEN] = PATH_KERNEL; /* XXX bloat */
SYSCTL_STRING(_kern, KERN_BOOTFILE, bootfile, CTLFLAG_RW | CTLFLAG_MPSAFE,
kernelname, sizeof kernelname, "Name of kernel file booted");

SYSCTL_INT(_kern, KERN_MAXPHYS, maxphys, CTLFLAG_RD | CTLFLAG_CAPRD,
SYSCTL_NULL_INT_PTR, MAXPHYS, "Maximum block I/O access size");
#ifdef COMPAT_FREEBSD12
static int
sysctl_maxphys(SYSCTL_HANDLER_ARGS)
{
u_long lvalue;
int ivalue;

lvalue = maxphys;
if (sizeof(int) == sizeof(u_long) || req->oldlen >= sizeof(u_long))
return (sysctl_handle_long(oidp, &lvalue, 0, req));
if (lvalue > INT_MAX)
return (sysctl_handle_long(oidp, &lvalue, 0, req));
ivalue = lvalue;
return (sysctl_handle_int(oidp, &ivalue, 0, req));
}
SYSCTL_PROC(_kern, KERN_MAXPHYS, maxphys, CTLTYPE_LONG | CTLFLAG_RDTUN |
CTLFLAG_NOFETCH | CTLFLAG_CAPRD | CTLFLAG_MPSAFE,
NULL, 0, sysctl_maxphys, "UL", "Maximum block I/O access size");
#else
SYSCTL_ULONG(_kern, KERN_MAXPHYS, maxphys,
CTLFLAG_RDTUN | CTLFLAG_NOFETCH | CTLFLAG_CAPRD,
&maxphys, 0, "Maximum block I/O access size");
#endif

SYSCTL_INT(_hw, HW_NCPU, ncpu, CTLFLAG_RD|CTLFLAG_CAPRD,
&mp_ncpus, 0, "Number of active CPUs");
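For completeness, a minimal user-space sketch of reading the new OID; it assumes only the standard sysctl(3) interface and the 64-bit value exported by the SYSCTL_ULONG/SYSCTL_PROC definitions above (old 32-bit consumers are what the COMPAT_FREEBSD12 handler caters to):

#include <sys/types.h>
#include <sys/sysctl.h>

#include <stdio.h>

int
main(void)
{
	u_long maxphys;
	size_t len = sizeof(maxphys);

	/* Read-only at runtime; the value can only be set as a loader tunable. */
	if (sysctlbyname("kern.maxphys", &maxphys, &len, NULL, 0) == -1) {
		perror("sysctlbyname(kern.maxphys)");
		return (1);
	}
	printf("kern.maxphys = %lu bytes\n", maxphys);
	return (0);
}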
|
@ -69,7 +69,7 @@ physio(struct cdev *dev, struct uio *uio, int ioflag)
|
||||
* need to reject any requests that will not fit into one buffer.
|
||||
*/
|
||||
if (dev->si_flags & SI_NOSPLIT &&
|
||||
(uio->uio_resid > dev->si_iosize_max || uio->uio_resid > MAXPHYS ||
|
||||
(uio->uio_resid > dev->si_iosize_max || uio->uio_resid > maxphys ||
|
||||
uio->uio_iovcnt > 1)) {
|
||||
/*
|
||||
* Tell the user why his I/O was rejected.
|
||||
@ -78,10 +78,10 @@ physio(struct cdev *dev, struct uio *uio, int ioflag)
|
||||
uprintf("%s: request size=%zd > si_iosize_max=%d; "
|
||||
"cannot split request\n", devtoname(dev),
|
||||
uio->uio_resid, dev->si_iosize_max);
|
||||
if (uio->uio_resid > MAXPHYS)
|
||||
uprintf("%s: request size=%zd > MAXPHYS=%d; "
|
||||
if (uio->uio_resid > maxphys)
|
||||
uprintf("%s: request size=%zd > maxphys=%lu; "
|
||||
"cannot split request\n", devtoname(dev),
|
||||
uio->uio_resid, MAXPHYS);
|
||||
uio->uio_resid, maxphys);
|
||||
if (uio->uio_iovcnt > 1)
|
||||
uprintf("%s: request vectors=%d > 1; "
|
||||
"cannot split request\n", devtoname(dev),
|
||||
@ -101,12 +101,13 @@ physio(struct cdev *dev, struct uio *uio, int ioflag)
pages = NULL;
} else if ((dev->si_flags & SI_UNMAPPED) && unmapped_buf_allowed) {
pbuf = NULL;
maxpages = btoc(MIN(uio->uio_resid, MAXPHYS)) + 1;
maxpages = btoc(MIN(uio->uio_resid, maxphys)) + 1;
pages = malloc(sizeof(*pages) * maxpages, M_DEVBUF, M_WAITOK);
} else {
pbuf = uma_zalloc(pbuf_zone, M_WAITOK);
MPASS((pbuf->b_flags & B_MAXPHYS) != 0);
sa = pbuf->b_data;
maxpages = btoc(MAXPHYS);
maxpages = btoc(maxphys);
pages = pbuf->b_pages;
}
prot = VM_PROT_READ;
@ -144,13 +145,13 @@ physio(struct cdev *dev, struct uio *uio, int ioflag)
bp->bio_length = uio->uio_iov[i].iov_len;
if (bp->bio_length > dev->si_iosize_max)
bp->bio_length = dev->si_iosize_max;
if (bp->bio_length > MAXPHYS)
bp->bio_length = MAXPHYS;
if (bp->bio_length > maxphys)
bp->bio_length = maxphys;

/*
* Make sure the pbuf can map the request.
* The pbuf has kvasize = MAXPHYS, so a request
* larger than MAXPHYS - PAGE_SIZE must be
* The pbuf has kvasize = maxphys, so a request
* larger than maxphys - PAGE_SIZE must be
* page aligned or it will be fragmented.
*/
poff = (vm_offset_t)base & PAGE_MASK;
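The maxpages = btoc(...) + 1 computation in the unmapped branch above accounts for user buffers that do not start on a page boundary: a maxphys-sized transfer that begins mid-page touches one page more than btoc(maxphys). A small stand-alone check of that arithmetic (illustrative page size and offset, plain C, not kernel code):

#include <stdio.h>

#define PAGE_SZ		4096UL
#define BTOC(bytes)	(((bytes) + PAGE_SZ - 1) / PAGE_SZ)	/* like btoc() */

int
main(void)
{
	unsigned long maxphys = 1024 * 1024;	/* the new default */
	unsigned long off = 512;		/* unaligned start within a page */
	unsigned long first = off / PAGE_SZ;
	unsigned long last = (off + maxphys - 1) / PAGE_SZ;

	printf("pages spanned:  %lu\n", last - first + 1);	/* 257 */
	printf("pages reserved: %lu\n", BTOC(maxphys) + 1);	/* 257 */
	return (0);
}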
|
@ -885,7 +885,7 @@ vn_sendfile(struct file *fp, int sockfd, struct uio *hdr_uio,
|
||||
* do any heuristics and use exactly the value supplied by
|
||||
* application. Otherwise, we allow readahead up to "rem".
|
||||
* If application wants more, let it be, but there is no
|
||||
* reason to go above MAXPHYS. Also check against "obj_size",
|
||||
* reason to go above maxphys. Also check against "obj_size",
|
||||
* since vm_pager_has_page() can hint beyond EOF.
|
||||
*/
|
||||
if (flags & SF_USER_READAHEAD) {
|
||||
@ -895,7 +895,7 @@ vn_sendfile(struct file *fp, int sockfd, struct uio *hdr_uio,
|
||||
npages;
|
||||
rhpages += SF_READAHEAD(flags);
|
||||
}
|
||||
rhpages = min(howmany(MAXPHYS, PAGE_SIZE), rhpages);
|
||||
rhpages = min(howmany(maxphys, PAGE_SIZE), rhpages);
|
||||
rhpages = min(howmany(obj_size - trunc_page(off), PAGE_SIZE) -
|
||||
npages, rhpages);
|
||||
|
||||
|
@ -41,6 +41,7 @@ __FBSDID("$FreeBSD$");

#include "opt_param.h"
#include "opt_msgbuf.h"
#include "opt_maxphys.h"
#include "opt_maxusers.h"

#include <sys/param.h>
@ -95,14 +96,15 @@ int maxprocperuid; /* max # of procs per user */
int maxfiles; /* sys. wide open files limit */
int maxfilesperproc; /* per-proc open files limit */
int msgbufsize; /* size of kernel message buffer */
int nbuf;
int nbuf; /* number of bcache bufs */
int bio_transient_maxcnt;
int ngroups_max; /* max # groups per process */
int nswbuf;
pid_t pid_max = PID_MAX;
long maxswzone; /* max swmeta KVA storage */
long maxbcache; /* max buffer cache KVA storage */
long maxpipekva; /* Limit on pipe KVA */
u_long maxswzone; /* max swmeta KVA storage */
u_long maxbcache; /* max buffer cache KVA storage */
u_long maxpipekva; /* Limit on pipe KVA */
u_long maxphys; /* max raw I/O transfer size */
int vm_guest = VM_GUEST_NO; /* Running as virtual machine guest? */
u_long maxtsiz; /* max text size */
u_long dfldsiz; /* initial data size limit */
@ -294,6 +296,18 @@ init_param2(long physpages)
nbuf = NBUF;
TUNABLE_INT_FETCH("kern.nbuf", &nbuf);
TUNABLE_INT_FETCH("kern.bio_transient_maxcnt", &bio_transient_maxcnt);
maxphys = MAXPHYS;
TUNABLE_ULONG_FETCH("kern.maxphys", &maxphys);
if (maxphys == 0) {
maxphys = MAXPHYS;
} else if (__bitcountl(maxphys) != 1) { /* power of two */
if (flsl(maxphys) == NBBY * sizeof(maxphys))
maxphys = MAXPHYS;
else
maxphys = 1UL << flsl(maxphys);
}
if (maxphys < PAGE_SIZE)
maxphys = MAXPHYS;

/*
* Physical buffers are pre-allocated buffers (struct buf) that
@ -305,7 +319,7 @@ init_param2(long physpages)
* The default for maxpipekva is min(1/64 of the kernel address space,
* max(1/64 of main memory, 512KB)). See sys_pipe.c for more details.
*/
maxpipekva = (physpages / 64) * PAGE_SIZE;
maxpipekva = ptoa(physpages / 64);
TUNABLE_LONG_FETCH("kern.ipc.maxpipekva", &maxpipekva);
if (maxpipekva < 512 * 1024)
maxpipekva = 512 * 1024;
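The validation above is forgiving: an unset or zero kern.maxphys keeps the compile-time default, a non-power-of-two value is rounded up to the next power of two (unless that would overflow), and a value below PAGE_SIZE falls back to the default as well. A user-space sketch of the same rule, using flsl(3) from <strings.h> in place of the kernel's flsl()/__bitcountl():

#include <limits.h>
#include <strings.h>

static unsigned long
round_maxphys(unsigned long req, unsigned long dflt, unsigned long pagesize)
{
	if (req == 0)
		return (dflt);
	if ((req & (req - 1)) != 0) {			/* not a power of two */
		if (flsl((long)req) == (int)(CHAR_BIT * sizeof(req)))
			return (dflt);			/* next power would overflow */
		req = 1UL << flsl((long)req);		/* round up */
	}
	if (req < pagesize)
		return (dflt);
	return (req);
}
/* round_maxphys(600 * 1024, 1024 * 1024, 4096) yields 1 MB, for example. */

On a running system the value would normally come from a /boot/loader.conf line such as kern.maxphys="2097152"; since the sysctl is CTLFLAG_RDTUN, it cannot be changed after boot.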
|
@ -1252,14 +1252,16 @@ aio_qbio(struct proc *p, struct kaiocb *job)
|
||||
ki = p->p_aioinfo;
|
||||
poff = (vm_offset_t)cb->aio_buf & PAGE_MASK;
|
||||
if ((dev->si_flags & SI_UNMAPPED) && unmapped_buf_allowed) {
|
||||
if (cb->aio_nbytes > MAXPHYS) {
|
||||
if (cb->aio_nbytes > maxphys) {
|
||||
error = -1;
|
||||
goto unref;
|
||||
}
|
||||
|
||||
pbuf = NULL;
|
||||
job->pages = malloc(sizeof(vm_page_t) * (atop(round_page(
|
||||
cb->aio_nbytes)) + 1), M_TEMP, M_WAITOK | M_ZERO);
|
||||
} else {
|
||||
if (cb->aio_nbytes > MAXPHYS - poff) {
|
||||
if (cb->aio_nbytes > maxphys) {
|
||||
error = -1;
|
||||
goto unref;
|
||||
}
|
||||
@ -1273,6 +1275,7 @@ aio_qbio(struct proc *p, struct kaiocb *job)
|
||||
AIO_LOCK(ki);
|
||||
ki->kaio_buffer_count++;
|
||||
AIO_UNLOCK(ki);
|
||||
job->pages = pbuf->b_pages;
|
||||
}
|
||||
job->bp = bp = g_alloc_bio();
|
||||
|
||||
@ -1289,7 +1292,7 @@ aio_qbio(struct proc *p, struct kaiocb *job)
|
||||
prot |= VM_PROT_WRITE; /* Less backwards than it looks */
|
||||
job->npages = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
|
||||
(vm_offset_t)cb->aio_buf, bp->bio_length, prot, job->pages,
|
||||
nitems(job->pages));
|
||||
atop(maxphys) + 1);
|
||||
if (job->npages < 0) {
|
||||
error = EFAULT;
|
||||
goto doerror;
|
||||
@ -1320,6 +1323,8 @@ aio_qbio(struct proc *p, struct kaiocb *job)
|
||||
AIO_UNLOCK(ki);
|
||||
uma_zfree(pbuf_zone, pbuf);
|
||||
job->pbuf = NULL;
|
||||
} else {
|
||||
free(job->pages, M_TEMP);
|
||||
}
|
||||
g_destroy_bio(bp);
|
||||
job->bp = NULL;
|
||||
@ -2342,7 +2347,8 @@ aio_biowakeup(struct bio *bp)
|
||||
/* Release mapping into kernel space. */
|
||||
userp = job->userproc;
|
||||
ki = userp->p_aioinfo;
|
||||
if (job->pbuf) {
|
||||
vm_page_unhold_pages(job->pages, job->npages);
|
||||
if (job->pbuf != NULL) {
|
||||
pmap_qremove((vm_offset_t)job->pbuf->b_data, job->npages);
|
||||
uma_zfree(pbuf_zone, job->pbuf);
|
||||
job->pbuf = NULL;
|
||||
@ -2350,9 +2356,10 @@ aio_biowakeup(struct bio *bp)
|
||||
AIO_LOCK(ki);
|
||||
ki->kaio_buffer_count--;
|
||||
AIO_UNLOCK(ki);
|
||||
} else
|
||||
} else {
|
||||
free(job->pages, M_TEMP);
|
||||
atomic_subtract_int(&num_unmapped_aio, 1);
|
||||
vm_page_unhold_pages(job->pages, job->npages);
|
||||
}
|
||||
|
||||
bp = job->bp;
|
||||
job->bp = NULL;
|
||||
|
@ -147,8 +147,14 @@ struct bufdomain {
#define BD_RUN_UNLOCK(bd) mtx_unlock(BD_RUN_LOCKPTR((bd)))
#define BD_DOMAIN(bd) (bd - bdomain)

static struct buf *buf; /* buffer header pool */
extern struct buf *swbuf; /* Swap buffer header pool. */
static char *buf; /* buffer header pool */
static struct buf *
nbufp(unsigned i)
{
return ((struct buf *)(buf + (sizeof(struct buf) +
sizeof(vm_page_t) * atop(maxbcachebuf)) * i));
}

caddr_t __read_mostly unmapped_buf;

/* Used below and for softdep flushing threads in ufs/ffs/ffs_softdep.c */
@ -994,8 +1000,8 @@ maxbcachebuf_adjust(void)
maxbcachebuf = i;
if (maxbcachebuf < MAXBSIZE)
maxbcachebuf = MAXBSIZE;
if (maxbcachebuf > MAXPHYS)
maxbcachebuf = MAXPHYS;
if (maxbcachebuf > maxphys)
maxbcachebuf = maxphys;
if (bootverbose != 0 && maxbcachebuf != MAXBCACHEBUF)
printf("maxbcachebuf=%d\n", maxbcachebuf);
}
@ -1113,10 +1119,10 @@ kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est)
biotmap_sz = buf_sz / TRANSIENT_DENOM;
buf_sz -= biotmap_sz;
}
if (biotmap_sz / INT_MAX > MAXPHYS)
if (biotmap_sz / INT_MAX > maxphys)
bio_transient_maxcnt = INT_MAX;
else
bio_transient_maxcnt = biotmap_sz / MAXPHYS;
bio_transient_maxcnt = biotmap_sz / maxphys;
/*
* Artificially limit to 1024 simultaneous in-flight I/Os
* using the transient mapping.
@ -1136,10 +1142,11 @@ kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est)
/*
* Reserve space for the buffer cache buffers
*/
buf = (void *)v;
v = (caddr_t)(buf + nbuf);
buf = (char *)v;
v = (caddr_t)buf + (sizeof(struct buf) + sizeof(vm_page_t) *
atop(maxbcachebuf)) * nbuf;

return(v);
return (v);
}

/* Initialize the buffer subsystem. Called before use of any buffers. */
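Because struct buf no longer embeds a fixed b_pages[btoc(MAXPHYS)] array, the buffer-header pool is laid out by hand: each slot is sizeof(struct buf) plus atop(maxbcachebuf) page pointers, which is exactly what nbufp() and kern_vfs_bio_buffer_alloc() compute above. A rough illustration of the per-buffer difference, with made-up but typical numbers (4 KB pages, 64 KB maxbcachebuf, the new 1 MB maxphys, 8-byte pointers):

#include <stdio.h>

int
main(void)
{
	unsigned long page = 4096, ptr = 8;
	unsigned long maxbcachebuf = 64UL * 1024, maxphys = 1024UL * 1024;

	/* A fixed b_pages[atop(MAXPHYS)] with MAXPHYS bumped to 1 MB would cost: */
	printf("fixed b_pages[] per buf:    %lu bytes\n", maxphys / page * ptr);
	/* The flexible array sized to atop(maxbcachebuf) costs only: */
	printf("flexible b_pages[] per buf: %lu bytes\n",
	    maxbcachebuf / page * ptr);
	return (0);
}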
||||
@ -1157,12 +1164,12 @@ bufinit(void)
|
||||
mtx_init(&bdlock, "buffer daemon lock", NULL, MTX_DEF);
|
||||
mtx_init(&bdirtylock, "dirty buf lock", NULL, MTX_DEF);
|
||||
|
||||
unmapped_buf = (caddr_t)kva_alloc(MAXPHYS);
|
||||
unmapped_buf = (caddr_t)kva_alloc(maxphys);
|
||||
|
||||
/* finally, initialize each buffer header and stick on empty q */
|
||||
for (i = 0; i < nbuf; i++) {
|
||||
bp = &buf[i];
|
||||
bzero(bp, sizeof *bp);
|
||||
bp = nbufp(i);
|
||||
bzero(bp, sizeof(*bp) + sizeof(vm_page_t) * atop(maxbcachebuf));
|
||||
bp->b_flags = B_INVAL;
|
||||
bp->b_rcred = NOCRED;
|
||||
bp->b_wcred = NOCRED;
|
||||
@ -1246,7 +1253,8 @@ bufinit(void)
|
||||
|
||||
/* Setup the kva and free list allocators. */
|
||||
vmem_set_reclaim(buffer_arena, bufkva_reclaim);
|
||||
buf_zone = uma_zcache_create("buf free cache", sizeof(struct buf),
|
||||
buf_zone = uma_zcache_create("buf free cache",
|
||||
sizeof(struct buf) + sizeof(vm_page_t) * atop(maxbcachebuf),
|
||||
NULL, NULL, NULL, NULL, buf_import, buf_release, NULL, 0);
|
||||
|
||||
/*
|
||||
@ -1295,7 +1303,7 @@ vfs_buf_check_mapped(struct buf *bp)
|
||||
KASSERT(bp->b_data != unmapped_buf,
|
||||
("mapped buf: b_data was not updated %p", bp));
|
||||
KASSERT(bp->b_data < unmapped_buf || bp->b_data >= unmapped_buf +
|
||||
MAXPHYS, ("b_data + b_offset unmapped %p", bp));
|
||||
maxphys, ("b_data + b_offset unmapped %p", bp));
|
||||
}
|
||||
|
||||
static inline void
|
||||
@ -1330,7 +1338,7 @@ bufshutdown(int show_busybufs)
|
||||
{
|
||||
static int first_buf_printf = 1;
|
||||
struct buf *bp;
|
||||
int iter, nbusy, pbusy;
|
||||
int i, iter, nbusy, pbusy;
|
||||
#ifndef PREEMPTION
|
||||
int subiter;
|
||||
#endif
|
||||
@ -1348,9 +1356,11 @@ bufshutdown(int show_busybufs)
|
||||
*/
|
||||
for (iter = pbusy = 0; iter < 20; iter++) {
|
||||
nbusy = 0;
|
||||
for (bp = &buf[nbuf]; --bp >= buf; )
|
||||
for (i = nbuf - 1; i >= 0; i--) {
|
||||
bp = nbufp(i);
|
||||
if (isbufbusy(bp))
|
||||
nbusy++;
|
||||
}
|
||||
if (nbusy == 0) {
|
||||
if (first_buf_printf)
|
||||
printf("All buffers synced.");
|
||||
@ -1391,7 +1401,8 @@ bufshutdown(int show_busybufs)
|
||||
* a fsck if we're just a client of a wedged NFS server
|
||||
*/
|
||||
nbusy = 0;
|
||||
for (bp = &buf[nbuf]; --bp >= buf; ) {
|
||||
for (i = nbuf - 1; i >= 0; i--) {
|
||||
bp = nbufp(i);
|
||||
if (isbufbusy(bp)) {
|
||||
#if 0
|
||||
/* XXX: This is bogus. We should probably have a BO_REMOTE flag instead */
|
||||
@ -1571,6 +1582,7 @@ buf_free(struct buf *bp)
|
||||
buf_deallocate(bp);
|
||||
bufkva_free(bp);
|
||||
atomic_add_int(&bufdomain(bp)->bd_freebuffers, 1);
|
||||
MPASS((bp->b_flags & B_MAXPHYS) == 0);
|
||||
BUF_UNLOCK(bp);
|
||||
uma_zfree(buf_zone, bp);
|
||||
}
|
||||
@ -1674,6 +1686,7 @@ buf_alloc(struct bufdomain *bd)
|
||||
("bp: %p still has %d vm pages\n", bp, bp->b_npages));
|
||||
KASSERT(bp->b_kvasize == 0, ("bp: %p still has kva\n", bp));
|
||||
KASSERT(bp->b_bufsize == 0, ("bp: %p still has bufspace\n", bp));
|
||||
MPASS((bp->b_flags & B_MAXPHYS) == 0);
|
||||
|
||||
bp->b_domain = BD_DOMAIN(bd);
|
||||
bp->b_flags = 0;
|
||||
@ -2018,6 +2031,9 @@ bufkva_alloc(struct buf *bp, int maxsize, int gbflags)
|
||||
|
||||
KASSERT((gbflags & GB_UNMAPPED) == 0 || (gbflags & GB_KVAALLOC) != 0,
|
||||
("Invalid gbflags 0x%x in %s", gbflags, __func__));
|
||||
MPASS((bp->b_flags & B_MAXPHYS) == 0);
|
||||
KASSERT(maxsize <= maxbcachebuf,
|
||||
("bufkva_alloc kva too large %d %u", maxsize, maxbcachebuf));
|
||||
|
||||
bufkva_free(bp);
|
||||
|
||||
@ -3036,6 +3052,10 @@ vfs_vmio_extend(struct buf *bp, int desiredpages, int size)
|
||||
*/
|
||||
obj = bp->b_bufobj->bo_object;
|
||||
if (bp->b_npages < desiredpages) {
|
||||
KASSERT(desiredpages <= atop(maxbcachebuf),
|
||||
("vfs_vmio_extend past maxbcachebuf %p %d %u",
|
||||
bp, desiredpages, maxbcachebuf));
|
||||
|
||||
/*
|
||||
* We must allocate system pages since blocking
|
||||
* here could interfere with paging I/O, no
|
||||
@ -3163,7 +3183,7 @@ vfs_bio_awrite(struct buf *bp)
|
||||
(vp->v_mount != 0) && /* Only on nodes that have the size info */
|
||||
(bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
|
||||
size = vp->v_mount->mnt_stat.f_iosize;
|
||||
maxcl = MAXPHYS / size;
|
||||
maxcl = maxphys / size;
|
||||
|
||||
BO_RLOCK(bo);
|
||||
for (i = 1; i < maxcl; i++)
|
||||
@ -4853,6 +4873,10 @@ vm_hold_load_pages(struct buf *bp, vm_offset_t from, vm_offset_t to)
|
||||
to = round_page(to);
|
||||
from = round_page(from);
|
||||
index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
|
||||
MPASS((bp->b_flags & B_MAXPHYS) == 0);
|
||||
KASSERT(to - from <= maxbcachebuf,
|
||||
("vm_hold_load_pages too large %p %#jx %#jx %u",
|
||||
bp, (uintmax_t)from, (uintmax_t)to, maxbcachebuf));
|
||||
|
||||
for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
|
||||
/*
|
||||
@ -4912,12 +4936,13 @@ vmapbuf(struct buf *bp, void *uaddr, size_t len, int mapbuf)
|
||||
vm_prot_t prot;
|
||||
int pidx;
|
||||
|
||||
MPASS((bp->b_flags & B_MAXPHYS) != 0);
|
||||
prot = VM_PROT_READ;
|
||||
if (bp->b_iocmd == BIO_READ)
|
||||
prot |= VM_PROT_WRITE; /* Less backwards than it looks */
|
||||
if ((pidx = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
|
||||
(vm_offset_t)uaddr, len, prot, bp->b_pages,
|
||||
btoc(MAXPHYS))) < 0)
|
||||
pidx = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
|
||||
(vm_offset_t)uaddr, len, prot, bp->b_pages, PBUF_PAGES);
|
||||
if (pidx < 0)
|
||||
return (-1);
|
||||
bp->b_bufsize = len;
|
||||
bp->b_npages = pidx;
|
||||
@ -4927,7 +4952,7 @@ vmapbuf(struct buf *bp, void *uaddr, size_t len, int mapbuf)
|
||||
bp->b_data = bp->b_kvabase + bp->b_offset;
|
||||
} else
|
||||
bp->b_data = unmapped_buf;
|
||||
return(0);
|
||||
return (0);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -5398,19 +5423,23 @@ DB_SHOW_COMMAND(bufqueues, bufqueues)
|
||||
db_printf("\n");
|
||||
cnt = 0;
|
||||
total = 0;
|
||||
for (j = 0; j < nbuf; j++)
|
||||
if (buf[j].b_domain == i && BUF_ISLOCKED(&buf[j])) {
|
||||
for (j = 0; j < nbuf; j++) {
|
||||
bp = nbufp(j);
|
||||
if (bp->b_domain == i && BUF_ISLOCKED(bp)) {
|
||||
cnt++;
|
||||
total += buf[j].b_bufsize;
|
||||
total += bp->b_bufsize;
|
||||
}
|
||||
}
|
||||
db_printf("\tLocked buffers: %d space %ld\n", cnt, total);
|
||||
cnt = 0;
|
||||
total = 0;
|
||||
for (j = 0; j < nbuf; j++)
|
||||
if (buf[j].b_domain == i) {
|
||||
for (j = 0; j < nbuf; j++) {
|
||||
bp = nbufp(j);
|
||||
if (bp->b_domain == i) {
|
||||
cnt++;
|
||||
total += buf[j].b_bufsize;
|
||||
total += bp->b_bufsize;
|
||||
}
|
||||
}
|
||||
db_printf("\tTotal buffers: %d space %ld\n", cnt, total);
|
||||
}
|
||||
}
|
||||
@ -5421,7 +5450,7 @@ DB_SHOW_COMMAND(lockedbufs, lockedbufs)
|
||||
int i;
|
||||
|
||||
for (i = 0; i < nbuf; i++) {
|
||||
bp = &buf[i];
|
||||
bp = nbufp(i);
|
||||
if (BUF_ISLOCKED(bp)) {
|
||||
db_show_buffer((uintptr_t)bp, 1, 0, NULL);
|
||||
db_printf("\n");
|
||||
@ -5464,7 +5493,7 @@ DB_COMMAND(countfreebufs, db_coundfreebufs)
|
||||
}
|
||||
|
||||
for (i = 0; i < nbuf; i++) {
|
||||
bp = &buf[i];
|
||||
bp = nbufp(i);
|
||||
if (bp->b_qindex == QUEUE_EMPTY)
|
||||
nfree++;
|
||||
else
|
||||
|
@ -386,6 +386,7 @@ cluster_rbuild(struct vnode *vp, u_quad_t filesize, daddr_t lbn,
|
||||
bp = uma_zalloc(cluster_pbuf_zone, M_NOWAIT);
|
||||
if (bp == NULL)
|
||||
return tbp;
|
||||
MPASS((bp->b_flags & B_MAXPHYS) != 0);
|
||||
|
||||
/*
|
||||
* We are synthesizing a buffer out of vm_page_t's, but
|
||||
@ -871,6 +872,7 @@ cluster_wbuild(struct vnode *vp, long size, daddr_t start_lbn, int len,
|
||||
--len;
|
||||
continue;
|
||||
}
|
||||
MPASS((bp->b_flags & B_MAXPHYS) != 0);
|
||||
|
||||
/*
|
||||
* We got a pbuf to make the cluster in.
|
||||
|
@ -974,8 +974,8 @@ vop_stdallocate(struct vop_allocate_args *ap)
|
||||
iosize = vap->va_blocksize;
|
||||
if (iosize == 0)
|
||||
iosize = BLKDEV_IOSIZE;
|
||||
if (iosize > MAXPHYS)
|
||||
iosize = MAXPHYS;
|
||||
if (iosize > maxphys)
|
||||
iosize = maxphys;
|
||||
buf = malloc(iosize, M_TEMP, M_WAITOK);
|
||||
|
||||
#ifdef __notyet__
|
||||
|
@ -58,7 +58,7 @@ __FBSDID("$FreeBSD$");
|
||||
#define JZ_MSC_IRQRES 1
|
||||
#define JZ_MSC_RESSZ 2
|
||||
#define JZ_MSC_DMA_SEGS 128
|
||||
#define JZ_MSC_DMA_MAX_SIZE MAXPHYS
|
||||
#define JZ_MSC_DMA_MAX_SIZE maxphys
|
||||
|
||||
#define JZ_MSC_INT_ERR_BITS (JZ_INT_CRC_RES_ERR | JZ_INT_CRC_READ_ERR | \
|
||||
JZ_INT_CRC_WRITE_ERR | JZ_INT_TIMEOUT_RES | \
|
||||
|
@ -3200,8 +3200,8 @@ ifconf(u_long cmd, caddr_t data)
|
||||
struct sbuf *sb;
|
||||
int error, full = 0, valid_len, max_len;
|
||||
|
||||
/* Limit initial buffer size to MAXPHYS to avoid DoS from userspace. */
|
||||
max_len = MAXPHYS - 1;
|
||||
/* Limit initial buffer size to maxphys to avoid DoS from userspace. */
|
||||
max_len = maxphys - 1;
|
||||
|
||||
/* Prevent hostile input from being able to crash the system */
|
||||
if (ifc->ifc_len <= 0)
|
||||
|
@ -115,7 +115,7 @@ mambodisk_attach(device_t dev)
|
||||
d->d_strategy = mambodisk_strategy;
|
||||
d->d_name = "mambodisk";
|
||||
d->d_drv1 = sc;
|
||||
d->d_maxsize = MAXPHYS; /* Maybe ask bridge? */
|
||||
d->d_maxsize = maxphys; /* Maybe ask bridge? */
|
||||
|
||||
d->d_sectorsize = 512;
|
||||
sc->maxblocks = mambocall(MAMBO_DISK_INFO,MAMBO_INFO_BLKSZ,d->d_unit)
|
||||
|
@ -1870,7 +1870,7 @@ fsl_sataaction(struct cam_sim *sim, union ccb *ccb)
|
||||
cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
|
||||
cpi->protocol = PROTO_ATA;
|
||||
cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
|
||||
cpi->maxio = MAXPHYS;
|
||||
cpi->maxio = maxphys;
|
||||
cpi->ccb_h.status = CAM_REQ_CMP;
|
||||
break;
|
||||
}
|
||||
|
@ -140,8 +140,8 @@ struct kaiocb {
struct { /* BIO backend */
struct bio *bp; /* (*) BIO pointer */
struct buf *pbuf; /* (*) buffer pointer */
struct vm_page *pages[btoc(MAXPHYS)+1]; /* (*) */
int npages; /* (*) number of pages */
struct vm_page **pages; /* (*) */
};
struct { /* fsync() requests */
int pending; /* (a) number of pending I/O */
@ -141,7 +141,6 @@ struct buf {
TAILQ_HEAD(cluster_list_head, buf) cluster_head;
TAILQ_ENTRY(buf) cluster_entry;
} b_cluster;
struct vm_page *b_pages[btoc(MAXPHYS)];
int b_npages;
struct workhead b_dep; /* (D) List of filesystem dependencies. */
void *b_fsprivate1;
@ -156,6 +155,7 @@ struct buf {
#elif defined(BUF_TRACKING)
const char *b_io_tracking;
#endif
struct vm_page *b_pages[];
};

#define b_object b_bufobj->bo_object
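With b_pages[] turned into a flexible array member, the number of trailing slots depends on who allocated the buf: the buffer cache allocates atop(maxbcachebuf) slots, while pbufs allocate PBUF_PAGES slots (atop(maxphys) + 1, defined in vm_pager.h further down) and advertise that with the B_MAXPHYS flag defined below. A stand-alone model of the same pattern, illustrative only and not the kernel structures:

#include <assert.h>
#include <stdlib.h>

struct model_buf {
	unsigned flags;
#define MODEL_B_MAXPHYS	0x00200000
	int npages;
	void *b_pages[];		/* flexible array, like struct buf */
};

static struct model_buf *
model_alloc(size_t nslots, unsigned flags)
{
	struct model_buf *bp;

	/* Allocation decides how many page slots this buf carries. */
	bp = calloc(1, sizeof(*bp) + nslots * sizeof(void *));
	assert(bp != NULL);
	bp->flags = flags;
	return (bp);
}

int
main(void)
{
	size_t atop_maxbcachebuf = 16, pbuf_pages = 257;	/* example values */
	struct model_buf *bcache = model_alloc(atop_maxbcachebuf, 0);
	struct model_buf *pbuf = model_alloc(pbuf_pages, MODEL_B_MAXPHYS);

	/* Code that needs a full maxphys-sized page array checks the flag,
	 * mirroring the MPASS((bp->b_flags & B_MAXPHYS) != 0) assertions. */
	assert((pbuf->flags & MODEL_B_MAXPHYS) != 0);
	free(bcache);
	free(pbuf);
	return (0);
}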
@ -234,7 +234,7 @@ struct buf {
#define B_INVALONERR 0x00040000 /* Invalidate on write error. */
#define B_00080000 0x00080000 /* Available flag. */
#define B_00100000 0x00100000 /* Available flag. */
#define B_00200000 0x00200000 /* Available flag. */
#define B_MAXPHYS 0x00200000 /* nitems(b_pages[]) = atop(MAXPHYS). */
#define B_RELBUF 0x00400000 /* Release VMIO buffer. */
#define B_FS_FLAG1 0x00800000 /* Available flag for FS use. */
#define B_NOCOPY 0x01000000 /* Don't copy-on-write this buf. */
@ -247,7 +247,7 @@ struct buf {
#define B_REMFREE 0x80000000 /* Delayed bremfree */

#define PRINT_BUF_FLAGS "\20\40remfree\37cluster\36vmio\35ram\34managed" \
"\33paging\32infreecnt\31nocopy\30b23\27relbuf\26b21\25b20" \
"\33paging\32infreecnt\31nocopy\30b23\27relbuf\26maxphys\25b20" \
"\24b19\23invalonerr\22clusterok\21malloc\20nocache\17b14\16inval" \
"\15reuse\14noreuse\13eintr\12done\11b8\10delwri" \
"\7validsuspwrt\6cache\5deferred\4direct\3async\2needcommit\1age"
@ -496,8 +496,8 @@ buf_track(struct buf *bp __unused, const char *location __unused)

#ifdef _KERNEL
extern int nbuf; /* The number of buffer headers */
extern long maxswzone; /* Max KVA for swap structures */
extern long maxbcache; /* Max KVA for buffer cache */
extern u_long maxswzone; /* Max KVA for swap structures */
extern u_long maxbcache; /* Max KVA for buffer cache */
extern int maxbcachebuf; /* Max buffer cache block size */
extern long runningbufspace;
extern long hibufspace;
|
@ -160,7 +160,7 @@
#define DFLTPHYS (64 * 1024) /* default max raw I/O transfer size */
#endif
#ifndef MAXPHYS
#define MAXPHYS (128 * 1024) /* max raw I/O transfer size */
#define MAXPHYS (1024 * 1024) /* max raw I/O transfer size */
#endif
#ifndef MAXDUMPPGS
#define MAXDUMPPGS (DFLTPHYS/PAGE_SIZE)

@ -74,6 +74,8 @@ extern int maxusers; /* system tune hint */
extern int ngroups_max; /* max # of supplemental groups */
extern int vm_guest; /* Running as virtual machine guest? */

extern u_long maxphys; /* max raw I/O transfer size */

/*
* Detected virtual machine guest types. The intention is to expand
* and/or add to the VM_GUEST_VM type if specific VM functionality is
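The compile-time constant now only seeds the runtime value: it stays behind #ifndef, so a per-kernel override can still replace it (the new "opt_maxphys.h" include in subr_param.c suggests a kernel option feeds it), and init_param2() lets the kern.maxphys tunable override both. A tiny stand-alone illustration of that precedence, with stand-in values:

#include <stdio.h>

/* Pretend this came from opt_maxphys.h, i.e. a per-kernel override. */
/* #define MAXPHYS (512 * 1024) */

#ifndef MAXPHYS
#define MAXPHYS (1024 * 1024)		/* the new sys/param.h default */
#endif

int
main(void)
{
	unsigned long maxphys = MAXPHYS;	/* compile-time seed */
	unsigned long tunable = 0;		/* 0 = kern.maxphys not set */

	if (tunable != 0)
		maxphys = tunable;		/* loader tunable wins when set */
	printf("maxphys = %lu\n", maxphys);
	return (0);
}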
|
@ -1055,8 +1055,8 @@ ffs_mountfs(odevvp, mp, td)
|
||||
BO_UNLOCK(&odevvp->v_bufobj);
|
||||
if (dev->si_iosize_max != 0)
|
||||
mp->mnt_iosize_max = dev->si_iosize_max;
|
||||
if (mp->mnt_iosize_max > MAXPHYS)
|
||||
mp->mnt_iosize_max = MAXPHYS;
|
||||
if (mp->mnt_iosize_max > maxphys)
|
||||
mp->mnt_iosize_max = maxphys;
|
||||
if ((SBLOCKSIZE % cp->provider->sectorsize) != 0) {
|
||||
error = EINVAL;
|
||||
vfs_mount_error(mp,
|
||||
|
@ -586,7 +586,7 @@ swap_pager_swap_init(void)
|
||||
* but it isn't very efficient).
|
||||
*
|
||||
* The nsw_cluster_max is constrained by the bp->b_pages[]
|
||||
* array, which has MAXPHYS / PAGE_SIZE entries, and our locally
|
||||
* array, which has maxphys / PAGE_SIZE entries, and our locally
|
||||
* defined MAX_PAGEOUT_CLUSTER. Also be aware that swap ops are
|
||||
* constrained by the swap device interleave stripe size.
|
||||
*
|
||||
@ -601,7 +601,7 @@ swap_pager_swap_init(void)
|
||||
* have one NFS swap device due to the command/ack latency over NFS.
|
||||
* So it all works out pretty well.
|
||||
*/
|
||||
nsw_cluster_max = min(MAXPHYS / PAGE_SIZE, MAX_PAGEOUT_CLUSTER);
|
||||
nsw_cluster_max = min(maxphys / PAGE_SIZE, MAX_PAGEOUT_CLUSTER);
|
||||
|
||||
nsw_wcount_async = 4;
|
||||
nsw_wcount_async_max = nsw_wcount_async;
|
||||
@ -1314,6 +1314,7 @@ swap_pager_getpages_locked(vm_object_t object, vm_page_t *ma, int count,
|
||||
|
||||
VM_OBJECT_WUNLOCK(object);
|
||||
bp = uma_zalloc(swrbuf_zone, M_WAITOK);
|
||||
MPASS((bp->b_flags & B_MAXPHYS) != 0);
|
||||
/* Pages cannot leave the object while busy. */
|
||||
for (i = 0, p = bm; i < count; i++, p = TAILQ_NEXT(p, listq)) {
|
||||
MPASS(p->pindex == bm->pindex + i);
|
||||
@ -1522,8 +1523,9 @@ swap_pager_putpages(vm_object_t object, vm_page_t *ma, int count,
|
||||
VM_OBJECT_WUNLOCK(object);
|
||||
|
||||
bp = uma_zalloc(swwbuf_zone, M_WAITOK);
|
||||
MPASS((bp->b_flags & B_MAXPHYS) != 0);
|
||||
if (async)
|
||||
bp->b_flags = B_ASYNC;
|
||||
bp->b_flags |= B_ASYNC;
|
||||
bp->b_flags |= B_PAGING;
|
||||
bp->b_iocmd = BIO_WRITE;
|
||||
|
||||
|
@ -115,7 +115,6 @@ __FBSDID("$FreeBSD$");
|
||||
#define PFFOR 4
|
||||
|
||||
#define VM_FAULT_READ_DEFAULT (1 + VM_FAULT_READ_AHEAD_INIT)
|
||||
#define VM_FAULT_READ_MAX (1 + VM_FAULT_READ_AHEAD_MAX)
|
||||
|
||||
#define VM_FAULT_DONTNEED_MIN 1048576
|
||||
|
||||
|
@ -212,7 +212,7 @@ vm_ksubmap_init(struct kva_md_info *kmi)
|
||||
/*
|
||||
* Allocate the clean map to hold all of I/O virtual memory.
|
||||
*/
|
||||
size = (long)nbuf * BKVASIZE + (long)bio_transient_maxcnt * MAXPHYS;
|
||||
size = (long)nbuf * BKVASIZE + (long)bio_transient_maxcnt * maxphys;
|
||||
kmi->clean_sva = firstaddr = kva_alloc(size);
|
||||
kmi->clean_eva = firstaddr + size;
|
||||
|
||||
@ -233,7 +233,7 @@ vm_ksubmap_init(struct kva_md_info *kmi)
|
||||
* And optionally transient bio space.
|
||||
*/
|
||||
if (bio_transient_maxcnt != 0) {
|
||||
size = (long)bio_transient_maxcnt * MAXPHYS;
|
||||
size = (long)bio_transient_maxcnt * maxphys;
|
||||
vmem_init(transient_arena, "transient arena",
|
||||
firstaddr, size, PAGE_SIZE, 0, 0);
|
||||
firstaddr += size;
|
||||
|
@ -396,7 +396,7 @@ long vmspace_resident_count(struct vmspace *vmspace);
|
||||
*/
|
||||
#define VM_FAULT_READ_AHEAD_MIN 7
|
||||
#define VM_FAULT_READ_AHEAD_INIT 15
|
||||
#define VM_FAULT_READ_AHEAD_MAX min(atop(MAXPHYS) - 1, UINT8_MAX)
|
||||
#define VM_FAULT_READ_AHEAD_MAX min(atop(maxphys) - 1, UINT8_MAX)
|
||||
|
||||
/*
|
||||
* The following "find_space" options are supported by vm_map_find().
|
||||
|
@ -183,7 +183,8 @@ vm_pager_bufferinit(void)
|
||||
{
|
||||
|
||||
/* Main zone for paging bufs. */
|
||||
pbuf_zone = uma_zcreate("pbuf", sizeof(struct buf),
|
||||
pbuf_zone = uma_zcreate("pbuf",
|
||||
sizeof(struct buf) + PBUF_PAGES * sizeof(vm_page_t),
|
||||
pbuf_ctor, pbuf_dtor, pbuf_init, NULL, UMA_ALIGN_CACHE,
|
||||
UMA_ZONE_NOFREE);
|
||||
/* Few systems may still use this zone directly, so it needs a limit. */
|
||||
@ -384,7 +385,7 @@ pbuf_ctor(void *mem, int size, void *arg, int flags)
|
||||
bp->b_qindex = 0; /* On no queue (QUEUE_NONE) */
|
||||
bp->b_data = bp->b_kvabase;
|
||||
bp->b_xflags = 0;
|
||||
bp->b_flags = 0;
|
||||
bp->b_flags = B_MAXPHYS;
|
||||
bp->b_ioflags = 0;
|
||||
bp->b_iodone = NULL;
|
||||
bp->b_error = 0;
|
||||
@ -415,10 +416,10 @@ pbuf_init(void *mem, int size, int flags)
|
||||
{
|
||||
struct buf *bp = mem;
|
||||
|
||||
bp->b_kvabase = (void *)kva_alloc(MAXPHYS);
|
||||
bp->b_kvabase = (void *)kva_alloc(ptoa(PBUF_PAGES));
|
||||
if (bp->b_kvabase == NULL)
|
||||
return (ENOMEM);
|
||||
bp->b_kvasize = MAXPHYS;
|
||||
bp->b_kvasize = ptoa(PBUF_PAGES);
|
||||
BUF_LOCKINIT(bp);
|
||||
LIST_INIT(&bp->b_dep);
|
||||
bp->b_rcred = bp->b_wcred = NOCRED;
|
||||
|
@ -112,6 +112,12 @@ extern struct pagerops mgtdevicepagerops;
|
||||
extern struct pagerops *pagertab[];
|
||||
extern struct mtx_padalign pbuf_mtx;
|
||||
|
||||
/*
|
||||
* Number of pages that pbuf buffer can store in b_pages.
|
||||
* It is +1 to allow for unaligned data buffer of maxphys size.
|
||||
*/
|
||||
#define PBUF_PAGES (atop(maxphys) + 1)
|
||||
|
||||
vm_object_t vm_pager_allocate(objtype_t, void *, vm_ooffset_t, vm_prot_t,
|
||||
vm_ooffset_t, struct ucred *);
|
||||
void vm_pager_bufferinit(void);
|
||||
|
@ -817,7 +817,7 @@ vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int count,
|
||||
|
||||
KASSERT(foff < object->un_pager.vnp.vnp_size,
|
||||
("%s: page %p offset beyond vp %p size", __func__, m[0], vp));
|
||||
KASSERT(count <= nitems(bp->b_pages),
|
||||
KASSERT(count <= atop(maxphys),
|
||||
("%s: requested %d pages", __func__, count));
|
||||
|
||||
/*
|
||||
@ -832,6 +832,7 @@ vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int count,
|
||||
}
|
||||
|
||||
bp = uma_zalloc(vnode_pbuf_zone, M_WAITOK);
|
||||
MPASS((bp->b_flags & B_MAXPHYS) != 0);
|
||||
|
||||
/*
|
||||
* Get the underlying device blocks for the file with VOP_BMAP().
|
||||
@ -916,10 +917,10 @@ vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int count,
|
||||
* Check that total amount of pages fit into buf. Trim rbehind and
|
||||
* rahead evenly if not.
|
||||
*/
|
||||
if (rbehind + rahead + count > nitems(bp->b_pages)) {
|
||||
if (rbehind + rahead + count > atop(maxphys)) {
|
||||
int trim, sum;
|
||||
|
||||
trim = rbehind + rahead + count - nitems(bp->b_pages) + 1;
|
||||
trim = rbehind + rahead + count - atop(maxphys) + 1;
|
||||
sum = rbehind + rahead;
|
||||
if (rbehind == before) {
|
||||
/* Roundup rbehind trim to block size. */
|
||||
@ -930,9 +931,9 @@ vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int count,
|
||||
rbehind -= trim * rbehind / sum;
|
||||
rahead -= trim * rahead / sum;
|
||||
}
|
||||
KASSERT(rbehind + rahead + count <= nitems(bp->b_pages),
|
||||
("%s: behind %d ahead %d count %d", __func__,
|
||||
rbehind, rahead, count));
|
||||
KASSERT(rbehind + rahead + count <= atop(maxphys),
|
||||
("%s: behind %d ahead %d count %d maxphys %lu", __func__,
|
||||
rbehind, rahead, count, maxphys));
|
||||
|
||||
/*
|
||||
* Fill in the bp->b_pages[] array with requested and optional
|
||||
@ -1014,7 +1015,7 @@ vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int count,
|
||||
*a_rahead = bp->b_pgafter;
|
||||
|
||||
#ifdef INVARIANTS
|
||||
KASSERT(bp->b_npages <= nitems(bp->b_pages),
|
||||
KASSERT(bp->b_npages <= atop(maxphys),
|
||||
("%s: buf %p overflowed", __func__, bp));
|
||||
for (int j = 1, prev = 0; j < bp->b_npages; j++) {
|
||||
if (bp->b_pages[j] == bogus_page)
|
||||
|