Remove alignment requirements for KVA buffer mapping.

After r368124, vmapbuf() should happily map misaligned maxphys-sized buffers,
thanks to the extra page added to pbuf_zone.
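
Below is a minimal userland sketch of the arithmetic behind this change
(illustration only, not kernel code; PAGE_SIZE, the 128 KiB maxphys value,
and pages_needed() are assumptions for the example). A buffer that starts
mid-page spans one more page of KVA than an aligned buffer of the same
length, and that one extra page is exactly the slack r368124 added to each
pbuf_zone mapping.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE	4096UL		/* assumed 4 KiB pages */
#define PAGE_MASK	(PAGE_SIZE - 1)

static unsigned long
pages_needed(uintptr_t uaddr, size_t len)
{
	/*
	 * vmapbuf() truncates the start address to a page boundary,
	 * so the mapping must also cover the leading in-page offset.
	 */
	size_t span = (uaddr & PAGE_MASK) + len;

	return ((span + PAGE_MASK) / PAGE_SIZE);	/* round up */
}

int
main(void)
{
	size_t maxphys = 128 * 1024;	/* assumed value: 32 pages */

	/* Page-aligned buffer: exactly maxphys / PAGE_SIZE pages. */
	printf("%lu\n", pages_needed(0x100000, maxphys));	/* 32 */

	/*
	 * Misaligned buffer: one page more.  The extra pbuf_zone page
	 * makes this mapping fit, so the old fallback is unneeded.
	 */
	printf("%lu\n", pages_needed(0x100123, maxphys));	/* 33 */
	return (0);
}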
Author: Alexander Motin
Date:   2020-11-29 00:49:14 +00:00
Parent: a93d2db0e7
Commit: 9093e27cc1
Notes:  svn2git 2020-12-20 02:59:44 +00:00
        svn path=/head/; revision=368134


@@ -783,7 +783,6 @@ cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo,
 	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
 	u_int32_t lengths[CAM_PERIPH_MAXMAPS];
 	u_int32_t dirs[CAM_PERIPH_MAXMAPS];
-	bool misaligned[CAM_PERIPH_MAXMAPS];
 
 	bzero(mapinfo, sizeof(*mapinfo));
 	if (maxmap == 0)
@@ -901,17 +900,6 @@ cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo,
			    (long)(lengths[i]), (u_long)maxmap);
			return (E2BIG);
		}
-
-		/*
-		 * The userland data pointer passed in may not be page
-		 * aligned.  vmapbuf() truncates the address to a page
-		 * boundary, so if the address isn't page aligned, we'll
-		 * need enough space for the given transfer length, plus
-		 * whatever extra space is necessary to make it to the page
-		 * boundary.
-		 */
-		misaligned[i] = (lengths[i] +
-		    (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK) > maxphys);
	}
 
	/*
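
For concreteness, a sketch of the predicate deleted above (the expression
mirrors the removed code; the page size, types, and example values are
assumptions, since the kernel's vm_offset_t and maxphys are not available
in a standalone example):

#include <stdbool.h>
#include <stdint.h>

#define PAGE_MASK	(4096UL - 1)	/* assumed 4 KiB pages */

/*
 * True when the transfer length plus the buffer's offset within its
 * first page would overflow a maxphys-sized KVA mapping.
 */
static bool
was_misaligned(uintptr_t uaddr, uint32_t length, size_t maxphys)
{
	return (length + (uaddr & PAGE_MASK) > maxphys);
}

/*
 * Example (assumed values): length = 131072 = maxphys, uaddr = 0x100123:
 * 131072 + 0x123 (291) = 131363 > 131072, so misaligned[i] was true and
 * the request was forced onto the malloc+copy path before this commit.
 */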
@@ -934,7 +922,7 @@ cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo,
		 * small allocations malloc is backed by UMA, and so much
		 * cheaper on SMP systems.
		 */
-		if ((lengths[i] <= periph_mapmem_thresh || misaligned[i]) &&
+		if (lengths[i] <= periph_mapmem_thresh &&
		    ccb->ccb_h.func_code != XPT_MMC_IO) {
			*data_ptrs[i] = malloc(lengths[i], M_CAMPERIPH,
			    M_WAITOK);
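
Finally, a sketch of the resulting path selection (periph_mapmem_thresh and
XPT_MMC_IO are names from the diff; the stand-in values and the simplified
shape are assumptions): with misaligned[] gone, only the transfer size and
the CCB function code choose between the two paths.

#include <stdbool.h>
#include <stdint.h>

static const uint32_t periph_mapmem_thresh = 65536;	/* assumed value */
#define XPT_MMC_IO	0x39	/* stand-in for the real CCB code */

/*
 * Small transfers still take the cheap UMA-backed malloc+copy path;
 * everything else, aligned or not, is now mapped with vmapbuf().
 */
static bool
use_malloc_path(uint32_t length, uint32_t func_code)
{
	return (length <= periph_mapmem_thresh && func_code != XPT_MMC_IO);
}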