isci(4): Use controller->lock for busdma tags.

isci(4) uses deferred loading.  Typically on amd64 and on i386 without
PAE the tag does not create any restrictions, but on i386 kernels built
with PAE page tables in a non-PAE config, the deferred-load callbacks
might be used.

Reported and tested by:	pho
Sponsored by:	The FreeBSD Foundation
MFC after:	2 weeks
This commit is contained in:
kib 2019-03-12 16:49:08 +00:00
parent 4153d090d9
commit 5954f59300
3 changed files with 13 additions and 7 deletions

View File

@ -408,7 +408,8 @@ isci_allocate_dma_buffer_callback(void *arg, bus_dma_segment_t *seg,
}
int
isci_allocate_dma_buffer(device_t device, struct ISCI_MEMORY *memory)
isci_allocate_dma_buffer(device_t device, struct ISCI_CONTROLLER *controller,
struct ISCI_MEMORY *memory)
{
uint32_t status;
@ -416,7 +417,8 @@ isci_allocate_dma_buffer(device_t device, struct ISCI_MEMORY *memory)
0x40 /* cacheline alignment */, 0x0, BUS_SPACE_MAXADDR,
BUS_SPACE_MAXADDR, NULL, NULL, memory->size,
0x1 /* we want physically contiguous */,
memory->size, 0, NULL, NULL, &memory->dma_tag);
memory->size, 0, busdma_lock_mutex, &controller->lock,
&memory->dma_tag);
if(status == ENOMEM) {
isci_log_message(0, "ISCI", "bus_dma_tag_create failed\n");

View File

@ -253,7 +253,8 @@ struct isci_softc {
int isci_allocate_resources(device_t device);
int isci_allocate_dma_buffer(device_t device, struct ISCI_MEMORY *memory);
int isci_allocate_dma_buffer(device_t device, struct ISCI_CONTROLLER *lock,
struct ISCI_MEMORY *memory);
void isci_remote_device_reset(struct ISCI_REMOTE_DEVICE *remote_device,
union ccb *ccb);

View File

@ -428,7 +428,8 @@ int isci_controller_allocate_memory(struct ISCI_CONTROLLER *controller)
uncached_controller_memory->size = sci_mdl_decorator_get_memory_size(
controller->mdl, SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS);
error = isci_allocate_dma_buffer(device, uncached_controller_memory);
error = isci_allocate_dma_buffer(device, controller,
uncached_controller_memory);
if (error != 0)
return (error);
@ -443,7 +444,8 @@ int isci_controller_allocate_memory(struct ISCI_CONTROLLER *controller)
SCI_MDE_ATTRIBUTE_CACHEABLE | SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS
);
error = isci_allocate_dma_buffer(device, cached_controller_memory);
error = isci_allocate_dma_buffer(device, controller,
cached_controller_memory);
if (error != 0)
return (error);
@ -456,7 +458,7 @@ int isci_controller_allocate_memory(struct ISCI_CONTROLLER *controller)
request_memory->size =
controller->queue_depth * isci_io_request_get_object_size();
error = isci_allocate_dma_buffer(device, request_memory);
error = isci_allocate_dma_buffer(device, controller, request_memory);
if (error != 0)
return (error);
@ -478,7 +480,8 @@ int isci_controller_allocate_memory(struct ISCI_CONTROLLER *controller)
status = bus_dma_tag_create(bus_get_dma_tag(device), 0x1, 0x0,
BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
isci_io_request_get_max_io_size(),
SCI_MAX_SCATTER_GATHER_ELEMENTS, max_segment_size, 0, NULL, NULL,
SCI_MAX_SCATTER_GATHER_ELEMENTS, max_segment_size, 0,
busdma_lock_mutex, &controller->lock,
&controller->buffer_dma_tag);
sci_pool_initialize(controller->request_pool);