smartpqi: update to version 4410.0.2005

This updates the smartpqi driver to Microsemi's latest code. This will
be the driver for FreeBSD 14 (with updates), but no MFC is planned.

Reviewed by: imp
Differential Revision: https://reviews.freebsd.org/D41550
John Hall 2023-08-24 15:18:16 -06:00 committed by Warner Losh
parent 725e4008ef
commit 7ea28254ec
26 changed files with 5413 additions and 2465 deletions


@ -1,5 +1,7 @@
.\" Copyright (c) 2018 Murthy Bhat
.\" All rights reserved.
.\" Copyright (C) 2019-2023, Microchip Technology Inc. and its subsidiaries
.\" Copyright (C) 2016-2018, Microsemi Corporation
.\" Copyright (C) 2016, PMC-Sierra, Inc.
.\" Written by John Hall <john.hall@microchip.com>
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
@ -22,25 +24,23 @@
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
.\" $FreeBSD$ stable/10/share/man/man4/smartpqi.4 195614 2017-01-11 08:10:18Z jkim $
.Dd April 6, 2018
.\" $Id$
.Dd $Mdocdate$
.Dt SMARTPQI 4
.Os
.Sh NAME
.Nm smartpqi
.Nd Microsemi smartpqi SCSI driver for PQI controllers
.Nd "Microchip Smart Storage SCSI driver"
.Sh SYNOPSIS
To compile this driver into the kernel,
place the following lines in your
kernel configuration file:
To compile this driver into the kernel, place these lines in the kernel
configuration file:
.Bd -ragged -offset indent
.Cd device pci
.Cd device scbus
.Cd device smartpqi
.Ed
.Pp
Alternatively, to load the driver as a
module at boot time, place the following line in
The driver can be loaded as a module at boot time by placing this line in
.Xr loader.conf 5 :
.Bd -literal -offset indent
smartpqi_load="YES"
@ -48,36 +48,33 @@ smartpqi_load="YES"
.Sh DESCRIPTION
The
.Nm
SCSI driver provides support for the new generation of PQI controllers from
Microsemi.
The
.Nm
driver is the first SCSI driver to implement the PQI queuing model.
.Pp
The
.Nm
driver will replace the aacraid driver for Adaptec Series 9 controllers.
.Pp
The
.Pa /dev/smartpqi?
device nodes provide access to the management interface of the controller.
One node exists per installed card.
driver provides support for Microchip Technology Inc. / Adaptec SmartRaid and
SmartHBA SATA/SAS/NVME PCIe controllers.
.Sh HARDWARE
Controllers supported by the
.Nm
driver include:
driver include, but are not limited to:
.Pp
.Bl -bullet -compact
.It
HPE Gen10 Smart Array Controller Family
.It
OEM Controllers based on the Microsemi Chipset
Adaptec SmartRaid and SmartHBA Controllers
.It
OEM Controllers based on the Microchip Technology Inc. SmartROC
and SmartIOC Chipsets
.El
.Sh FILES
.Bl -tag -width /boot/kernel/aac.ko -compact
.Bl -tag -width /boot/kernel/smartpqi.ko -compact
.It Pa /dev/smartpqi?
smartpqi management interface
.El
.Sh NOTES
.Ss Configuration
To configure a Microchip Smart Storage controller,
refer to the User Guide for the controller,
which can be found by searching for the specific controller at
https://www.microchip.com/design-centers/storage
.Sh SEE ALSO
.Xr kld 4 ,
.Xr linux 4 ,
@ -87,17 +84,13 @@ smartpqi management interface
.Xr loader.conf 5 ,
.Xr camcontrol 8 ,
.Xr kldload 8
.Rs
.%T "Microsemi Website"
.%U https://www.microsemi.com/
.Re
.Sh HISTORY
The
.Nm
driver first appeared in
.Fx 11.1 .
.Sh AUTHORS
.An Murthy Bhat
.Aq murthy.bhat@microsemi.com
.An John Hall
.Aq john.hall@microchip.com
.Sh BUGS
The controller is not actually paused on suspend/resume.


@ -372,6 +372,7 @@ dev/smartpqi/smartpqi_cam.c optional smartpqi
dev/smartpqi/smartpqi_cmd.c optional smartpqi
dev/smartpqi/smartpqi_discovery.c optional smartpqi
dev/smartpqi/smartpqi_event.c optional smartpqi
dev/smartpqi/smartpqi_features.c optional smartpqi
dev/smartpqi/smartpqi_helper.c optional smartpqi
dev/smartpqi/smartpqi_init.c optional smartpqi
dev/smartpqi/smartpqi_intr.c optional smartpqi


@ -1,5 +1,5 @@
/*-
* Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
* Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -49,12 +49,15 @@ update_sim_properties(struct cam_sim *sim, struct ccb_pathinq *cpi)
cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
cpi->hba_eng_cnt = 0;
cpi->max_lun = PQI_MAX_MULTILUN;
cpi->max_target = 1088;
cpi->max_target = MAX_TARGET_DEVICES;
cpi->maxio = (softs->pqi_cap.max_sg_elem - 1) * PAGE_SIZE;
cpi->initiator_id = 255;
strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
strlcpy(cpi->hba_vid, "Microsemi", HBA_IDLEN);
strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN-1);
cpi->sim_vid[sizeof(cpi->sim_vid)-1] = '\0';
strncpy(cpi->hba_vid, "Microsemi", HBA_IDLEN-1);
cpi->hba_vid[sizeof(cpi->hba_vid)-1] = '\0';
strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN-1);
cpi->dev_name[sizeof(cpi->dev_name)-1] = '\0';
cpi->unit_number = cam_sim_unit(sim);
cpi->bus_id = cam_sim_bus(sim);
cpi->base_transfer_speed = 1200000; /* Base bus speed in KB/sec */
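The string-copy changes in this hunk drop strlcpy in favor of a bounded strncpy plus an explicit terminator. A minimal sketch of the pattern, with src as a placeholder name (strlcpy does the same in one call; strncpy alone does not guarantee the NUL):

	char dst[SIM_IDLEN];                 /* fixed-size destination */

	strncpy(dst, src, sizeof(dst) - 1);  /* copies at most size-1 bytes */
	dst[sizeof(dst) - 1] = '\0';         /* terminate even on truncation */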
@ -73,7 +76,7 @@ update_sim_properties(struct cam_sim *sim, struct ccb_pathinq *cpi)
}
/*
* Get transport settings of the smartpqi adapter
* Get transport settings of the smartpqi adapter.
*/
static void
get_transport_settings(struct pqisrc_softstate *softs,
@ -84,7 +87,7 @@ get_transport_settings(struct pqisrc_softstate *softs,
struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
DBG_FUNC("IN\n");
cts->protocol = PROTO_SCSI;
cts->protocol_version = SCSI_REV_SPC4;
cts->transport = XPORT_SPI;
@ -106,10 +109,12 @@ void
os_add_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
union ccb *ccb;
uint64_t lun;
DBG_FUNC("IN\n");
if(softs->os_specific.sim_registered) {
lun = (device->is_multi_lun) ? CAM_LUN_WILDCARD : device->lun;
if(softs->os_specific.sim_registered) {
if ((ccb = xpt_alloc_ccb_nowait()) == NULL) {
DBG_ERR("rescan failed (can't allocate CCB)\n");
return;
@ -117,7 +122,7 @@ os_add_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
if (xpt_create_path(&ccb->ccb_h.path, NULL,
cam_sim_path(softs->os_specific.sim),
device->target, device->lun) != CAM_REQ_CMP) {
device->target, lun) != CAM_REQ_CMP) {
DBG_ERR("rescan failed (can't create path)\n");
xpt_free_ccb(ccb);
return;
@ -134,20 +139,25 @@ os_add_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
void
os_remove_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
struct cam_path *tmppath;
struct cam_path *tmppath = NULL;
uint64_t lun;
DBG_FUNC("IN\n");
lun = (device->is_multi_lun) ? CAM_LUN_WILDCARD : device->lun;
if(softs->os_specific.sim_registered) {
if (xpt_create_path(&tmppath, NULL,
if (xpt_create_path(&tmppath, NULL,
cam_sim_path(softs->os_specific.sim),
device->target, device->lun) != CAM_REQ_CMP) {
DBG_ERR("unable to create path for async event");
device->target, lun) != CAM_REQ_CMP) {
DBG_ERR("unable to create path for async event\n");
return;
}
xpt_async(AC_LOST_DEVICE, tmppath, NULL);
xpt_free_path(tmppath);
softs->device_list[device->target][device->lun] = NULL;
/* softs->device_list[device->target][device->lun] = NULL; */
int index = pqisrc_find_device_list_index(softs,device);
if (index >= 0 && index < PQI_MAX_DEVICES)
softs->dev_list[index] = NULL;
pqisrc_free_device(softs, device);
}
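This hunk shows the recurring theme of the update: the two-dimensional device_list[target][lun] table is retired in favor of a flat dev_list array addressed through index helpers. A sketch of the lookup shape used throughout (the helper's internals are not part of this hunk and are assumed):

	pqi_scsi_dev_t *dvp = NULL;
	int index = pqisrc_find_btl_list_index(softs, softs->bus_id, target, lun);

	if (index != INVALID_ELEM && index < PQI_MAX_DEVICES)
		dvp = softs->dev_list[index];	/* NULL slot: no such device */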
@ -191,22 +201,20 @@ pqi_synch_request(rcb_t *rcb)
return;
if (rcb->bcount != 0 ) {
if (rcb->data_dir == SOP_DATA_DIR_FROM_DEVICE)
if ((rcb->cm_ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
rcb->cm_datamap,
BUS_DMASYNC_POSTREAD);
if (rcb->data_dir == SOP_DATA_DIR_TO_DEVICE)
rcb->cm_datamap,BUS_DMASYNC_POSTREAD);
if ((rcb->cm_ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
rcb->cm_datamap,
BUS_DMASYNC_POSTWRITE);
rcb->cm_datamap,BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(softs->os_specific.pqi_buffer_dmat,
rcb->cm_datamap);
rcb->cm_datamap);
}
rcb->cm_flags &= ~PQI_CMD_MAPPED;
if(rcb->sgt && rcb->nseg)
os_mem_free(rcb->softs, (void*)rcb->sgt,
rcb->nseg*sizeof(sgt_t));
rcb->nseg*sizeof(sgt_t));
DBG_IO("OUT\n");
}
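The sync direction now comes from the CCB's CAM_DIR flags rather than the driver-private SOP direction field, so the busdma maintenance always matches what CAM requested. The post-I/O pattern in sketch form, where dmat and map stand in for the tag and map used above:

	int dir = rcb->cm_ccb->ccb_h.flags & CAM_DIR_MASK;

	if (dir == CAM_DIR_IN)		/* device-to-host: invalidate */
		bus_dmamap_sync(dmat, map, BUS_DMASYNC_POSTREAD);
	else if (dir == CAM_DIR_OUT)	/* host-to-device: write back */
		bus_dmamap_sync(dmat, map, BUS_DMASYNC_POSTWRITE);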
@ -242,6 +250,7 @@ smartpqi_fix_ld_inquiry(pqisrc_softstate_t *softs, struct ccb_scsiio *csio)
cdb = (csio->ccb_h.flags & CAM_CDB_POINTER) ?
(uint8_t *)csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes;
if(cdb[0] == INQUIRY &&
(cdb[1] & SI_EVPD) == 0 &&
(csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN &&
@ -249,23 +258,31 @@ smartpqi_fix_ld_inquiry(pqisrc_softstate_t *softs, struct ccb_scsiio *csio)
inq = (struct scsi_inquiry_data *)csio->data_ptr;
device = softs->device_list[csio->ccb_h.target_id][csio->ccb_h.target_lun];
/* device = softs->device_list[csio->ccb_h.target_id][csio->ccb_h.target_lun]; */
int target = csio->ccb_h.target_id;
int lun = csio->ccb_h.target_lun;
int index = pqisrc_find_btl_list_index(softs,softs->bus_id,target,lun);
if (index != INVALID_ELEM)
device = softs->dev_list[index];
/* Let the disks be probed and dealt with via CAM. Only for LD
let it fall through and inquiry be tweaked */
if (!device || !pqisrc_is_logical_device(device) ||
(device->devtype != DISK_DEVICE) ||
if( !device || !pqisrc_is_logical_device(device) ||
(device->devtype != DISK_DEVICE) ||
pqisrc_is_external_raid_device(device)) {
return;
}
strncpy(inq->vendor, device->vendor,
SID_VENDOR_SIZE);
SID_VENDOR_SIZE-1);
inq->vendor[sizeof(inq->vendor)-1] = '\0';
strncpy(inq->product,
pqisrc_raidlevel_to_string(device->raid_level),
SID_PRODUCT_SIZE);
SID_PRODUCT_SIZE-1);
inq->product[sizeof(inq->product)-1] = '\0';
strncpy(inq->revision, device->volume_offline?"OFF":"OK",
SID_REVISION_SIZE);
SID_REVISION_SIZE-1);
inq->revision[sizeof(inq->revision)-1] = '\0';
}
DBG_FUNC("OUT\n");
@ -308,7 +325,7 @@ os_io_response_success(rcb_t *rcb)
if (csio == NULL)
panic("csio is null");
rcb->status = REQUEST_SUCCESS;
rcb->status = PQI_STATUS_SUCCESS;
csio->ccb_h.status = CAM_REQ_CMP;
pqi_complete_scsi_io(csio, rcb);
@ -383,10 +400,11 @@ os_raid_response_error(rcb_t *rcb, raid_path_error_info_elem_t *err_info)
uint8_t *sense_data = NULL;
if (sense_data_len)
sense_data = err_info->data;
copy_sense_data_to_csio(csio, sense_data, sense_data_len);
csio->ccb_h.status = CAM_SCSI_STATUS_ERROR
| CAM_AUTOSNS_VALID
| CAM_REQ_CMP_ERR;
| CAM_AUTOSNS_VALID
| CAM_REQ_CMP_ERR;
}
break;
@ -425,7 +443,7 @@ os_aio_response_error(rcb_t *rcb, aio_path_error_info_elem_t *err_info)
if (rcb == NULL)
panic("rcb is null");
rcb->status = REQUEST_SUCCESS;
rcb->status = PQI_STATUS_SUCCESS;
csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;
if (csio == NULL)
panic("csio is null");
@ -462,7 +480,7 @@ os_aio_response_error(rcb_t *rcb, aio_path_error_info_elem_t *err_info)
/* Timed out TMF response comes here */
if (rcb->tm_req) {
rcb->req_pending = false;
rcb->status = REQUEST_SUCCESS;
rcb->status = PQI_STATUS_SUCCESS;
DBG_ERR("AIO Disabled for TMF\n");
return;
}
@ -484,14 +502,14 @@ os_aio_response_error(rcb_t *rcb, aio_path_error_info_elem_t *err_info)
case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
DBG_ERR("PQI_AIO_SERV_RESPONSE_TMF %s\n",
(err_info->service_resp == PQI_AIO_SERV_RESPONSE_TMF_COMPLETE) ? "COMPLETE" : "SUCCEEDED");
rcb->status = REQUEST_SUCCESS;
rcb->status = PQI_STATUS_SUCCESS;
rcb->req_pending = false;
return;
case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
DBG_ERR("PQI_AIO_SERV_RESPONSE_TMF %s\n",
(err_info->service_resp == PQI_AIO_SERV_RESPONSE_TMF_REJECTED) ? "REJECTED" : "INCORRECT LUN");
rcb->status = REQUEST_FAILED;
rcb->status = PQI_STATUS_TIMEOUT;
rcb->req_pending = false;
return;
default:
@ -536,8 +554,9 @@ pqi_request_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
pqisrc_softstate_t *softs = rcb->softs;
union ccb *ccb;
if (error || nseg > softs->pqi_cap.max_sg_elem) {
DBG_ERR_BTL(rcb->dvp, "map failed err = %d or nseg(%d) > sgelem(%d)\n",
if (error || nseg > softs->pqi_cap.max_sg_elem)
{
DBG_ERR_BTL(rcb->dvp, "map failed err = %d or nseg(%d) > sgelem(%u)\n",
error, nseg, softs->pqi_cap.max_sg_elem);
goto error_io;
}
@ -556,15 +575,15 @@ pqi_request_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
rcb->sgt[i].flags = 0;
}
if (rcb->data_dir == SOP_DATA_DIR_FROM_DEVICE)
bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
rcb->cm_datamap, BUS_DMASYNC_PREREAD);
if (rcb->data_dir == SOP_DATA_DIR_TO_DEVICE)
bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
rcb->cm_datamap, BUS_DMASYNC_PREWRITE);
if ((rcb->cm_ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
rcb->cm_datamap, BUS_DMASYNC_PREREAD);
if ((rcb->cm_ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
rcb->cm_datamap, BUS_DMASYNC_PREWRITE);
/* Call IO functions depending on pd or ld */
rcb->status = REQUEST_PENDING;
rcb->status = PQI_STATUS_FAILURE;
error = pqisrc_build_send_io(softs, rcb);
@ -607,7 +626,7 @@ pqi_map_request(rcb_t *rcb)
bsd_status = bus_dmamap_load_ccb(softs->os_specific.pqi_buffer_dmat,
rcb->cm_datamap, ccb, pqi_request_map_helper, rcb, 0);
if (bsd_status != BSD_SUCCESS && bsd_status != EINPROGRESS) {
DBG_ERR_BTL(rcb->dvp, "bus_dmamap_load_ccb failed, return status = %d transfer length = %d\n",
DBG_ERR_BTL(rcb->dvp, "bus_dmamap_load_ccb failed, return status = %d transfer length = %u\n",
bsd_status, rcb->bcount);
return bsd_status;
}
@ -618,7 +637,7 @@ pqi_map_request(rcb_t *rcb)
* busdma.
*/
/* Call IO functions depending on pd or ld */
rcb->status = REQUEST_PENDING;
rcb->status = PQI_STATUS_FAILURE;
if (pqisrc_build_send_io(softs, rcb) != PQI_STATUS_SUCCESS) {
bsd_status = EIO;
@ -695,7 +714,7 @@ smartpqi_lun_rescan(struct pqisrc_softstate *softs, int target,
return;
}
bzero(ccb, sizeof(union ccb));
memset(ccb, 0, sizeof(union ccb));
xpt_setup_ccb(&ccb->ccb_h, path, 5);
ccb->ccb_h.func_code = XPT_SCAN_LUN;
ccb->ccb_h.cbfcnp = smartpqi_lunrescan_cb;
@ -712,15 +731,17 @@ smartpqi_lun_rescan(struct pqisrc_softstate *softs, int target,
void
smartpqi_target_rescan(struct pqisrc_softstate *softs)
{
int target = 0, lun = 0;
pqi_scsi_dev_t *device;
int index;
DBG_FUNC("IN\n");
for(target = 0; target < PQI_MAX_DEVICES; target++){
for(lun = 0; lun < PQI_MAX_MULTILUN; lun++){
if(softs->device_list[target][lun]){
smartpqi_lun_rescan(softs, target, lun);
}
for(index = 0; index < PQI_MAX_DEVICES; index++){
/* if(softs->device_list[target][lun]){ */
if(softs->dev_list[index] != NULL) {
device = softs->dev_list[index];
DBG_INFO("calling smartpqi_lun_rescan with TL = %d:%d\n",device->target,device->lun);
smartpqi_lun_rescan(softs, device->target, device->lun);
}
}
@ -758,7 +779,7 @@ void
os_complete_outstanding_cmds_nodevice(pqisrc_softstate_t *softs)
{
int tag = 0;
pqi_scsi_dev_t *dvp = NULL;
pqi_scsi_dev_t *dvp = NULL;
DBG_FUNC("IN\n");
@ -771,7 +792,6 @@ os_complete_outstanding_cmds_nodevice(pqisrc_softstate_t *softs)
pqi_complete_scsi_io(&prcb->cm_ccb->csio, prcb);
if (dvp)
pqisrc_decrement_device_active_io(softs, dvp);
}
}
@ -785,21 +805,36 @@ static int
pqisrc_io_start(struct cam_sim *sim, union ccb *ccb)
{
rcb_t *rcb;
uint32_t tag, no_transfer = 0;
uint32_t tag;
pqisrc_softstate_t *softs = (struct pqisrc_softstate *)
cam_sim_softc(sim);
int32_t error;
pqi_scsi_dev_t *dvp;
int target, lun, index;
DBG_FUNC("IN\n");
if (softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun] == NULL) {
/* if( softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun] == NULL ) { */
target = ccb->ccb_h.target_id;
lun = ccb->ccb_h.target_lun;
index = pqisrc_find_btl_list_index(softs,softs->bus_id,target,lun);
if (index == INVALID_ELEM) {
ccb->ccb_h.status = CAM_DEV_NOT_THERE;
DBG_INFO("Invalid index/device!!!, Device BTL %u:%d:%d\n", softs->bus_id, target, lun);
return ENXIO;
}
if( softs->dev_list[index] == NULL ) {
ccb->ccb_h.status = CAM_DEV_NOT_THERE;
DBG_INFO("Device = %d not there\n", ccb->ccb_h.target_id);
return ENXIO;
}
dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun];
/* DBG_INFO("starting IO on BTL = %d:%d:%d index = %d\n",softs->bus_id,target,lun,index); */
/* dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun]; */
dvp = softs->dev_list[index];
/* Check controller state */
if (IN_PQI_RESET(softs)) {
ccb->ccb_h.status = CAM_SCSI_BUS_RESET
@ -827,7 +862,7 @@ pqisrc_io_start(struct cam_sim *sim, union ccb *ccb)
}
tag = pqisrc_get_tag(&softs->taglist);
if (tag == INVALID_ELEM) {
if( tag == INVALID_ELEM ) {
DBG_ERR("Get Tag failed\n");
xpt_freeze_simq(softs->os_specific.sim, 1);
softs->os_specific.pqi_flags |= PQI_FLAG_BUSY;
@ -835,7 +870,7 @@ pqisrc_io_start(struct cam_sim *sim, union ccb *ccb)
return EIO;
}
DBG_IO("tag = %d &softs->taglist : %p\n", tag, &softs->taglist);
DBG_IO("tag = %u &softs->taglist : %p\n", tag, &softs->taglist);
rcb = &softs->rcb[tag];
os_reset_rcb(rcb);
@ -844,30 +879,13 @@ pqisrc_io_start(struct cam_sim *sim, union ccb *ccb)
rcb->cmdlen = ccb->csio.cdb_len;
ccb->ccb_h.sim_priv.entries[0].ptr = rcb;
switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
case CAM_DIR_IN:
rcb->data_dir = SOP_DATA_DIR_FROM_DEVICE;
break;
case CAM_DIR_OUT:
rcb->data_dir = SOP_DATA_DIR_TO_DEVICE;
break;
case CAM_DIR_NONE:
no_transfer = 1;
break;
default:
DBG_ERR("Unknown Dir\n");
break;
}
rcb->cm_ccb = ccb;
rcb->dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun];
/* rcb->dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun]; */
rcb->dvp = softs->dev_list[index];
rcb->cm_data = (void *)ccb->csio.data_ptr;
rcb->bcount = ccb->csio.dxfer_len;
if (!no_transfer) {
rcb->cm_data = (void *)ccb->csio.data_ptr;
rcb->bcount = ccb->csio.dxfer_len;
} else {
rcb->cm_data = NULL;
rcb->bcount = 0;
}
/*
* Submit the request to the adapter.
*
@ -900,7 +918,7 @@ static inline int
pqi_tmf_status_to_bsd_tmf_status(int pqi_status, rcb_t *rcb)
{
if (PQI_STATUS_SUCCESS == pqi_status &&
REQUEST_SUCCESS == rcb->status)
PQI_STATUS_SUCCESS == rcb->status)
return BSD_SUCCESS;
else
return EIO;
@ -912,8 +930,8 @@ pqi_tmf_status_to_bsd_tmf_status(int pqi_status, rcb_t *rcb)
static int
pqisrc_scsi_abort_task(pqisrc_softstate_t *softs, union ccb *ccb)
{
struct ccb_hdr *ccb_h = &ccb->ccb_h;
rcb_t *rcb = NULL;
struct ccb_hdr *ccb_h = &ccb->ccb_h;
rcb_t *prcb = ccb->ccb_h.sim_priv.entries[0].ptr;
uint32_t tag;
int rval;
@ -924,7 +942,7 @@ pqisrc_scsi_abort_task(pqisrc_softstate_t *softs, union ccb *ccb)
rcb = &softs->rcb[tag];
rcb->tag = tag;
if (!rcb->dvp) {
if (rcb->dvp == NULL) {
DBG_ERR("dvp is null, tmf type : 0x%x\n", ccb_h->func_code);
rval = ENXIO;
goto error_tmf;
@ -963,8 +981,9 @@ pqisrc_scsi_abort_task_set(pqisrc_softstate_t *softs, union ccb *ccb)
tag = pqisrc_get_tag(&softs->taglist);
rcb = &softs->rcb[tag];
rcb->tag = tag;
rcb->cm_ccb = ccb;
if (!rcb->dvp) {
if (rcb->dvp == NULL) {
DBG_ERR("dvp is null, tmf type : 0x%x\n", ccb_h->func_code);
rval = ENXIO;
goto error_tmf;
@ -992,24 +1011,38 @@ pqisrc_scsi_abort_task_set(pqisrc_softstate_t *softs, union ccb *ccb)
static int
pqisrc_target_reset( pqisrc_softstate_t *softs, union ccb *ccb)
{
/* pqi_scsi_dev_t *devp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun]; */
struct ccb_hdr *ccb_h = &ccb->ccb_h;
pqi_scsi_dev_t *devp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun];
rcb_t *rcb = NULL;
uint32_t tag;
int rval;
int bus, target, lun;
int index;
DBG_FUNC("IN\n");
bus = softs->bus_id;
target = ccb->ccb_h.target_id;
lun = ccb->ccb_h.target_lun;
index = pqisrc_find_btl_list_index(softs,bus,target,lun);
if (index == INVALID_ELEM) {
DBG_ERR("device not found at BTL %d:%d:%d\n",bus,target,lun);
return (-1);
}
pqi_scsi_dev_t *devp = softs->dev_list[index];
if (devp == NULL) {
DBG_ERR("bad target %d, tmf type : 0x%x\n", ccb_h->target_id, ccb_h->func_code);
return ENXIO;
return (-1);
}
tag = pqisrc_get_tag(&softs->taglist);
rcb = &softs->rcb[tag];
rcb->tag = tag;
devp->reset_in_progress = true;
rcb->cm_ccb = ccb;
rcb->tm_req = true;
@ -1017,6 +1050,7 @@ pqisrc_target_reset( pqisrc_softstate_t *softs, union ccb *ccb)
SOP_TASK_MANAGEMENT_LUN_RESET);
rval = pqi_tmf_status_to_bsd_tmf_status(rval, rcb);
devp->reset_in_progress = false;
os_reset_rcb(rcb);
@ -1137,9 +1171,9 @@ smartpqi_adjust_queue_depth(struct cam_path *path, uint32_t queue_depth)
{
struct ccb_relsim crs;
DBG_INFO("IN\n");
DBG_FUNC("IN\n");
memset(&crs, 0, sizeof(crs));
memset(&crs, 0, sizeof(struct ccb_relsim));
xpt_setup_ccb(&crs.ccb_h, path, 5);
crs.ccb_h.func_code = XPT_REL_SIMQ;
crs.ccb_h.flags = CAM_DEV_QFREEZE;
@ -1150,7 +1184,7 @@ smartpqi_adjust_queue_depth(struct cam_path *path, uint32_t queue_depth)
printf("XPT_REL_SIMQ failed stat=%d\n", crs.ccb_h.status);
}
DBG_INFO("OUT\n");
DBG_FUNC("OUT\n");
}
/*
@ -1175,15 +1209,20 @@ smartpqi_async(void *callback_arg, u_int32_t code,
}
uint32_t t_id = cgd->ccb_h.target_id;
if (t_id <= (PQI_CTLR_INDEX - 1)) {
/* if (t_id <= (PQI_CTLR_INDEX - 1)) { */
if (t_id >= PQI_CTLR_INDEX) {
if (softs != NULL) {
pqi_scsi_dev_t *dvp = softs->device_list[t_id][cgd->ccb_h.target_lun];
if (dvp == NULL) {
DBG_ERR("Target is null, target id=%d\n", t_id);
break;
/* pqi_scsi_dev_t *dvp = softs->device_list[t_id][cgd->ccb_h.target_lun]; */
int lun = cgd->ccb_h.target_lun;
int index = pqisrc_find_btl_list_index(softs,softs->bus_id,t_id,lun);
if (index != INVALID_ELEM) {
pqi_scsi_dev_t *dvp = softs->dev_list[index];
if (dvp == NULL) {
DBG_ERR("Target is null, target id=%u\n", t_id);
break;
}
smartpqi_adjust_queue_depth(path, dvp->queue_depth);
}
smartpqi_adjust_queue_depth(path,
dvp->queue_depth);
}
}
break;
@ -1203,7 +1242,7 @@ register_sim(struct pqisrc_softstate *softs, int card_index)
{
int max_transactions;
union ccb *ccb = NULL;
int error;
cam_status status = 0;
struct ccb_setasync csa;
struct cam_sim *sim;
@ -1230,9 +1269,9 @@ register_sim(struct pqisrc_softstate *softs, int card_index)
softs->os_specific.sim = sim;
mtx_lock(&softs->os_specific.cam_lock);
error = xpt_bus_register(sim, softs->os_specific.pqi_dev, 0);
if (error != CAM_SUCCESS) {
DBG_ERR("xpt_bus_register failed errno %d\n", error);
status = xpt_bus_register(sim, softs->os_specific.pqi_dev, 0);
if (status != CAM_SUCCESS) {
DBG_ERR("xpt_bus_register failed status=%d\n", status);
cam_sim_free(softs->os_specific.sim, FALSE);
cam_simq_free(softs->os_specific.devq);
mtx_unlock(&softs->os_specific.cam_lock);
@ -1258,11 +1297,11 @@ register_sim(struct pqisrc_softstate *softs, int card_index)
return ENXIO;
}
/*
* Callback to set the queue depth per target which is
* Callback to set the queue depth per target which is
* derived from the FW.
*/
*/
softs->os_specific.path = ccb->ccb_h.path;
memset(&csa, 0, sizeof(csa));
memset(&csa, 0, sizeof(struct ccb_setasync));
xpt_setup_ccb(&csa.ccb_h, softs->os_specific.path, 5);
csa.ccb_h.func_code = XPT_SASYNC_CB;
csa.event_enable = AC_FOUND_DEVICE;
@ -1270,12 +1309,12 @@ register_sim(struct pqisrc_softstate *softs, int card_index)
csa.callback_arg = softs;
xpt_action((union ccb *)&csa);
if (csa.ccb_h.status != CAM_REQ_CMP) {
DBG_ERR("Unable to register smartpqi_aysnc handler: %d!\n",
DBG_ERR("Unable to register smartpqi_aysnc handler: %d!\n",
csa.ccb_h.status);
}
mtx_unlock(&softs->os_specific.cam_lock);
DBG_INFO("OUT\n");
DBG_FUNC("OUT\n");
return BSD_SUCCESS;
}
@ -1287,15 +1326,14 @@ void
deregister_sim(struct pqisrc_softstate *softs)
{
struct ccb_setasync csa;
DBG_FUNC("IN\n");
if (softs->os_specific.mtx_init) {
mtx_lock(&softs->os_specific.cam_lock);
}
memset(&csa, 0, sizeof(csa));
memset(&csa, 0, sizeof(struct ccb_setasync));
xpt_setup_ccb(&csa.ccb_h, softs->os_specific.path, 5);
csa.ccb_h.func_code = XPT_SASYNC_CB;
csa.event_enable = 0;
@ -1331,23 +1369,23 @@ deregister_sim(struct pqisrc_softstate *softs)
void
os_rescan_target(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
struct cam_path *tmppath;
struct cam_path *tmppath = NULL;
DBG_FUNC("IN\n");
DBG_FUNC("IN\n");
if(softs->os_specific.sim_registered) {
if (xpt_create_path(&tmppath, NULL,
cam_sim_path(softs->os_specific.sim),
device->target, device->lun) != CAM_REQ_CMP) {
DBG_ERR("unable to create path for async event!!! Bus: %d Target: %d Lun: %d\n",
device->bus, device->target, device->lun);
return;
}
xpt_async(AC_INQ_CHANGED, tmppath, NULL);
xpt_free_path(tmppath);
}
if(softs->os_specific.sim_registered) {
if (xpt_create_path(&tmppath, NULL,
cam_sim_path(softs->os_specific.sim),
device->target, device->lun) != CAM_REQ_CMP) {
DBG_ERR("unable to create path for async event!!! Bus: %d Target: %d Lun: %d\n",
device->bus, device->target, device->lun);
return;
}
xpt_async(AC_INQ_CHANGED, tmppath, NULL);
xpt_free_path(tmppath);
}
device->scsi_rescan = false;
device->scsi_rescan = false;
DBG_FUNC("OUT\n");
DBG_FUNC("OUT\n");
}


@ -1,5 +1,5 @@
/*-
* Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
* Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -36,10 +36,14 @@ pqisrc_submit_cmnd(pqisrc_softstate_t *softs, ib_queue_t *ib_q, void *req)
char *slot = NULL;
uint32_t offset;
iu_header_t *hdr = (iu_header_t *)req;
/*TODO : Can be fixed a size copying of IU ? */
uint32_t iu_len = hdr->iu_length + 4 ; /* header size */
int i = 0;
DBG_FUNC("IN\n");
/* The code below assumes we only take 1 element (no spanning) */
ASSERT(iu_len <= ib_q->elem_size);
PQI_LOCK(&ib_q->lock);
/* Check queue full */
@ -55,15 +59,15 @@ pqisrc_submit_cmnd(pqisrc_softstate_t *softs, ib_queue_t *ib_q, void *req)
/* Copy the IU */
memcpy(slot, req, iu_len);
DBG_INFO("IU : \n");
DBG_IO("IU : \n");
for(i = 0; i< iu_len; i++)
DBG_INFO(" IU [ %d ] : %x\n", i, *((unsigned char *)(slot + i)));
DBG_IO(" IU [ %d ] : %x\n", i, *((unsigned char *)(slot + i)));
/* Update the local PI */
ib_q->pi_local = (ib_q->pi_local + 1) % ib_q->num_elem;
DBG_INFO("ib_q->pi_local : %x IU size : %d\n",
DBG_IO("ib_q->pi_local : %x IU size : %d\n",
ib_q->pi_local, hdr->iu_length);
DBG_INFO("*ib_q->ci_virt_addr: %x\n",
DBG_IO("*ib_q->ci_virt_addr: %x\n",
*(ib_q->ci_virt_addr));
/* Inform the fw about the new IU */
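The "Check queue full" branch elided above guards the producer index before the copy. A plausible reconstruction, assuming the usual one-slot-open ring convention (this is a sketch, not the hidden code itself):

	/* Full if advancing the producer would catch the consumer. */
	if (((ib_q->pi_local + 1) % ib_q->num_elem) == *(ib_q->ci_virt_addr)) {
		PQI_UNLOCK(&ib_q->lock);
		return PQI_STATUS_QFULL;
	}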


@ -1,5 +1,5 @@
/*-
* Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
* Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -27,34 +27,65 @@
#ifndef _PQI_DEFINES_H
#define _PQI_DEFINES_H
#define PQI_STATUS_FAILURE -1
#define PQI_STATUS_TIMEOUT -2
#define PQI_STATUS_QFULL -3
#define PQI_STATUS_SUCCESS 0
#define SIS_POLL_WAIT
#define DEVICE_HINT
#ifndef CT_ASSERT
/* If the OS hasn't specified a preferred compile time assert, create one */
#if !defined(__C_ASSERT__)
#define CT_ASSERT(e) extern char __assert_test_case[1 - (2*(!(e)))]
#else
#define CT_ASSERT(e) typedef char __C_ASSERT__[(e)?1:-1]
#endif
#endif
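Either branch of CT_ASSERT turns a false condition into an illegally sized array or typedef, so a bad assumption fails the build instead of the boot. Illustrative use, assuming the 4-byte IU header noted elsewhere in this change:

	CT_ASSERT(sizeof(iu_header_t) == PQI_REQUEST_HEADER_LENGTH);	/* compiles */
	/* CT_ASSERT(sizeof(iu_header_t) == 5); would fail to compile */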
#define PQI_STATUS_FAILURE -1
#define PQI_STATUS_TIMEOUT -2
#define PQI_STATUS_QFULL -3
#define PQI_STATUS_SUCCESS 0
#define BITS_PER_BYTE 8
#define PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE 0
#define PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE 1
#define PQI_REQUEST_HEADER_LENGTH 4
/* Maximum timeout for internal command completion */
#define TIMEOUT_INFINITE ((uint32_t) (-1))
#define PQISRC_CMD_TIMEOUT TIMEOUT_INFINITE
#define TIMEOUT_INFINITE ((uint32_t) (-1))
#define PQISRC_CMD_TIMEOUT TIMEOUT_INFINITE
#define PQISRC_PASSTHROUGH_CMD_TIMEOUT PQISRC_CMD_TIMEOUT
/* Delay in milli seconds */
#define PQISRC_TMF_TIMEOUT (OS_TMF_TIMEOUT_SEC * 1000)
#define PQISRC_TMF_TIMEOUT (OS_TMF_TIMEOUT_SEC * 1000)
/* Delay in micro seconds */
#define PQISRC_PENDING_IO_TIMEOUT_USEC 30000000 /* 30 seconds */
#define PQISRC_PENDING_IO_TIMEOUT_USEC 30000000 /* 30 seconds */
/* If want to disable atomic operations on device active io, then set to zero */
#define PQISRC_DEVICE_IO_COUNTER 1
#define PQISRC_DEVICE_IO_COUNTER 1
#define INVALID_ELEM 0xffff
/* #define SHARE_EVENT_QUEUE_FOR_IO 1 */
#define INVALID_ELEM 0xffff
#ifndef MIN
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#endif
#ifndef MAX
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#endif
#define PQISRC_ROUNDUP(x, y) (((x) + (y) - 1) / (y) * (y))
#define PQISRC_DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))
/* defines for stream detection */
#define TICKS ticks
#ifndef INT_MAX
#define INT_MAX 0x7FFFFFFF
#endif
#define PQISRC_ROUND_UP(x, y) (((x) + (y) - 1) / (y) * (y))
#define PQISRC_ROUND_DOWN(x, y) (((x) / (y)) * (y))
#define PQISRC_DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))
#if !defined(offsetofend)
#define offsetofend(TYPE, MEMBER) \
(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))
#endif
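The rounding helpers rely on truncating integer division; a quick worked example with a 64-byte boundary:

	/* PQISRC_ROUND_UP(100, 64)     == 128	((100 + 63) / 64 * 64) */
	/* PQISRC_ROUND_DOWN(100, 64)   == 64	((100 / 64) * 64)      */
	/* PQISRC_DIV_ROUND_UP(100, 64) == 2	((100 + 63) / 64)      */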
#define ALIGN_BOUNDARY(a, n) { \
if (a % n) \
@ -98,7 +129,7 @@ enum INTR_TYPE {
LOCK_SLEEP
};
#define LOCKNAME_SIZE 32
#define LOCKNAME_SIZE 32
#define INTR_TYPE_NONE 0x0
#define INTR_TYPE_FIXED 0x1
@ -108,17 +139,12 @@ enum INTR_TYPE {
#define SIS_ENABLE_INTX 0x80
#define PQISRC_LEGACY_INTX_MASK 0x1
#define DMA_TO_VIRT(mem) ((mem)->virt_addr)
#define DMA_PHYS_LOW(mem) (((mem)->dma_addr) & 0x00000000ffffffff)
#define DMA_PHYS_HIGH(mem) ((((mem)->dma_addr) & 0xffffffff00000000) >> 32)
#define DMA_TO_VIRT(mem) ((mem)->virt_addr)
#define DMA_PHYS_LOW(mem) (((mem)->dma_addr) & 0x00000000ffffffff)
#define DMA_PHYS_HIGH(mem) ((((mem)->dma_addr) & 0xffffffff00000000) >> 32)
typedef enum REQUEST_STATUS {
REQUEST_SUCCESS = 0,
REQUEST_PENDING = -1,
REQUEST_FAILED = -2,
}REQUEST_STATUS_T;
typedef enum IO_PATH {
UNKNOWN_PATH,
AIO_PATH,
RAID_PATH
}IO_PATH_T;
@ -179,44 +205,84 @@ typedef enum controller_state {
#define PQI_CTRL_KERNEL_UP_AND_RUNNING 0x80
#define PQI_CTRL_KERNEL_PANIC 0x100
#define SIS_CTL_TO_HOST_DB_DISABLE_ALL 0xFFFFFFFF
#define SIS_CTL_TO_HOST_DB_CLEAR 0x00001000
#define SIS_CMD_SUBMIT 0x00000200 /* Bit 9 */
#define SIS_CMD_COMPLETE 0x00001000 /* Bit 12 */
#define SIS_CTL_TO_HOST_DB_DISABLE_ALL 0xFFFFFFFF
#define SIS_CTL_TO_HOST_DB_CLEAR 0x00001000
#define SIS_CMD_SUBMIT 0x00000200 /* Bit 9 */
#define SIS_CMD_COMPLETE 0x00001000 /* Bit 12 */
#define SIS_CMD_STATUS_SUCCESS 0x1
/* PQI specific */
/* defines */
#define PQISRC_PQI_REG_OFFSET 0x4000
#define PQISRC_MAX_OUTSTANDING_REQ 4096
#define PQISRC_MAX_ADMIN_IB_QUEUE_ELEM_NUM 16
#define PQISRC_MAX_ADMIN_OB_QUEUE_ELEM_NUM 16
#define PQISRC_PQI_REG_OFFSET 0x4000
#define PQI_MIN_OP_IB_QUEUE_ID 1
#define PQI_OP_EVENT_QUEUE_ID 1
#define PQI_MIN_OP_OB_QUEUE_ID 2
#define PQISRC_MAX_SUPPORTED_OP_IB_Q 128
/* Number of Queues this driver compile can potentially support */
#define PQISRC_MAX_SUPPORTED_OP_IB_Q 128
#define PQISRC_MAX_SUPPORTED_OP_RAID_IB_Q (PQISRC_MAX_SUPPORTED_OP_IB_Q / 2)
#define PQISRC_MAX_SUPPORTED_OP_AIO_IB_Q (PQISRC_MAX_SUPPORTED_OP_RAID_IB_Q)
#define PQISRC_MAX_OP_IB_QUEUE_ELEM_NUM (PQISRC_MAX_OUTSTANDING_REQ / PQISRC_MAX_SUPPORTED_OP_IB_Q)
#define PQISRC_MAX_OP_OB_QUEUE_ELEM_NUM PQISRC_MAX_OUTSTANDING_REQ
#define PQISRC_MIN_OP_OB_QUEUE_ELEM_NUM 2
#define PQISRC_MAX_SUPPORTED_OP_OB_Q 64
#define PQISRC_OP_MAX_IBQ_ELEM_SIZE 8 /* 8 * 16 = 128 bytes */
#define PQISRC_OP_MIN_IBQ_ELEM_SIZE 2 /* 2 * 16 = 32 bytes */
#define PQISRC_OP_OBQ_ELEM_SIZE 1 /* 16 bytes */
#define PQISRC_ADMIN_IBQ_ELEM_SIZE 2 /* 2 * 16 = 32 bytes */
#define PQISRC_INTR_COALSC_GRAN 0
#define PQISRC_PROTO_BIT_MASK 0
#define PQISRC_SGL_SUPPORTED_BIT_MASK 0
#define PQISRC_MAX_SUPPORTED_OP_OB_Q 64
#define PQISRC_NUM_EVENT_Q_ELEM 32
#define PQISRC_EVENT_Q_ELEM_SIZE 32
/* PQI Capability maxes (from controller) */
#define PQISRC_MAX_ELEMENTS 8192
#define PQISRC_OP_MIN_ELEM_SIZE 1 /* 16 bytes */
#define PQISRC_OP_MAX_ELEM_SIZE 8 /* 8 * 16 = 128 bytes */
#define PQISRC_MAX_SPANNING_IU_LENGTH 1152
#define PQISRC_MAX_OUTSTANDING_REQ 4096
/* #define PQISRC_MAX_OP_IB_QUEUE_ELEM_NUM (PQISRC_MAX_OUTSTANDING_REQ / PQISRC_MAX_SUPPORTED_OP_IB_Q) */
/* #define PQISRC_MAX_OP_OB_QUEUE_ELEM_NUM PQISRC_MAX_OUTSTANDING_REQ */
/* #define PQISRC_MIN_OP_OB_QUEUE_ELEM_NUM 2 */
#ifdef DEVICE_HINT
#define PQISRC_MIN_OUTSTANDING_REQ (PQI_RESERVED_IO_SLOTS_CNT + OS_MIN_OUTSTANDING_REQ)
#endif
/* Queue IDs Enumeration */
#define PQI_ADMIN_IB_QUEUE_ID 0
#define PQI_ADMIN_OB_QUEUE_ID 0
#define PQI_MIN_OP_IB_QUEUE_ID 1
#define PQI_OP_EVENT_QUEUE_ID 1
#define PQI_MIN_OP_OB_QUEUE_ID 2
/* PQI IU Element Sizes */
#define PQISRC_ADMIN_IBQ_ELEM_SIZE_BYTES 64
#define PQISRC_ADMIN_OBQ_ELEM_SIZE_BYTES 64
#define PQISRC_OP_IBQ_ELEM_SIZE_BYTES 128
#define PQISRC_OP_OBQ_ELEM_SIZE_BYTES 16
#define PQISRC_EVENT_Q_ELEM_SIZE_BYTES 32
/* Number of elements this driver compile will potentially use */
#define PQISRC_MAX_ADMIN_IB_QUEUE_ELEM_NUM 16
#define PQISRC_MAX_ADMIN_OB_QUEUE_ELEM_NUM 16
#define PQISRC_MAX_EVENT_QUEUE_ELEM_NUM 32
#define PQISRC_MAX_SPANNING_ELEMS 9
/* setting maximums for adv aio */
#define PQISRC_MAX_AIO_RAID5_OR_6_WRITE (8*1024) /* 8 KiB */
#define PQISRC_MAX_AIO_RAID1_OR_10_WRITE_2DRV 0x0000 /* No Limit */
#define PQISRC_MAX_AIO_RAID1_OR_10_WRITE_3DRV 0x0000 /* No Limit */
#define PQISRC_MAX_AIO_NVME_CRYPTO (32*1024) /* 32 KiB */
#define PQISRC_MAX_AIO_NO_LIMIT 0x0000 /* No Limit */
#define PQISRC_MAX_AIO_RW_XFER_SAS_SATA_CRYPTO 0x0000 /* No Limit */
#define PQISRC_MAX_AIO_RW_XFER_NVME_CRYPTO (32*1024)
#define SENSE_FEATURES_CRYPTO_OFFSET offsetof(bmic_sense_feature_page_io_aio_subpage_t, max_aio_rw_xfer_crypto_sas_sata)
#define MINIMUM_AIO_SUBPAGE_LENGTH \
(offsetofend(bmic_sense_feature_page_io_aio_subpage_t, \
max_aio_write_raid1_10_3drv) - \
(sizeof(((bmic_sense_feature_page_io_aio_subpage_t *)0)->header)))
/* Not used or useful yet */
/* #define PQISRC_INTR_COALSC_GRAN 0 */
/* #define PQISRC_PROTO_BIT_MASK 0 */
/* #define PQISRC_SGL_SUPPORTED_BIT_MASK 0 */
#define PQISRC_MAX_SUPPORTED_MIRRORS 3
/* PQI Registers state status */
#define PQI_RESET_ACTION_RESET 0x1
@ -249,12 +315,20 @@ enum pqisrc_ctrl_mode{
PQI_DEV_STATE_ADMIN_QUEUE_PAIR_READY )
#define PQISRC_PQI_DEVICE_SIGNATURE "PQI DREG"
#define PQI_ADMINQ_ELEM_ARRAY_ALIGN 64
#define PQI_ADMINQ_CI_PI_ALIGN 64
#define PQI_OPQ_ELEM_ARRAY_ALIGN 64
#define PQI_OPQ_CI_PI_ALIGN 4
#define PQI_ADDR_ALIGN_MASK_64 0x3F /* lsb 6 bits */
#define PQI_ADDR_ALIGN_MASK_4 0x3 /* lsb 2 bits */
#define PQI_ADDR_ALIGN_MASK_4K 0xFFF/* lsb 12 bits */
#define PQI_ADDR_ALIGN_MASK_1K 0x3FF/* lsb 10 bits */
#define PQI_ADDR_ALIGN_MASK_64 0x3F /* lsb 6 bits */
#define PQI_ADDR_ALIGN_MASK_4 0x3 /* lsb 2 bits */
#define PQI_ADDR_ALIGN 4096
#define PQI_ADDR_ALIGN_MASK PQI_ADDR_ALIGN_MASK_4K
#define PQI_FORCE_IQ_ELEMENTS 32 /* 4096/128 = 32 (see PQISRC_OP_IBQ_ELEM_SIZE_BYTES) */
#define PQI_FORCE_OQ_ELEMENTS 256 /* 4096/16 = 256 (see PQISRC_OP_OBQ_ELEM_SIZE_BYTES) */
#define PQI_CI_PI_ALIGN 64
#define PQI_CI_PI_ALIGN_MASK PQI_ADDR_ALIGN_MASK_64
#define PQISRC_PQIMODE_READY_TIMEOUT (30 * 1000 ) /* 30 secs */
#define PQISRC_MODE_READY_POLL_INTERVAL 1000 /* 1 msec */
@ -271,37 +345,38 @@ enum pqisrc_ctrl_mode{
/* PQI configuration table section IDs */
#define PQI_CONF_TABLE_ALL_SECTIONS (-1)
#define PQI_CONF_TABLE_SECTION_GENERAL_INFO 0
#define PQI_CONF_TABLE_SECTION_GENERAL_INFO 0
#define PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES 1
#define PQI_CONF_TABLE_SECTION_FIRMWARE_ERRATA 2
#define PQI_CONF_TABLE_SECTION_DEBUG 3
#define PQI_CONF_TABLE_SECTION_HEARTBEAT 4
#define PQI_CONF_TABLE_SECTION_DEBUG 3
#define PQI_CONF_TABLE_SECTION_HEARTBEAT 4
#define PQI_CONF_TABLE_SOFT_RESET 5
/* PQI feature bits as defined in PQI_SPEC.doc */
#define PQI_FIRMWARE_FEATURE_OFA 0
#define PQI_FIRMWARE_FEATURE_SMP 1
#define PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE 2
#define PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS 3
#define PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS 4
#define PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS 5
#define PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS 6
#define PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS 7
#define PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS 8
#define PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS 9
#define PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS 10
#define PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE 11
#define PQI_FIRMWARE_FEATURE_UNIQUE_SATA_WWN 12
#define PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT 13
#define PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT 14
#define PQI_FIRMWARE_FEATURE_MAXIMUM 14
#define PQI_FIRMWARE_FEATURE_PAGE83_IDENTIFIER_FOR_RPL_WWID 16
#define PQI_FIRMWARE_FEATURE_OFA 0
#define PQI_FIRMWARE_FEATURE_SMP 1
#define PQI_FIRMWARE_FEATURE_MAX_KNOWN 2
#define PQI_FIRMWARE_FEATURE_AIO_READ_RAID_0 3
#define PQI_FIRMWARE_FEATURE_AIO_READ_RAID_1_10 4
#define PQI_FIRMWARE_FEATURE_AIO_READ_RAID_5_50 5
#define PQI_FIRMWARE_FEATURE_AIO_READ_RAID_6_60 6
#define PQI_FIRMWARE_FEATURE_AIO_WRITE_RAID_0 7
#define PQI_FIRMWARE_FEATURE_AIO_WRITE_RAID_1_10 8
#define PQI_FIRMWARE_FEATURE_AIO_WRITE_RAID_5_50 9
#define PQI_FIRMWARE_FEATURE_AIO_WRITE_RAID_6_60 10
#define PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE 11
#define PQI_FIRMWARE_FEATURE_SATA_WWN_FOR_DEV_UNIQUE_ID 12
#define PQI_FIRMWARE_FEATURE_TIMEOUT_IN_RAID_IU_SUPPORT 13
#define PQI_FIRMWARE_FEATURE_TIMEOUT_IN_TMF_IU_SUPPORT 14
#define PQI_FIRMWARE_FEATURE_MAXIMUM 14
#define CTRLR_HEARTBEAT_CNT(softs) \
LE_64(PCI_MEM_GET64(softs, softs->heartbeat_counter_abs_addr, softs->heartbeat_counter_off))
#define CTRLR_HEARTBEAT_CNT(softs) LE_64(PCI_MEM_GET64(softs, softs->heartbeat_counter_abs_addr, softs->heartbeat_counter_off))
#define PQI_HEARTBEAT_TIMEOUT_SEC (10) /* 10 sec interval */
#define PQI_HOST_WELLNESS_TIMEOUT_SEC (24*3600)
/* pqi-2r00a table 36 */
#define PQI_ADMIN_QUEUE_MSIX_DISABLE (0x80000000)
#define PQI_ADMIN_QUEUE_MSIX_DISABLE (0x80000000)
#define PQI_ADMIN_QUEUE_MSIX_ENABLE (0 << 31)
#define PQI_ADMIN_QUEUE_CONF_FUNC_CREATE_Q_PAIR 0x01
@ -326,6 +401,10 @@ enum pqisrc_ctrl_mode{
#define PQI_IU_TYPE_RAID_PATH_IO_REQUEST 0x14
#define PQI_IU_TYPE_AIO_PATH_IO_REQUEST 0x15
#define PQI_REQUEST_IU_AIO_TASK_MANAGEMENT 0x16
#define PQI_IU_TYPE_RAID5_WRITE_BYPASS_REQUEST 0x18
#define PQI_IU_TYPE_RAID6_WRITE_BYPASS_REQUEST 0x19
#define PQI_IU_TYPE_RAID1_WRITE_BYPASS_REQUEST 0x1A
#define PQI_REQUEST_IU_AIO_BYPASS_TASK_MGMT 0x20
#define PQI_REQUEST_IU_GENERAL_ADMIN 0x60
#define PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG 0x72
#define PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG 0x73
@ -349,13 +428,13 @@ enum pqisrc_ctrl_mode{
#define PQI_FUNCTION_CHANGE_OPERATIONAL_IQ_PROP 0x14
#define PQI_CHANGE_OP_IQ_PROP_ASSIGN_AIO 1
#define PQI_DEFAULT_IB_QUEUE 0
#define PQI_DEFAULT_IB_QUEUE 0
#define PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE 0
#define PQI_VENDOR_RESPONSE_IU_SUCCESS 0
#define PQI_VENDOR_RESPONSE_IU_UNSUCCESS 1
#define PQI_VENDOR_RESPONSE_IU_SUCCESS 0
#define PQI_VENDOR_RESPONSE_IU_UNSUCCESS 1
#define PQI_VENDOR_RESPONSE_IU_INVALID_PARAM 2
#define PQI_VENDOR_RESPONSE_IU_INSUFF_RESRC 3
#define PQI_VENDOR_RESPONSE_IU_INSUFF_RESRC 3
/* Interface macros */
@ -371,16 +450,30 @@ enum pqisrc_ctrl_mode{
#define PQI_GET_CTRL_MODE(softs) \
(PCI_MEM_GET32(softs, &softs->ioa_reg->scratchpad0, LEGACY_SIS_SCR0))
#define PQI_SAVE_CTRL_MODE(softs, mode) \
PCI_MEM_PUT32(softs, &softs->ioa_reg->scratchpad0, LEGACY_SIS_SCR0, mode)
#define PQI_SAVE_CTRL_MODE(softs, mode) { \
PCI_MEM_PUT32(softs, &softs->ioa_reg->scratchpad0, LEGACY_SIS_SCR0, mode); \
OS_SLEEP(1000); \
}
#define PQISRC_MAX_TARGETID 1024
#define PQISRC_MAX_TARGETLUN 64
#define LEGACY_SIS_SCR_REG_LENGTH 4
#define LEGACY_SIS_SCR1 LEGACY_SIS_SCR0 + LEGACY_SIS_SCR_REG_LENGTH
#define PQI_GET_CTRL_TYPE(softs) \
((PCI_MEM_GET32(softs, &softs->ioa_reg->scratchpad1, LEGACY_SIS_SCR1)) \
& 0x0000FFFF)
/* smart raid-hba pqi functional spec, scratchpad register 1 spec */
#define PQI_CTRL_PRODUCT_ID_GEN1 0x0000
#define PQI_CTRL_PRODUCT_ID_GEN2_REV_A 0x0007
#define PQI_CTRL_PRODUCT_ID_GEN2_REV_B 0x0107
#define PQISRC_MAX_TARGETID 1024
#define PQISRC_MAX_TARGETLUN 64
/* Vendor specific IU Type for Event config Cmds */
#define PQI_REQUEST_IU_REPORT_EVENT_CONFIG 0x72
#define PQI_REQUEST_IU_SET_EVENT_CONFIG 0x73
#define PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT 0xf6
#define PQI_REQUEST_IU_REPORT_EVENT_CONFIG 0x72
#define PQI_REQUEST_IU_SET_EVENT_CONFIG 0x73
#define PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT 0xf6
#define PQI_RESPONSE_IU_GENERAL_MANAGEMENT 0x81
#define PQI_MANAGEMENT_CMD_RESP_TIMEOUT 3000
@ -409,40 +502,42 @@ enum pqisrc_ctrl_mode{
/* Device flags */
#define PQISRC_DFLAG_VALID (1 << 0)
#define PQISRC_DFLAG_CONFIGURING (1 << 1)
#define PQISRC_DFLAG_VALID (1 << 0)
#define PQISRC_DFLAG_CONFIGURING (1 << 1)
#define MAX_EMBEDDED_SG_IN_FIRST_IU 4
#define MAX_EMBEDDED_SG_IN_IU 8
#define MAX_EMBEDDED_SG_IN_FIRST_IU_DEFAULT 4
#define MAX_EMBEDDED_SG_IN_FIRST_IU_RAID56_AIO 3
#define MAX_EMBEDDED_SG_IN_IU 8
#define SG_FLAG_LAST 0x40000000
#define SG_FLAG_CHAIN 0x80000000
#define IN_PQI_RESET(softs) (softs->ctlr_state & PQI_BUS_RESET)
#define DEV_GONE(dev) (!dev || (dev->invalid == true))
#define IS_AIO_PATH(dev) (dev->aio_enabled)
#define IS_RAID_PATH(dev) (!dev->aio_enabled)
#define IN_PQI_RESET(softs) (softs->ctlr_state & PQI_BUS_RESET)
#define DEV_GONE(dev) (!dev || (dev->invalid == true))
#define IS_AIO_PATH(dev) (dev->aio_enabled)
#define IS_RAID_PATH(dev) (!dev->aio_enabled)
#define DEVICE_RESET(dvp) (dvp->reset_in_progress)
#define DEVICE_RESET(dvp) (dvp->reset_in_progress)
/* SOP data direction flags */
#define SOP_DATA_DIR_NONE 0x00
#define SOP_DATA_DIR_FROM_DEVICE 0x01
#define SOP_DATA_DIR_TO_DEVICE 0x02
#define SOP_DATA_DIR_BIDIRECTIONAL 0x03
#define SOP_PARTIAL_DATA_BUFFER 0x04
#define SOP_DATA_DIR_UNKNOWN 0xFF
#define SOP_DATA_DIR_NONE 0x00
#define SOP_DATA_DIR_FROM_DEVICE 0x01
#define SOP_DATA_DIR_TO_DEVICE 0x02
#define SOP_DATA_DIR_BIDIRECTIONAL 0x03
#define SOP_PARTIAL_DATA_BUFFER 0x04
#define PQISRC_DMA_VALID (1 << 0)
#define PQISRC_CMD_NO_INTR (1 << 1)
#define PQISRC_DMA_VALID (1 << 0)
#define PQISRC_CMD_NO_INTR (1 << 1)
#define SOP_TASK_ATTRIBUTE_SIMPLE 0
#define SOP_TASK_ATTRIBUTE_SIMPLE 0
#define SOP_TASK_ATTRIBUTE_HEAD_OF_QUEUE 1
#define SOP_TASK_ATTRIBUTE_ORDERED 2
#define SOP_TASK_ATTRIBUTE_ACA 4
#define SOP_TASK_ATTRIBUTE_ORDERED 2
#define SOP_TASK_ATTRIBUTE_ACA 4
#define SOP_TASK_MANAGEMENT_FUNCTION_COMPLETE 0x0
#define SOP_TASK_MANAGEMENT_FUNCTION_REJECTED 0x4
#define SOP_TASK_MANAGEMENT_FUNCTION_COMPLETE 0x0
#define SOP_TASK_MANAGEMENT_FUNCTION_REJECTED 0x4
#define SOP_TASK_MANAGEMENT_FUNCTION_FAILED 0x5
#define SOP_TASK_MANAGEMENT_FUNCTION_SUCCEEDED 0x8
#define SOP_TASK_MANAGEMENT_FUNCTION_SUCCEEDED 0x8
#define SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK 0x01
#define SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK_SET 0x02
#define SOP_TASK_MANAGEMENT_LUN_RESET 0x8
@ -499,13 +594,15 @@ enum pqisrc_ctrl_mode{
#define PQI_RAID_STATUS_UNDERRUN PQI_AIO_STATUS_UNDERRUN
#define PQI_RAID_STATUS_OVERRUN PQI_AIO_STATUS_OVERRUN
#define NUM_STREAMS_PER_LUN 8
/* VPD inquiry pages */
#define SCSI_VPD_SUPPORTED_PAGES 0x0 /* standard page */
#define SCSI_VPD_DEVICE_ID 0x83 /* standard page */
#define SCSI_VPD_DEVICE_ID 0x83 /* standard page */
#define SA_VPD_PHYS_DEVICE_ID 0xc0 /* vendor-specific page */
#define SA_VPD_LV_DEVICE_GEOMETRY 0xc1 /* vendor-specific page */
#define SA_VPD_LV_IOACCEL_STATUS 0xc2 /* vendor-specific page */
#define SA_VPD_LV_STATUS 0xc3 /* vendor-specific page */
#define SA_VPD_LV_STATUS 0xc3 /* vendor-specific page */
#define VPD_PAGE (1 << 8)
@ -555,10 +652,17 @@ enum pqisrc_ctrl_mode{
#define MAX_RAW_M16_QDEPTH 2032
#define PQI_PTRAID_UPDATE_ON_RESCAN_LUNS 0x80000000
#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
#define RAID_CTLR_LUNID ((uint8_t *) "\0\0\0\0\0\0\0\0")
/* SCSI Cmds @todo: move SCMD_READ_6, etc. into library */
#define SCSI_INQUIRY 0x12
#define SCSI_MODE_SENSE 0x1a
#define SCSI_REPORT_LUNS 0xa0
#define SCSI_LOG_SENSE 0x4d
#define SCSI_ATA_PASSTHRU16 0x85
#define SA_CACHE_FLUSH 0x1
#define PQISRC_INQUIRY_TIMEOUT 30
#define SA_INQUIRY 0x12
#define SA_REPORT_LOG 0xc2 /* Report Logical LUNs */
#define SA_REPORT_PHYS 0xc3 /* Report Physical LUNs */
@ -570,10 +674,8 @@ enum pqisrc_ctrl_mode{
#define SCSI_SENSE_RESPONSE_72 0x72
#define SCSI_SENSE_RESPONSE_73 0x73
#define SA_REPORT_LOG_EXTENDED 0x1
#define SA_REPORT_PHYS_EXTENDED 0x2
#define SA_CACHE_FLUSH_BUF_LEN 4
#define SA_REPORT_LOG_EXTENDED 0x1
#define SA_REPORT_PHYS_EXTENDED 0x2
#define GET_SCSI_SNO(cmd) (cmd->cmdId.serialNumber)
@ -636,11 +738,14 @@ enum pqisrc_ctrl_mode{
#define PQI_MAX_DEVICES (PQI_MAX_LOGICALS + PQI_MAX_PHYSICALS + 1) /* 1 for controller device entry */
#define PQI_MAX_EXT_TARGETS 32
#define PQI_CTLR_INDEX (PQI_MAX_DEVICES - 1)
#define PQI_CTLR_INDEX 0
#define PQI_PD_INDEX(t) (t + PQI_MAX_LOGICALS)
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#define MAX_TARGET_DEVICES 1024
#define MAX_TARGET_BIT 1025
#define SLOT_AVAILABLE false
#define SLOT_TAKEN true
#define PQI_NO_MEM 2
@ -648,6 +753,7 @@ typedef enum pqisrc_device_status {
DEVICE_NOT_FOUND,
DEVICE_CHANGED,
DEVICE_UNCHANGED,
DEVICE_IN_REMOVE,
} device_status_t;
#define SA_RAID_0 0
@ -666,19 +772,27 @@ typedef enum pqisrc_device_status {
#define BIT3 (1 << 3)
#define BITS_PER_BYTE 8
/* Vendor Specific (BMIC) Op Code */
#define BMIC_READ 0x26
#define BMIC_WRITE 0x27
#define IS_BMIC_OPCODE(opcode) (opcode == BMIC_READ || opcode == BMIC_WRITE)
/* BMIC commands */
#define BMIC_IDENTIFY_CONTROLLER 0x11
#define BMIC_IDENTIFY_CONTROLLER 0x11
#define BMIC_IDENTIFY_PHYSICAL_DEVICE 0x15
#define BMIC_READ 0x26
#define BMIC_WRITE 0x27
#define BMIC_SENSE_FEATURE 0x61
#define BMIC_SENSE_CONTROLLER_PARAMETERS 0x64
#define BMIC_SENSE_SUBSYSTEM_INFORMATION 0x66
#define BMIC_CACHE_FLUSH 0xc2
#define BMIC_FLASH_FIRMWARE 0xf7
#define BMIC_WRITE_HOST_WELLNESS 0xa5
#define BMIC_SET_DIAGS_OPTIONS 0xf4
#define BMIC_SENSE_DIAGS_OPTIONS 0xf5
#define BMIC_WRITE_HOST_WELLNESS 0xa5
#define BMIC_CACHE_FLUSH 0xc2
#define BMIC_SET_DIAGS_OPTIONS 0xf4
#define BMIC_SENSE_DIAGS_OPTIONS 0xf5
#define BMIC_FLASH_FIRMWARE 0xf7
/* Sense Feature Pages/Subpages */
#define IO_SENSE_FEATURES_PAGE 0x08
#define SENSE_FEATURES_AIO_SUBPAGE 0x02
#define MASKED_DEVICE(lunid) ((lunid)[3] & 0xC0)
#define BMIC_GET_LEVEL_2_BUS(lunid) ((lunid)[7] & 0x3F)
@ -700,9 +814,20 @@ typedef enum pqisrc_device_status {
PQI_RESERVED_IO_SLOTS_TMF + \
PQI_RESERVED_IO_SLOTS_SYNC_REQUESTS)
/* Defines for print flags */
#define PRINT_FLAG_HDR_COLUMN 0x0001
/* Defines for counter flags */
#define COUNTER_FLAG_CLEAR_COUNTS 0x0001
#define COUNTER_FLAG_ONLY_NON_ZERO 0x0002
/* Defines for print flags */
#define PRINT_FLAG_HDR_COLUMN 0x0001
/* Function-specific debug flags */
#if 0
#define DEBUG_AIO /* show AIO eligibility, IU, etc. (very spammy!) */
#define DEBUG_AIO_LOCATOR /* show AIO row/column etc. calc. */
#define DEBUG_RAID_MAP /* show AIO raid map content from FW */
#endif
static inline uint16_t GET_LE16(const uint8_t *p)
{
@ -754,8 +879,27 @@ static inline void PUT_BE64(uint64_t val, uint8_t *p)
PUT_BE32(val, p + 4);
}
#define OS_FREEBSD
#define SIS_POLL_WAIT
/* Calculates percentage of val vs total, i.e. 20 out of 100 --> 20% */
static inline uint64_t CALC_PERCENT_TOTAL(uint64_t val, uint64_t total)
{
uint64_t percent = 0;
if (total)
percent = (val * 100) / total;
return percent;
}
/* Calculates percentage of a vs b, i.e. 50 vs 100 -> 50/150 -> 33% */
#define CALC_PERCENT_VS(a, b) (CALC_PERCENT_TOTAL(a, (a+b)))
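As the comments note, CALC_PERCENT_VS passes a + b as the total, so both operands count toward the denominator. For example:

	/* CALC_PERCENT_TOTAL(20, 100) == 20	(20 out of 100)   */
	/* CALC_PERCENT_VS(50, 100)    == 33	(50 * 100 / 150)  */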
#define STREAM_DETECTION "stream_disable"
#define SATA_UNIQUE_WWN "sata_unique_wwn_disable"
#define AIO_RAID1_WRITE_BYPASS "aio_raid1_write_disable"
#define AIO_RAID5_WRITE_BYPASS "aio_raid5_write_disable"
#define AIO_RAID6_WRITE_BYPASS "aio_raid6_write_disable"
#define ADAPTER_QUEUE_DEPTH "queue_depth"
#define SCATTER_GATHER_COUNT "sg_count"
#define QUEUE_COUNT "queue_count"
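These strings name per-adapter tunables the new DEVICE_HINT support reads at attach. Assuming they are consumed through the standard FreeBSD device-hint mechanism (the consuming code is not in this diff), they would be set in loader.conf(5) along these lines; the unit number and values are examples only:

	hint.smartpqi.0.queue_depth="1024"
	hint.smartpqi.0.sg_count="64"
	hint.smartpqi.0.stream_disable="1"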
#define OS_ATTRIBUTE_PACKED __attribute__((__packed__))
#define OS_ATTRIBUTE_ALIGNED(n) __attribute__((aligned(n)))
@ -793,19 +937,18 @@ typedef struct _driver_info
typedef uint8_t *passthru_buf_type_t;
#define PQISRC_OS_VERSION 1
#define PQISRC_FEATURE_VERSION 4014
#define PQISRC_PATCH_VERSION 0
#define PQISRC_BUILD_VERSION 105
#define PQISRC_DRIVER_MAJOR __FreeBSD__
#define PQISRC_DRIVER_MINOR 4410
#define PQISRC_DRIVER_RELEASE 0
#define PQISRC_DRIVER_REVISION 2005
#define STR(s) # s
#define PQISRC_VERSION(a, b, c, d) STR(a.b.c.d)
#define PQISRC_DRIVER_VERSION PQISRC_VERSION(PQISRC_OS_VERSION, \
PQISRC_FEATURE_VERSION, \
PQISRC_PATCH_VERSION, \
PQISRC_BUILD_VERSION)
#define PQISRC_VERSION(a, b, c, d) STR(a.b.c-d)
#define PQISRC_DRIVER_VERSION PQISRC_VERSION(PQISRC_DRIVER_MAJOR, \
PQISRC_DRIVER_MINOR, \
PQISRC_DRIVER_RELEASE, \
PQISRC_DRIVER_REVISION)
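STR stringifies its argument only after PQISRC_VERSION has macro-expanded the four components, and the new scheme puts a dash before the revision. On a build where __FreeBSD__ expands to 14, the string would be:

	/* PQISRC_DRIVER_VERSION -> "14.4410.0-2005" */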
/* End Management interface */
#ifdef ASSERT
@ -880,7 +1023,6 @@ typedef struct PCI_ACC_HANDLE {
#define LEGACY_SIS_SRCV_MAILBOX 0x1000 /* mailbox (20 bytes) */
#define LEGACY_SIS_SRCV_OFFSET_MAILBOX_7 0x101C /* mailbox 7 register offset */
#define LEGACY_SIS_ODR_SHIFT 12 /* outbound doorbell shift */
#define LEGACY_SIS_IDR_SHIFT 9 /* inbound doorbell shift */
@ -892,7 +1034,7 @@ typedef struct PCI_ACC_HANDLE {
#define PQI_SIGNATURE 0x4000
#define PQI_ADMINQ_CONFIG 0x4008
#define PQI_ADMINQ_CAP 0x4010
#define PQI_LEGACY_INTR_STATUS 0x4018
#define PQI_LEGACY_INTR_STATUS 0x4018
#define PQI_LEGACY_INTR_MASK_SET 0x401C
#define PQI_LEGACY_INTR_MASK_CLR 0x4020
#define PQI_DEV_STATUS 0x4040
@ -912,7 +1054,7 @@ typedef struct PCI_ACC_HANDLE {
#define OS_BUSYWAIT(x) DELAY(x)
#define OS_SLEEP(timeout) \
DELAY(timeout);
/* TMF request timeout is 600 Sec */
#define OS_TMF_TIMEOUT_SEC (10 * 60)
@ -950,14 +1092,14 @@ typedef struct pqi_intr_ctx {
typedef uint8_t os_dev_info_t;
typedef struct OS_SPECIFIC {
device_t pqi_dev;
device_t pqi_dev;
struct resource *pqi_regs_res0; /* reg. if. window */
int pqi_regs_rid0; /* resource ID */
bus_dma_tag_t pqi_parent_dmat; /* parent DMA tag */
bus_dma_tag_t pqi_buffer_dmat;
/* controller hardware interface */
int pqi_hwif;
int pqi_hwif;
struct resource *pqi_irq[PQI_MAX_MSIX]; /* interrupt */
int pqi_irq_rid[PQI_MAX_MSIX];
void *intrcookie[PQI_MAX_MSIX];
@ -980,49 +1122,61 @@ typedef struct OS_SPECIFIC {
struct callout heartbeat_timeout_id; /* heart beat event handling */
} OS_SPECIFIC_T;
typedef bus_addr_t dma_addr_t;
typedef struct device_hints {
uint8_t stream_status: 1;
uint8_t sata_unique_wwn_status: 1;
uint8_t aio_raid1_write_status: 1;
uint8_t aio_raid5_write_status: 1;
uint8_t aio_raid6_write_status: 1;
uint32_t queue_depth;
uint32_t sg_segments;
uint32_t cpu_count;
} device_hint;
typedef bus_addr_t dma_addr_t;
/* Register access macros */
#define PCI_MEM_GET32( _softs, _absaddr, _offset ) \
bus_space_read_4(_softs->pci_mem_handle.pqi_btag, \
_softs->pci_mem_handle.pqi_bhandle, _offset)
bus_space_read_4(_softs->pci_mem_handle.pqi_btag, \
_softs->pci_mem_handle.pqi_bhandle, _offset)
#if defined(__i386__)
#define PCI_MEM_GET64( _softs, _absaddr, _offset ) ({ \
(uint64_t)bus_space_read_4(_softs->pci_mem_handle.pqi_btag, \
_softs->pci_mem_handle.pqi_bhandle, _offset) + \
((uint64_t)bus_space_read_4(_softs->pci_mem_handle.pqi_btag, \
_softs->pci_mem_handle.pqi_bhandle, _offset + 4) << 32); \
_softs->pci_mem_handle.pqi_bhandle, _offset) + \
((uint64_t)bus_space_read_4(_softs->pci_mem_handle.pqi_btag, \
_softs->pci_mem_handle.pqi_bhandle, _offset + 4) << 32); \
})
#else
#define PCI_MEM_GET64(_softs, _absaddr, _offset ) \
bus_space_read_8(_softs->pci_mem_handle.pqi_btag, \
_softs->pci_mem_handle.pqi_bhandle, _offset)
bus_space_read_8(_softs->pci_mem_handle.pqi_btag, \
_softs->pci_mem_handle.pqi_bhandle, _offset)
#endif
#define PCI_MEM_PUT32( _softs, _absaddr, _offset, _val ) \
bus_space_write_4(_softs->pci_mem_handle.pqi_btag, \
_softs->pci_mem_handle.pqi_bhandle, _offset, _val)
bus_space_write_4(_softs->pci_mem_handle.pqi_btag, \
_softs->pci_mem_handle.pqi_bhandle, _offset, _val)
#if defined(__i386__)
#define PCI_MEM_PUT64( _softs, _absaddr, _offset, _val ) \
bus_space_write_4(_softs->pci_mem_handle.pqi_btag, \
_softs->pci_mem_handle.pqi_bhandle, _offset, _val); \
_softs->pci_mem_handle.pqi_bhandle, _offset, _val); \
bus_space_write_4(_softs->pci_mem_handle.pqi_btag, \
_softs->pci_mem_handle.pqi_bhandle, _offset + 4, _val >> 32);
#else
#define PCI_MEM_PUT64( _softs, _absaddr, _offset, _val ) \
bus_space_write_8(_softs->pci_mem_handle.pqi_btag, \
_softs->pci_mem_handle.pqi_bhandle, _offset, _val)
bus_space_write_8(_softs->pci_mem_handle.pqi_btag, \
_softs->pci_mem_handle.pqi_bhandle, _offset, _val)
#endif
#define PCI_MEM_GET_BUF(_softs, _absaddr, _offset, buf, size) \
bus_space_read_region_1(_softs->pci_mem_handle.pqi_btag,\
_softs->pci_mem_handle.pqi_bhandle, _offset, buf, size)
/* Lock */
typedef struct mtx OS_LOCK_T;
typedef struct sema OS_SEMA_LOCK_T;
@ -1038,8 +1192,7 @@ typedef struct sema OS_SEMA_LOCK_T;
#define PQI_LOCK(_lock) OS_ACQUIRE_SPINLOCK(_lock)
#define PQI_UNLOCK(_lock) OS_RELEASE_SPINLOCK(_lock)
#define OS_GET_CDBP(rcb) \
((rcb->cm_ccb->ccb_h.flags & CAM_CDB_POINTER) ? rcb->cm_ccb->csio.cdb_io.cdb_ptr : rcb->cm_ccb->csio.cdb_io.cdb_bytes)
#define OS_GET_CDBP(rcb) ((rcb->cm_ccb->ccb_h.flags & CAM_CDB_POINTER) ? rcb->cm_ccb->csio.cdb_io.cdb_ptr : rcb->cm_ccb->csio.cdb_io.cdb_bytes)
#define GET_SCSI_BUFFLEN(rcb) (rcb->cm_ccb->csio.dxfer_len)
#define IS_OS_SCSICMD(rcb) (rcb && !rcb->tm_req && rcb->cm_ccb)
@ -1070,8 +1223,14 @@ typedef struct sema OS_SEMA_LOCK_T;
#define SCMD_WRITE_16 WRITE_16
/* FreeBSD status macros */
#define BSD_SUCCESS 0
#define BSD_SUCCESS 0
#define DEVICE_HINT_SUCCESS 0
/* Min outstanding commands that driver can register with CAM layer.*/
#define OS_MIN_OUTSTANDING_REQ 6
#define BSD_MIN_SG_SEGMENTS 16
#define DISABLE_ERR_RESP_VERBOSE 1
/* Debug facility */
@ -1110,13 +1269,6 @@ static int logging_level = PQISRC_LOG_LEVEL;
} \
}while(0);
#define DBG_TRACEIO(fmt,args...) \
do { \
if (logging_level & PQISRC_FLAGS_TRACEIO) { \
printf("[TRACEIO]:[ %s ] [ %d ]"fmt,__func__,__LINE__,##args); \
} \
}while(0);
#define DBG_DISC(fmt,args...) \
do { \
if (logging_level & PQISRC_FLAGS_DISC) { \
@ -1124,6 +1276,13 @@ static int logging_level = PQISRC_LOG_LEVEL;
} \
}while(0);
#define DBG_TRACEIO(fmt,args...) \
do { \
if (logging_level & PQISRC_FLAGS_TRACEIO) { \
printf("[TRACEIO]:[ %s ] [ %d ]"fmt,__func__,__LINE__,##args); \
} \
}while(0);
#define DBG_WARN(fmt,args...) \
do { \
if (logging_level & PQISRC_FLAGS_WARN) { \
@ -1137,6 +1296,12 @@ static int logging_level = PQISRC_LOG_LEVEL;
printf("[ERROR]::[%u:%u.%u][CPU %d][%s][%d]:"fmt,softs->bus_id,softs->device_id,softs->func_id,curcpu,__func__,__LINE__,##args); \
} \
}while(0);
#define DBG_ERR_NO_SOFTS(fmt,args...) \
do { \
if (logging_level & PQISRC_FLAGS_ERROR) { \
printf("[ERROR]::[CPU %d][%s][%d]:"fmt,curcpu,__func__,__LINE__,##args); \
} \
}while(0);
#define DBG_IO(fmt,args...) \
do { \
if (logging_level & PQISRC_FLAGS_TRACEIO) { \
@ -1147,21 +1312,21 @@ static int logging_level = PQISRC_LOG_LEVEL;
#define DBG_ERR_BTL(device,fmt,args...) \
do { \
if (logging_level & PQISRC_FLAGS_ERROR) { \
printf("[ERROR]::[%u:%u.%u][%u,%u,%u][CPU %d][%s][%d]:"fmt, softs->bus_id, softs->device_id, softs->func_id, device->bus, device->target, device->lun,curcpu,__func__,__LINE__,##args); \
printf("[ERROR]::[%u:%u.%u][%d,%d,%d][CPU %d][%s][%d]:"fmt, softs->bus_id, softs->device_id, softs->func_id, device->bus, device->target, device->lun,curcpu,__func__,__LINE__,##args); \
} \
}while(0);
#define DBG_WARN_BTL(device,fmt,args...) \
do { \
if (logging_level & PQISRC_FLAGS_WARN) { \
printf("[WARN]:[%u:%u.%u][%u,%u,%u][CPU %d][%s][%d]:"fmt, softs->bus_id, softs->device_id, softs->func_id, device->bus, device->target, device->lun,curcpu,__func__,__LINE__,##args);\
printf("[WARN]:[%u:%u.%u][%d,%d,%d][CPU %d][%s][%d]:"fmt, softs->bus_id, softs->device_id, softs->func_id, device->bus, device->target, device->lun,curcpu,__func__,__LINE__,##args);\
} \
}while(0);
#define DBG_NOTE(fmt,args...) \
do { \
if (logging_level & PQISRC_FLAGS_NOTE) { \
printf("[INFO]:[ %s ] [ %d ]"fmt,__func__,__LINE__,##args); \
printf("[NOTE]:[ %s ] [ %d ]"fmt,__func__,__LINE__,##args); \
} \
}while(0);

File diff suppressed because it is too large.


@ -1,5 +1,5 @@
/*-
* Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
* Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -62,6 +62,7 @@ pqisrc_acknowledge_event(pqisrc_softstate_t *softs,
struct pqi_event *event)
{
int ret;
pqi_event_acknowledge_request_t request;
ib_queue_t *ib_q = &softs->op_raid_ib_q[0];
int tmo = PQISRC_EVENT_ACK_RESP_TIMEOUT;
@ -77,8 +78,11 @@ pqisrc_acknowledge_event(pqisrc_softstate_t *softs,
request.additional_event_id = event->additional_event_id;
/* Submit Event Acknowledge */
pqisrc_submit_cmnd(softs, ib_q, &request);
ret = pqisrc_submit_cmnd(softs, ib_q, &request);
if (ret != PQI_STATUS_SUCCESS) {
DBG_ERR("Unable to submit acknowledge command\n");
goto out;
}
/*
* We have to special-case this type of request because the firmware
@ -91,9 +95,10 @@ pqisrc_acknowledge_event(pqisrc_softstate_t *softs,
if (tmo <= 0) {
DBG_ERR("wait for event acknowledge timed out\n");
DBG_ERR("tmo : %d\n",tmo);
}
}
DBG_FUNC(" OUT\n");
out:
DBG_FUNC("OUT\n");
}
/*
@ -178,11 +183,10 @@ pqisrc_process_event_intr_src(pqisrc_softstate_t *softs,int obq_id)
event_q = &softs->event_q;
obq_ci = event_q->ci_local;
obq_pi = *(event_q->pi_virt_addr);
DBG_INFO("Initial Event_q ci : %d Event_q pi : %d\n", obq_ci, obq_pi);
while(1) {
int event_index;
DBG_INFO("queue_id : %d ci : %d pi : %d\n",obq_id, obq_ci, obq_pi);
DBG_INFO("Event queue_id : %d, ci : %u, pi : %u\n",obq_id, obq_ci, obq_pi);
if (obq_pi == obq_ci)
break;
@ -191,10 +195,13 @@ pqisrc_process_event_intr_src(pqisrc_softstate_t *softs,int obq_id)
/* Copy the response */
memcpy(&response, event_q->array_virt_addr + (obq_ci * event_q->elem_size),
sizeof(pqi_event_response_t));
DBG_INFO("response.header.iu_type : 0x%x \n", response.header.iu_type);
DBG_INFO("response.event_type : 0x%x \n", response.event_type);
DBG_INIT("event iu_type=0x%x event_type=0x%x\n",
response.header.iu_type, response.event_type);
event_index = pqisrc_event_type_to_event_index(response.event_type);
if ( event_index == PQI_EVENT_LOGICAL_DEVICE) {
softs->ld_rescan = true;
}
if (event_index >= 0) {
if(response.request_acknowledge) {
@ -224,6 +231,58 @@ pqisrc_process_event_intr_src(pqisrc_softstate_t *softs,int obq_id)
}
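/*
 * Worked example of the consumer loop above (hypothetical values): with
 * elem_size = 32, ci = 3 and pi = 5 there are two unread events, at byte
 * offsets 96 and 128 in array_virt_addr; ci advances (wrapping at the
 * queue depth) until it catches up with pi.
 */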
/*
* Function used to build and send the vendor general request
* Used for configuring PQI feature bits between firmware and driver
*/
int
pqisrc_build_send_vendor_request(pqisrc_softstate_t *softs,
struct pqi_vendor_general_request *request)
{
int ret = PQI_STATUS_SUCCESS;
ib_queue_t *op_ib_q = &softs->op_raid_ib_q[PQI_DEFAULT_IB_QUEUE];
ob_queue_t *ob_q = &softs->op_ob_q[PQI_DEFAULT_IB_QUEUE];
rcb_t *rcb = NULL;
/* Get the tag */
request->request_id = pqisrc_get_tag(&softs->taglist);
if (INVALID_ELEM == request->request_id) {
DBG_ERR("Tag not available\n");
ret = PQI_STATUS_FAILURE;
goto err_notag;
}
request->response_id = ob_q->q_id;
rcb = &softs->rcb[request->request_id];
rcb->req_pending = true;
rcb->tag = request->request_id;
ret = pqisrc_submit_cmnd(softs, op_ib_q, request);
if (ret != PQI_STATUS_SUCCESS) {
DBG_ERR("Unable to submit command\n");
goto err_out;
}
ret = pqisrc_wait_on_condition(softs, rcb, PQISRC_CMD_TIMEOUT);
if (ret != PQI_STATUS_SUCCESS) {
DBG_ERR("Management request timed out!\n");
goto err_out;
}
ret = rcb->status;
err_out:
os_reset_rcb(rcb);
pqisrc_put_tag(&softs->taglist, request->request_id);
err_notag:
DBG_FUNC("OUT \n");
return ret;
}
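/*
 * Illustrative caller sketch (hypothetical; pqi_config_table_update()
 * below is the real in-tree user). Every vendor general request follows
 * the same build-then-send pattern:
 */
#if 0
	struct pqi_vendor_general_request req;

	memset(&req, 0, sizeof(req));
	req.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
	req.header.iu_length = sizeof(req) - PQI_REQUEST_HEADER_LENGTH;
	req.function_code = PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE;
	/* fill req.data.* as required by the chosen function code */
	if (pqisrc_build_send_vendor_request(softs, &req) != PQI_STATUS_SUCCESS)
		DBG_ERR("vendor general request failed\n");
#endif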
/*
* Function used to send a general management request to adapter.
*/
@ -248,6 +307,7 @@ pqisrc_submit_management_req(pqisrc_softstate_t *softs,
rcb = &softs->rcb[request->request_id];
rcb->req_pending = true;
rcb->tag = request->request_id;
/* Submit command on operational raid ib queue */
ret = pqisrc_submit_cmnd(softs, op_ib_q, request);
if (ret != PQI_STATUS_SUCCESS) {
@ -256,6 +316,7 @@ pqisrc_submit_management_req(pqisrc_softstate_t *softs,
}
ret = pqisrc_wait_on_condition(softs, rcb, PQISRC_CMD_TIMEOUT);
if (ret != PQI_STATUS_SUCCESS) {
DBG_ERR("Management request timed out !!\n");
goto err_cmd;
@ -331,7 +392,7 @@ pqisrc_report_event_config(pqisrc_softstate_t *softs)
DBG_FUNC(" IN\n");
memset(&buf_report_event, 0, sizeof(struct dma_mem));
buf_report_event.tag = "pqi_report_event_buf" ;
os_strlcpy(buf_report_event.tag, "pqi_report_event_buf", sizeof(buf_report_event.tag));
buf_report_event.size = alloc_size;
buf_report_event.align = PQISRC_DEFAULT_DMA_ALIGN;
@ -392,7 +453,7 @@ pqisrc_set_event_config(pqisrc_softstate_t *softs)
DBG_FUNC(" IN\n");
memset(&buf_set_event, 0, sizeof(struct dma_mem));
buf_set_event.tag = "pqi_set_event_buf";
os_strlcpy(buf_set_event.tag, "pqi_set_event_buf", sizeof(buf_set_event.tag));
buf_set_event.size = alloc_size;
buf_set_event.align = PQISRC_DEFAULT_DMA_ALIGN;

View File

@ -0,0 +1,520 @@
/*-
* Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "smartpqi_includes.h"
/*
 * Checks whether the firmware reports a feature as supported, given its bit position.
*/
static inline boolean_t
pqi_is_firmware_feature_supported(
struct pqi_config_table_firmware_features *firmware_features,
unsigned int bit_position)
{
unsigned int byte_index;
byte_index = bit_position / BITS_PER_BYTE;
if (byte_index >= firmware_features->num_elements) {
DBG_ERR_NO_SOFTS("Invalid byte index for bit position %u\n",
bit_position);
return false;
}
return (firmware_features->features_supported[byte_index] &
(1 << (bit_position % BITS_PER_BYTE))) ? true : false;
}
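/*
 * Worked example of the bit math above (hypothetical bit_position 11):
 * byte_index = 11 / BITS_PER_BYTE = 1 and the mask is
 * 1 << (11 % BITS_PER_BYTE) = 0x08, so the feature is reported as
 * supported iff (features_supported[1] & 0x08) is non-zero.
 */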
/*
 * Indexes into the enabled section of the firmware features
 * and reports the current enabled status for the given
 * bit position.
*/
static inline boolean_t
pqi_is_firmware_feature_enabled(
struct pqi_config_table_firmware_features *firmware_features,
uint8_t *firmware_features_iomem_addr,
unsigned int bit_position)
{
unsigned int byte_index;
uint8_t *features_enabled_iomem_addr;
byte_index = (bit_position / BITS_PER_BYTE) +
(firmware_features->num_elements * 2);
features_enabled_iomem_addr = firmware_features_iomem_addr +
offsetof(struct pqi_config_table_firmware_features,
features_supported) + byte_index;
return (*features_enabled_iomem_addr &
(1 << (bit_position % BITS_PER_BYTE))) ? true : false;
}
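/*
 * Layout assumed by the byte_index math above: the section carries three
 * parallel byte arrays of num_elements bytes each, back to back --
 * controller-supported bits, host-requested bits, then firmware-enabled
 * bits -- hence the (num_elements * 2) offset to reach the enabled bytes.
 */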
/*
* Sets the given bit position for the driver to request the indicated
* firmware feature be enabled.
*/
static inline void
pqi_request_firmware_feature(
struct pqi_config_table_firmware_features *firmware_features,
unsigned int bit_position)
{
unsigned int byte_index;
/* byte_index adjusted to index into requested start bits */
byte_index = (bit_position / BITS_PER_BYTE) +
firmware_features->num_elements;
/* setting requested bits of local firmware_features */
firmware_features->features_supported[byte_index] |=
(1 << (bit_position % BITS_PER_BYTE));
}
/*
* Creates and sends the request for firmware to update the config
* table.
*/
static int
pqi_config_table_update(pqisrc_softstate_t *softs,
uint16_t first_section, uint16_t last_section)
{
struct pqi_vendor_general_request request;
int ret;
memset(&request, 0, sizeof(request));
request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
request.header.iu_length = sizeof(request) - PQI_REQUEST_HEADER_LENGTH;
request.function_code = PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE;
request.data.config_table_update.first_section = first_section;
request.data.config_table_update.last_section = last_section;
ret = pqisrc_build_send_vendor_request(softs, &request);
if (ret != PQI_STATUS_SUCCESS) {
DBG_ERR("Failed to submit vendor general request IU, Ret status: %d\n", ret);
}
return ret;
}
/*
* Copies requested features bits into firmware config table,
* checks for support, and returns status of updating the config table.
*/
static int
pqi_enable_firmware_features(pqisrc_softstate_t *softs,
struct pqi_config_table_firmware_features *firmware_features,
uint8_t *firmware_features_abs_addr)
{
uint8_t *features_requested;
uint8_t *features_requested_abs_addr;
uint16_t *host_max_known_feature_iomem_addr;
uint16_t pqi_max_feature = PQI_FIRMWARE_FEATURE_MAXIMUM;
features_requested = firmware_features->features_supported +
firmware_features->num_elements;
features_requested_abs_addr = firmware_features_abs_addr +
(features_requested - (uint8_t*)firmware_features);
/*
* NOTE: This memcpy is writing to a BAR-mapped address
 * which may not be safe on all OSes without a proper API
*/
memcpy(features_requested_abs_addr, features_requested,
firmware_features->num_elements);
if (pqi_is_firmware_feature_supported(firmware_features,
PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE)) {
host_max_known_feature_iomem_addr =
(uint16_t*)(features_requested_abs_addr +
(firmware_features->num_elements * 2) + sizeof(uint16_t));
/*
* NOTE: This writes to a BAR-mapped address
 * which may not be safe on all OSes without a proper API
*/
*host_max_known_feature_iomem_addr = pqi_max_feature;
}
return pqi_config_table_update(softs,
PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES,
PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES);
}
typedef struct pqi_firmware_feature pqi_firmware_feature_t;
typedef void (*feature_status_fn)(pqisrc_softstate_t *softs,
pqi_firmware_feature_t *firmware_feature);
struct pqi_firmware_feature {
char *feature_name;
unsigned int feature_bit;
boolean_t supported;
boolean_t enabled;
feature_status_fn feature_status;
};
static void
pqi_firmware_feature_status(pqisrc_softstate_t *softs,
struct pqi_firmware_feature *firmware_feature)
{
if (!firmware_feature->supported) {
DBG_NOTE("%s not supported by controller\n",
firmware_feature->feature_name);
return;
}
if (firmware_feature->enabled) {
DBG_NOTE("%s enabled\n", firmware_feature->feature_name);
return;
}
DBG_NOTE("failed to enable %s\n", firmware_feature->feature_name);
}
static void
pqi_ctrl_update_feature_flags(pqisrc_softstate_t *softs,
struct pqi_firmware_feature *firmware_feature)
{
switch (firmware_feature->feature_bit) {
case PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS:
softs->aio_raid1_write_bypass = firmware_feature->enabled;
break;
case PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS:
softs->aio_raid5_write_bypass = firmware_feature->enabled;
break;
case PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS:
softs->aio_raid6_write_bypass = firmware_feature->enabled;
break;
case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT:
softs->timeout_in_passthrough = true;
break;
case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT:
softs->timeout_in_tmf = true;
break;
case PQI_FIRMWARE_FEATURE_UNIQUE_SATA_WWN:
break;
case PQI_FIRMWARE_FEATURE_PAGE83_IDENTIFIER_FOR_RPL_WWID:
softs->page83id_in_rpl = true;
break;
default:
DBG_NOTE("Nothing to do\n");
		return;
}
/* for any valid feature, also go update the feature status. */
pqi_firmware_feature_status(softs, firmware_feature);
}
static inline void
pqi_firmware_feature_update(pqisrc_softstate_t *softs,
struct pqi_firmware_feature *firmware_feature)
{
if (firmware_feature->feature_status)
firmware_feature->feature_status(softs, firmware_feature);
}
/* Defines PQI features that driver wishes to support */
static struct pqi_firmware_feature pqi_firmware_features[] = {
#if 0
{
.feature_name = "Online Firmware Activation",
.feature_bit = PQI_FIRMWARE_FEATURE_OFA,
.feature_status = pqi_firmware_feature_status,
},
{
.feature_name = "Serial Management Protocol",
.feature_bit = PQI_FIRMWARE_FEATURE_SMP,
.feature_status = pqi_firmware_feature_status,
},
#endif
{
.feature_name = "SATA WWN Unique ID",
.feature_bit = PQI_FIRMWARE_FEATURE_UNIQUE_SATA_WWN,
.feature_status = pqi_ctrl_update_feature_flags,
},
{
.feature_name = "RAID IU Timeout",
.feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT,
.feature_status = pqi_ctrl_update_feature_flags,
},
{
.feature_name = "TMF IU Timeout",
.feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT,
.feature_status = pqi_ctrl_update_feature_flags,
},
{
.feature_name = "Support for RPL WWID filled by Page83 identifier",
.feature_bit = PQI_FIRMWARE_FEATURE_PAGE83_IDENTIFIER_FOR_RPL_WWID,
.feature_status = pqi_ctrl_update_feature_flags,
},
/* Features independent of Maximum Known Feature should be added
   before Maximum Known Feature */
{
.feature_name = "Maximum Known Feature",
.feature_bit = PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE,
.feature_status = pqi_firmware_feature_status,
},
{
.feature_name = "RAID 0 Read Bypass",
.feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS,
.feature_status = pqi_firmware_feature_status,
},
{
.feature_name = "RAID 1 Read Bypass",
.feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS,
.feature_status = pqi_firmware_feature_status,
},
{
.feature_name = "RAID 5 Read Bypass",
.feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS,
.feature_status = pqi_firmware_feature_status,
},
{
.feature_name = "RAID 6 Read Bypass",
.feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS,
.feature_status = pqi_firmware_feature_status,
},
{
.feature_name = "RAID 0 Write Bypass",
.feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS,
.feature_status = pqi_firmware_feature_status,
},
{
.feature_name = "RAID 1 Write Bypass",
.feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS,
.feature_status = pqi_ctrl_update_feature_flags,
},
{
.feature_name = "RAID 5 Write Bypass",
.feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS,
.feature_status = pqi_ctrl_update_feature_flags,
},
{
.feature_name = "RAID 6 Write Bypass",
.feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS,
.feature_status = pqi_ctrl_update_feature_flags,
},
#if 0
{
.feature_name = "New Soft Reset Handshake",
.feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
.feature_status = pqi_ctrl_update_feature_flags,
},
#endif
};
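/*
 * Illustrative sketch: supporting an additional feature bit would mean
 * appending an entry like the following (the name and bit below are
 * hypothetical) and, if the driver must react to it, handling the bit
 * in pqi_ctrl_update_feature_flags():
 */
#if 0
	{
		.feature_name = "Hypothetical New Feature",
		.feature_bit = PQI_FIRMWARE_FEATURE_HYPOTHETICAL,
		.feature_status = pqi_ctrl_update_feature_flags,
	},
#endif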
static void
pqi_process_firmware_features(pqisrc_softstate_t *softs,
void *features, void *firmware_features_abs_addr)
{
int rc;
struct pqi_config_table_firmware_features *firmware_features = features;
unsigned int i;
unsigned int num_features_supported;
	/* Iterate through the local PQI feature support list to
	   see if the controller also supports each feature */
for (i = 0, num_features_supported = 0;
i < ARRAY_SIZE(pqi_firmware_features); i++) {
		/* Skip the SATA unique WWN feature check unless it was
		 * enabled via the corresponding module parameter. */
if ((pqi_firmware_features[i].feature_bit ==
PQI_FIRMWARE_FEATURE_UNIQUE_SATA_WWN) &&
(!softs->sata_unique_wwn)) {
continue;
}
if (pqi_is_firmware_feature_supported(firmware_features,
pqi_firmware_features[i].feature_bit)) {
pqi_firmware_features[i].supported = true;
num_features_supported++;
} else {
DBG_WARN("Feature %s is not supported by firmware\n",
pqi_firmware_features[i].feature_name);
pqi_firmware_feature_update(softs,
&pqi_firmware_features[i]);
/* if max known feature bit isn't supported,
* then no other feature bits are supported.
*/
if (pqi_firmware_features[i].feature_bit ==
PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE)
break;
}
}
DBG_INFO("Num joint features supported : %u \n", num_features_supported);
if (num_features_supported == 0)
return;
/* request driver features that are also on firmware-supported list */
for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
if (!pqi_firmware_features[i].supported)
continue;
#ifdef DEVICE_HINT
if (check_device_hint_status(softs, pqi_firmware_features[i].feature_bit))
continue;
#endif
pqi_request_firmware_feature(firmware_features,
pqi_firmware_features[i].feature_bit);
}
/* enable the features that were successfully requested. */
rc = pqi_enable_firmware_features(softs, firmware_features,
firmware_features_abs_addr);
if (rc) {
DBG_ERR("failed to enable firmware features in PQI configuration table\n");
for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
if (!pqi_firmware_features[i].supported)
continue;
pqi_firmware_feature_update(softs,
&pqi_firmware_features[i]);
}
return;
}
/* report the features that were successfully enabled. */
for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
if (!pqi_firmware_features[i].supported)
continue;
if (pqi_is_firmware_feature_enabled(firmware_features,
firmware_features_abs_addr,
pqi_firmware_features[i].feature_bit)) {
pqi_firmware_features[i].enabled = true;
} else {
DBG_WARN("Feature %s could not be enabled.\n",
pqi_firmware_features[i].feature_name);
}
pqi_firmware_feature_update(softs,
&pqi_firmware_features[i]);
}
}
static void
pqi_init_firmware_features(void)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
pqi_firmware_features[i].supported = false;
pqi_firmware_features[i].enabled = false;
}
}
static void
pqi_process_firmware_features_section(pqisrc_softstate_t *softs,
void *features, void *firmware_features_abs_addr)
{
pqi_init_firmware_features();
pqi_process_firmware_features(softs, features, firmware_features_abs_addr);
}
/*
* Get the PQI configuration table parameters.
 * Currently used for the heart-beat counter scratch-pad register.
*/
int
pqisrc_process_config_table(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_FAILURE;
uint32_t config_table_size;
uint32_t section_off;
uint8_t *config_table_abs_addr;
struct pqi_conf_table *conf_table;
struct pqi_conf_table_section_header *section_hdr;
config_table_size = softs->pqi_cap.conf_tab_sz;
if (config_table_size < sizeof(*conf_table) ||
config_table_size > PQI_CONF_TABLE_MAX_LEN) {
DBG_ERR("Invalid PQI conf table length of %u\n",
config_table_size);
return ret;
}
conf_table = os_mem_alloc(softs, config_table_size);
if (!conf_table) {
DBG_ERR("Failed to allocate memory for PQI conf table\n");
return ret;
}
config_table_abs_addr = (uint8_t *)(softs->pci_mem_base_vaddr +
softs->pqi_cap.conf_tab_off);
PCI_MEM_GET_BUF(softs, config_table_abs_addr,
softs->pqi_cap.conf_tab_off,
(uint8_t*)conf_table, config_table_size);
if (memcmp(conf_table->sign, PQI_CONF_TABLE_SIGNATURE,
sizeof(conf_table->sign)) != 0) {
DBG_ERR("Invalid PQI config signature\n");
goto out;
}
section_off = LE_32(conf_table->first_section_off);
while (section_off) {
if (section_off + sizeof(*section_hdr) >= config_table_size) {
DBG_INFO("Reached end of PQI config table. Breaking off.\n");
break;
}
section_hdr = (struct pqi_conf_table_section_header *)((uint8_t *)conf_table + section_off);
switch (LE_16(section_hdr->section_id)) {
case PQI_CONF_TABLE_SECTION_GENERAL_INFO:
break;
case PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES:
pqi_process_firmware_features_section(softs, section_hdr, (config_table_abs_addr + section_off));
break;
case PQI_CONF_TABLE_SECTION_FIRMWARE_ERRATA:
case PQI_CONF_TABLE_SECTION_DEBUG:
break;
case PQI_CONF_TABLE_SECTION_HEARTBEAT:
softs->heartbeat_counter_off = softs->pqi_cap.conf_tab_off +
section_off +
offsetof(struct pqi_conf_table_heartbeat, heartbeat_counter);
softs->heartbeat_counter_abs_addr = (uint64_t *)(softs->pci_mem_base_vaddr +
softs->heartbeat_counter_off);
ret = PQI_STATUS_SUCCESS;
break;
case PQI_CONF_TABLE_SOFT_RESET:
break;
default:
DBG_NOTE("unrecognized PQI config table section ID: 0x%x\n",
LE_16(section_hdr->section_id));
break;
}
section_off = LE_16(section_hdr->next_section_off);
}
out:
os_mem_free(softs, (void *)conf_table, config_table_size);
return ret;
}
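/*
 * Worked offset example for the heartbeat case above (hypothetical
 * values): with conf_tab_off = 0x800, section_off = 0x40 and
 * heartbeat_counter at offset 8 within its section, the counter is read
 * at BAR offset 0x848, i.e. pci_mem_base_vaddr + 0x848.
 */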

View File

@ -1,5 +1,5 @@
/*-
* Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
* Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -26,57 +26,6 @@
#include "smartpqi_includes.h"
/* read and modify controller diagnostic option - PQI_PTRAID_UPDATE_ON_RESCAN_LUNS */
void
pqisrc_ctrl_diagnostic_options(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
uint32_t diags_options = 0;
pqisrc_raid_req_t request;
DBG_NOTE("IN\n");
memset(&request, 0, sizeof(request));
/* read diags options of controller */
ret = pqisrc_build_send_raid_request(softs, &request,
(void*)&diags_options,
sizeof(diags_options),
BMIC_SENSE_DIAGS_OPTIONS,
0, (uint8_t *)RAID_CTLR_LUNID, NULL);
if (ret != PQI_STATUS_SUCCESS) {
DBG_WARN("Request failed for BMIC Sense Diags Option command."
"ret:%d\n",ret);
return;
}
DBG_NOTE("diags options data after read: %#x\n",diags_options);
diags_options |= PQI_PTRAID_UPDATE_ON_RESCAN_LUNS;
DBG_NOTE("diags options data to write: %#x\n",diags_options);
memset(&request, 0, sizeof(request));
/* write specified diags options to controller */
ret = pqisrc_build_send_raid_request(softs, &request,
(void*)&diags_options,
sizeof(diags_options),
BMIC_SET_DIAGS_OPTIONS,
0, (uint8_t *)RAID_CTLR_LUNID, NULL);
if (ret != PQI_STATUS_SUCCESS)
DBG_WARN("Request failed for BMIC Set Diags Option command."
"ret:%d\n",ret);
#if 0
diags_options = 0;
memset(&request, 0, sizeof(request));
ret = pqisrc_build_send_raid_request(softs, &request,
(void*)&diags_options,
sizeof(diags_options),
BMIC_SENSE_DIAGS_OPTIONS,
0, (uint8_t *)RAID_CTLR_LUNID, NULL);
if (ret != PQI_STATUS_SUCCESS)
DBG_WARN("Request failed for BMIC Sense Diags Option command."
"ret:%d\n",ret);
DBG_NOTE("diags options after re-read: %#x\n",diags_options);
#endif
DBG_NOTE("OUT\n");
}
/*
* Function used to validate the adapter health.
*/
@ -89,7 +38,6 @@ pqisrc_ctrl_offline(pqisrc_softstate_t *softs)
return !softs->ctrl_online;
}
/* Function used to set/clear the legacy INTx bit in the Legacy Interrupt
 * INTx mask clear PQI register
*/
@ -97,20 +45,14 @@ void
pqisrc_configure_legacy_intx(pqisrc_softstate_t *softs, boolean_t enable_intx)
{
uint32_t intx_mask;
uint32_t *reg_addr __unused;
DBG_FUNC("IN\n");
DBG_FUNC("IN\n");
if (enable_intx)
reg_addr = &softs->pqi_reg->legacy_intr_mask_clr;
else
reg_addr = &softs->pqi_reg->legacy_intr_mask_set;
intx_mask = PCI_MEM_GET32(softs, reg_addr, PQI_LEGACY_INTR_MASK_CLR);
intx_mask = PCI_MEM_GET32(softs, 0, PQI_LEGACY_INTR_MASK_CLR);
intx_mask |= PQISRC_LEGACY_INTX_MASK;
PCI_MEM_PUT32(softs, reg_addr, PQI_LEGACY_INTR_MASK_CLR ,intx_mask);
PCI_MEM_PUT32(softs, 0, PQI_LEGACY_INTR_MASK_CLR ,intx_mask);
DBG_FUNC("OUT\n");
DBG_FUNC("OUT\n");
}
/*
@ -120,16 +62,14 @@ void
pqisrc_take_devices_offline(pqisrc_softstate_t *softs)
{
pqi_scsi_dev_t *device = NULL;
int i,j;
int i;
DBG_FUNC("IN\n");
for(i = 0; i < PQI_MAX_DEVICES; i++) {
for(j = 0; j < PQI_MAX_MULTILUN; j++) {
if(softs->device_list[i][j] == NULL)
continue;
device = softs->device_list[i][j];
pqisrc_remove_device(softs, device);
}
device = softs->dev_list[i];
if(device == NULL)
continue;
pqisrc_remove_device(softs, device);
}
DBG_FUNC("OUT\n");
@ -143,17 +83,17 @@ pqisrc_take_ctrl_offline(pqisrc_softstate_t *softs)
{
DBG_FUNC("IN\n");
softs->ctrl_online = false;
int lockupcode = 0;
softs->ctrl_online = false;
if (SIS_IS_KERNEL_PANIC(softs)) {
lockupcode = PCI_MEM_GET32(softs, &softs->ioa_reg->mb[7], LEGACY_SIS_SRCV_OFFSET_MAILBOX_7);
DBG_ERR("Controller FW is not running, Lockup code = %x\n", lockupcode);
}
else {
pqisrc_trigger_nmi_sis(softs);
}
lockupcode = PCI_MEM_GET32(softs, &softs->ioa_reg->mb[7], LEGACY_SIS_SRCV_OFFSET_MAILBOX_7);
DBG_ERR("Controller FW is not running, Lockup code = %x\n", lockupcode);
}
else {
pqisrc_trigger_nmi_sis(softs);
}
os_complete_outstanding_cmds_nodevice(softs);
pqisrc_wait_for_rescan_complete(softs);
@ -169,23 +109,34 @@ void
pqisrc_heartbeat_timer_handler(pqisrc_softstate_t *softs)
{
uint8_t take_offline = false;
uint64_t new_heartbeat;
static uint32_t running_ping_cnt = 0;
DBG_FUNC("IN\n");
if (CTRLR_HEARTBEAT_CNT(softs) == softs->prev_heartbeat_count) {
new_heartbeat = CTRLR_HEARTBEAT_CNT(softs);
DBG_IO("heartbeat old=%lx new=%lx\n", softs->prev_heartbeat_count, new_heartbeat);
if (new_heartbeat == softs->prev_heartbeat_count) {
take_offline = true;
goto take_ctrl_offline;
}
softs->prev_heartbeat_count = CTRLR_HEARTBEAT_CNT(softs);
DBG_INFO("CTRLR_HEARTBEAT_CNT(softs) = %lx \
softs->prev_heartbeat_count = %lx\n",
CTRLR_HEARTBEAT_CNT(softs), softs->prev_heartbeat_count);
#if 1
/* print every 30 calls (should print once/minute) */
running_ping_cnt++;
if ((running_ping_cnt % 30) == 0)
print_all_counters(softs, COUNTER_FLAG_ONLY_NON_ZERO);
#endif
softs->prev_heartbeat_count = new_heartbeat;
take_ctrl_offline:
if (take_offline){
DBG_ERR("controller is offline\n");
pqisrc_take_ctrl_offline(softs);
os_stop_heartbeat_timer(softs);
pqisrc_take_ctrl_offline(softs);
}
DBG_FUNC("OUT\n");
}
@ -253,7 +204,7 @@ pqisrc_scsi3addr_equal(uint8_t *scsi3addr1, uint8_t *scsi3addr2)
boolean_t
pqisrc_is_hba_lunid(uint8_t *scsi3addr)
{
return pqisrc_scsi3addr_equal(scsi3addr, (uint8_t*)RAID_CTLR_LUNID);
return pqisrc_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}
/* Function used to validate type of device */
@ -287,8 +238,8 @@ static char *raid_levels[] = {
"RAID 1(1+0)",
"RAID 5",
"RAID 5+1",
"RAID ADG",
"RAID 1(ADM)",
"RAID 6",
"RAID 1(Triple)",
};
/* Get the RAID level from the index */
@ -417,6 +368,7 @@ check_struct_sizes(void)
}
#if 0
uint32_t
pqisrc_count_num_scsi_active_requests_on_dev(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
@ -436,7 +388,7 @@ void
check_device_pending_commands_to_complete(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
uint32_t tag = softs->max_outstanding_io, active_requests;
uint64_t timeout = 0, delay_in_usec = 1000; //In micro Seconds
uint64_t timeout = 0, delay_in_usec = 1000; /* in microseconds */
rcb_t* rcb;
DBG_FUNC("IN\n");
@ -451,7 +403,7 @@ check_device_pending_commands_to_complete(pqisrc_softstate_t *softs, pqi_scsi_de
do {
rcb = &softs->rcb[tag];
if(rcb && IS_OS_SCSICMD(rcb) && (rcb->dvp == device) && rcb->req_pending) {
OS_BUSYWAIT(delay_in_usec);
OS_SLEEP(delay_in_usec);
timeout += delay_in_usec;
}
else
@ -461,49 +413,25 @@ check_device_pending_commands_to_complete(pqisrc_softstate_t *softs, pqi_scsi_de
return;
}
} while(tag);
}
inline uint64_t
pqisrc_increment_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
#if PQISRC_DEVICE_IO_COUNTER
/*Increment device active io count by one*/
return OS_ATOMIC64_INC(&device->active_requests);
#endif
}
inline uint64_t
pqisrc_decrement_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
#if PQISRC_DEVICE_IO_COUNTER
/*Decrement device active io count by one*/
return OS_ATOMIC64_DEC(&device->active_requests);
#endif
}
extern inline uint64_t
pqisrc_increment_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device);
inline void
pqisrc_init_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
#if PQISRC_DEVICE_IO_COUNTER
/* Reset device count to Zero */
OS_ATOMIC64_INIT(&device->active_requests, 0);
#endif
}
extern inline uint64_t
pqisrc_decrement_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device);
inline uint64_t
pqisrc_read_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
#if PQISRC_DEVICE_IO_COUNTER
/* read device active count*/
return OS_ATOMIC64_READ(&device->active_requests);
#endif
}
extern inline void
pqisrc_init_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device);
extern inline uint64_t
pqisrc_read_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device);
void
pqisrc_wait_for_device_commands_to_complete(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
uint64_t timeout_in_usec = 0, delay_in_usec = 1000; //In microseconds
uint64_t timeout_in_usec = 0, delay_in_usec = 1000; /* In microseconds */
DBG_FUNC("IN\n");
@ -511,16 +439,16 @@ pqisrc_wait_for_device_commands_to_complete(pqisrc_softstate_t *softs, pqi_scsi_
return;
#if PQISRC_DEVICE_IO_COUNTER
DBG_NOTE("Device Outstanding IO count = %ld\n", pqisrc_read_device_active_io(softs, device));
DBG_WARN_BTL(device,"Device Outstanding IO count = %lu\n", pqisrc_read_device_active_io(softs, device));
while(pqisrc_read_device_active_io(softs, device)) {
OS_BUSYWAIT(delay_in_usec); // In microseconds
OS_BUSYWAIT(delay_in_usec); /* In microseconds */
if(!softs->ctrl_online) {
DBG_WARN("Controller Offline was detected.\n");
}
timeout_in_usec += delay_in_usec;
if(timeout_in_usec >= PQISRC_PENDING_IO_TIMEOUT_USEC) {
DBG_WARN("timed out waiting for pending IO. DeviceOutStandingIo's=%ld\n",
DBG_WARN_BTL(device,"timed out waiting for pending IO. DeviceOutStandingIo's=%lu\n",
pqisrc_read_device_active_io(softs, device));
return;
}

View File

@ -0,0 +1,66 @@
/*-
* Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef _PQI_HELPER_H
#define _PQI_HELPER_H
inline uint64_t
pqisrc_increment_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
#if PQISRC_DEVICE_IO_COUNTER
/*Increment device active io count by one*/
return OS_ATOMIC64_INC(&device->active_requests);
#endif
}
inline uint64_t
pqisrc_decrement_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
#if PQISRC_DEVICE_IO_COUNTER
/*Decrement device active io count by one*/
return OS_ATOMIC64_DEC(&device->active_requests);
#endif
}
inline void
pqisrc_init_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
#if PQISRC_DEVICE_IO_COUNTER
/* Reset device count to Zero */
OS_ATOMIC64_INIT(&device->active_requests, 0);
#endif
}
inline uint64_t
pqisrc_read_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
#if PQISRC_DEVICE_IO_COUNTER
/* read device active count*/
return OS_ATOMIC64_READ(&device->active_requests);
#endif
}
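/*
 * Illustrative usage sketch (hypothetical call sites): the submit path
 * increments the per-device counter and the completion path decrements
 * it, which lets pqisrc_wait_for_device_commands_to_complete() poll the
 * count down to zero.
 */
#if 0
	pqisrc_init_device_active_io(softs, device);		/* device add */
	pqisrc_increment_device_active_io(softs, device);	/* I/O submit */
	pqisrc_decrement_device_active_io(softs, device);	/* I/O complete */
#endif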
#endif /* _PQI_HELPER_H */

View File

@ -1,5 +1,5 @@
/*-
* Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
* Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -77,11 +77,10 @@
#include <vm/pmap.h>
#include "smartpqi_defines.h"
#include "smartpqi_structures.h"
#include "smartpqi_prototypes.h"
#include "smartpqi_ioctl.h"
#include "smartpqi_helper.h"
#endif // _PQI_INCLUDES_H
#endif /* _PQI_INCLUDES_H*/

View File

@ -1,5 +1,5 @@
/*-
* Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
* Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -26,9 +26,6 @@
#include "smartpqi_includes.h"
/* 5 mins timeout for quiesce */
#define PQI_QUIESCE_TIMEOUT 300000
/*
 * Request the PQI capabilities supported by the adapter.
*/
@ -36,7 +33,7 @@ static int
pqisrc_report_pqi_capability(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
DBG_FUNC("IN\n");
gen_adm_req_iu_t admin_req;
@ -49,7 +46,6 @@ pqisrc_report_pqi_capability(pqisrc_softstate_t *softs)
capability = os_mem_alloc(softs, sizeof(*capability));
if (!capability) {
DBG_ERR("Failed to allocate memory for capability\n");
ret = PQI_STATUS_FAILURE;
goto err_out;
}
@ -57,7 +53,7 @@ pqisrc_report_pqi_capability(pqisrc_softstate_t *softs)
memset(&admin_resp, 0, sizeof(admin_resp));
memset(&pqi_cap_dma_buf, 0, sizeof(struct dma_mem));
pqi_cap_dma_buf.tag = "pqi_cap_buf";
os_strlcpy(pqi_cap_dma_buf.tag, "pqi_cap_buf", sizeof(pqi_cap_dma_buf.tag));
pqi_cap_dma_buf.size = REPORT_PQI_DEV_CAP_DATA_BUF_SIZE;
pqi_cap_dma_buf.align = PQISRC_DEFAULT_DMA_ALIGN;
@ -110,6 +106,12 @@ pqisrc_report_pqi_capability(pqisrc_softstate_t *softs)
DBG_INIT("softs->ib_spanning_supported: %d\n", softs->ib_spanning_supported);
DBG_INIT("softs->ob_spanning_supported: %d\n", softs->ob_spanning_supported);
/* Not expecting these to change, could cause problems if they do */
ASSERT(softs->pqi_dev_cap.max_iq_elem_len == PQISRC_OP_MAX_ELEM_SIZE);
ASSERT(softs->pqi_dev_cap.min_iq_elem_len == PQISRC_OP_MIN_ELEM_SIZE);
ASSERT(softs->max_ib_iu_length_per_fw == PQISRC_MAX_SPANNING_IU_LENGTH);
ASSERT(softs->ib_spanning_supported == true);
os_mem_free(softs, (void *)capability,
REPORT_PQI_DEV_CAP_DATA_BUF_SIZE);
@ -135,6 +137,7 @@ pqisrc_report_pqi_capability(pqisrc_softstate_t *softs)
void
pqisrc_free_rcb(pqisrc_softstate_t *softs, int req_count)
{
uint32_t num_req;
size_t size;
int i;
@ -152,6 +155,7 @@ pqisrc_free_rcb(pqisrc_softstate_t *softs, int req_count)
/*
* Allocate memory for rcb and SG descriptors.
* TODO : Sg list should be created separately
*/
static int
pqisrc_allocate_rcb(pqisrc_softstate_t *softs)
@ -168,11 +172,11 @@ pqisrc_allocate_rcb(pqisrc_softstate_t *softs)
/* Set maximum outstanding requests */
/* The valid tag values are from 1, 2, ..., softs->max_outstanding_io
* The rcb will be accessed by using the tag as index
* As 0 tag index is not used, we need to allocate one extra.
* As 0 tag index is not used, we need to allocate one extra.
*/
softs->max_outstanding_io = softs->pqi_cap.max_outstanding_io;
num_req = softs->max_outstanding_io + 1;
DBG_INIT("Max Outstanding IO reset to %d\n", num_req);
DBG_INIT("Max Outstanding IO reset to %u\n", num_req);
alloc_size = num_req * sizeof(rcb_t);
@ -192,9 +196,10 @@ pqisrc_allocate_rcb(pqisrc_softstate_t *softs)
prcb = &softs->rcb[1];
/* Initialize rcb */
for(i=1; i < num_req; i++) {
/* TODO: the tag string is built in a local buffer here */
char tag[15];
sprintf(tag, "sg_dma_buf%d", i);
softs->sg_dma_desc[i].tag = tag;
os_strlcpy(softs->sg_dma_desc[i].tag, tag, sizeof(softs->sg_dma_desc[i].tag));
softs->sg_dma_desc[i].size = sg_buf_size;
softs->sg_dma_desc[i].align = PQISRC_DEFAULT_DMA_ALIGN;
@ -231,7 +236,9 @@ pqisrc_decide_opq_config(pqisrc_softstate_t *softs)
DBG_INIT("softs->intr_count : %d softs->num_cpus_online : %d",
softs->intr_count, softs->num_cpus_online);
/* TODO : Get the number of IB and OB queues from OS layer */
if (softs->intr_count == 1 || softs->num_cpus_online == 1) {
/* Share the event and Operational queue. */
softs->num_op_obq = 1;
@ -244,32 +251,33 @@ pqisrc_decide_opq_config(pqisrc_softstate_t *softs)
softs->share_opq_and_eventq = false;
}
/* If the available interrupt count is more than one,
we don't need to share the interrupt for IO and event queue */
	we don't need to share the interrupt for IO and event queue */
if (softs->intr_count > 1)
softs->share_opq_and_eventq = false;
DBG_INIT("softs->num_op_obq : %d\n",softs->num_op_obq);
DBG_INIT("softs->num_op_obq : %u\n",softs->num_op_obq);
/* TODO : Reset the interrupt count based on number of queues*/
softs->num_op_raid_ibq = softs->num_op_obq;
softs->num_op_aio_ibq = softs->num_op_raid_ibq;
softs->ibq_elem_size = softs->pqi_dev_cap.max_iq_elem_len * 16;
softs->obq_elem_size = softs->pqi_dev_cap.max_oq_elem_len * 16;
softs->max_ibq_elem_size = softs->pqi_dev_cap.max_iq_elem_len * 16;
softs->max_obq_elem_size = softs->pqi_dev_cap.max_oq_elem_len * 16;
if (softs->max_ib_iu_length_per_fw == 256 &&
softs->ob_spanning_supported) {
/* older f/w that doesn't actually support spanning. */
softs->max_ib_iu_length = softs->ibq_elem_size;
softs->max_ib_iu_length = softs->max_ibq_elem_size;
} else {
/* max. inbound IU length is a multiple of our inbound element size. */
softs->max_ib_iu_length =
(softs->max_ib_iu_length_per_fw / softs->ibq_elem_size) *
softs->ibq_elem_size;
softs->max_ib_iu_length = PQISRC_ROUND_DOWN(softs->max_ib_iu_length_per_fw,
softs->max_ibq_elem_size);
}
	/* If Max. Outstanding IO comes with a Max. Spanning element count,
	   the elements needed per IO are the product of
	   Max. Outstanding IO and Max. Spanning elements */
total_iq_elements = (softs->max_outstanding_io *
(softs->max_ib_iu_length / softs->ibq_elem_size));
(softs->max_ib_iu_length / softs->max_ibq_elem_size));
softs->num_elem_per_op_ibq = total_iq_elements / softs->num_op_raid_ibq;
softs->num_elem_per_op_ibq = MIN(softs->num_elem_per_op_ibq,
@ -279,15 +287,23 @@ pqisrc_decide_opq_config(pqisrc_softstate_t *softs)
softs->num_elem_per_op_obq = MIN(softs->num_elem_per_op_obq,
softs->pqi_dev_cap.max_oq_elements);
softs->max_sg_per_iu = ((softs->max_ib_iu_length -
softs->ibq_elem_size) /
sizeof(sgt_t)) +
MAX_EMBEDDED_SG_IN_FIRST_IU;
/* spanning elements should be 9 (1152/128) */
softs->max_spanning_elems = softs->max_ib_iu_length/softs->max_ibq_elem_size;
ASSERT(softs->max_spanning_elems == PQISRC_MAX_SPANNING_ELEMS);
DBG_INIT("softs->max_ib_iu_length: %d\n", softs->max_ib_iu_length);
DBG_INIT("softs->num_elem_per_op_ibq: %d\n", softs->num_elem_per_op_ibq);
DBG_INIT("softs->num_elem_per_op_obq: %d\n", softs->num_elem_per_op_obq);
DBG_INIT("softs->max_sg_per_iu: %d\n", softs->max_sg_per_iu);
/* max SGs should be 8 (128/16) */
softs->max_sg_per_single_iu_element = softs->max_ibq_elem_size / sizeof(sgt_t);
ASSERT(softs->max_sg_per_single_iu_element == MAX_EMBEDDED_SG_IN_IU);
/* max SGs for spanning cmd should be 68 */
softs->max_sg_per_spanning_cmd = (softs->max_spanning_elems - 1) * softs->max_sg_per_single_iu_element;
softs->max_sg_per_spanning_cmd += MAX_EMBEDDED_SG_IN_FIRST_IU_DEFAULT;
DBG_INIT("softs->max_ib_iu_length: %d\n", softs->max_ib_iu_length); /* 1152 per FW advertisement */
DBG_INIT("softs->num_elem_per_op_ibq: %u\n", softs->num_elem_per_op_ibq); /* 32 for xcal */
DBG_INIT("softs->num_elem_per_op_obq: %u\n", softs->num_elem_per_op_obq); /* 256 for xcal */
DBG_INIT("softs->max_spanning_elems: %d\n", softs->max_spanning_elems); /* 9 */
DBG_INIT("softs->max_sg_per_spanning_cmd: %u\n", softs->max_sg_per_spanning_cmd); /* 68 until we add AIO writes */
DBG_FUNC("OUT\n");
}
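/*
 * Worked sizing example matching the values noted above (assuming a
 * 128-byte IQ element, a 16-byte sgt_t, an 1152-byte spanning IU and a
 * first-IU embedded SG count of 4):
 *   max_ib_iu_length             = ROUND_DOWN(1152, 128) = 1152
 *   max_spanning_elems           = 1152 / 128            = 9
 *   max_sg_per_single_iu_element = 128 / 16              = 8
 *   max_sg_per_spanning_cmd      = (9 - 1) * 8 + 4       = 68
 */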
@ -384,293 +400,6 @@ pqisrc_check_pqimode(pqisrc_softstate_t *softs)
return ret;
}
/* PQI Feature processing */
static int
pqisrc_config_table_update(struct pqisrc_softstate *softs,
uint16_t first_section, uint16_t last_section)
{
pqi_vendor_general_request_t request;
int ret = PQI_STATUS_FAILURE;
memset(&request, 0, sizeof(request));
request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
request.header.iu_length = sizeof(request) - PQI_REQUEST_HEADER_LENGTH;
request.function_code = PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE;
request.data.config_table_update.first_section = first_section;
request.data.config_table_update.last_section = last_section;
ret = pqisrc_build_send_vendor_request(softs, &request, NULL);
if (ret != PQI_STATUS_SUCCESS) {
DBG_ERR("Failed to submit vendor general request IU, Ret status: %d\n", ret);
return PQI_STATUS_FAILURE;
}
return PQI_STATUS_SUCCESS;
}
static inline
boolean_t pqi_is_firmware_feature_supported(
struct pqi_conf_table_firmware_features *firmware_feature_list,
unsigned int bit_position)
{
unsigned int byte_index;
byte_index = bit_position / BITS_PER_BYTE;
if (byte_index >= firmware_feature_list->num_elements)
return false;
return firmware_feature_list->features_supported[byte_index] &
(1 << (bit_position % BITS_PER_BYTE)) ? true : false;
}
static inline
boolean_t pqi_is_firmware_feature_enabled(
struct pqi_conf_table_firmware_features *firmware_feature_list,
uint8_t *firmware_features_addr, unsigned int bit_position)
{
unsigned int byte_index;
uint8_t *feature_enabled_addr;
byte_index = (bit_position / BITS_PER_BYTE) +
(firmware_feature_list->num_elements * 2);
feature_enabled_addr = firmware_features_addr +
offsetof(struct pqi_conf_table_firmware_features,
features_supported) + byte_index;
return *feature_enabled_addr &
(1 << (bit_position % BITS_PER_BYTE)) ? true : false;
}
static inline void
pqi_request_firmware_feature(
struct pqi_conf_table_firmware_features *firmware_feature_list,
unsigned int bit_position)
{
unsigned int byte_index;
byte_index = (bit_position / BITS_PER_BYTE) +
firmware_feature_list->num_elements;
firmware_feature_list->features_supported[byte_index] |=
(1 << (bit_position % BITS_PER_BYTE));
}
/* Update PQI config table firmware features section and inform the firmware */
static int
pqisrc_set_host_requested_firmware_feature(pqisrc_softstate_t *softs,
struct pqi_conf_table_firmware_features *firmware_feature_list)
{
uint8_t *request_feature_addr;
void *request_feature_abs_addr;
request_feature_addr = firmware_feature_list->features_supported +
firmware_feature_list->num_elements;
request_feature_abs_addr = softs->fw_features_section_abs_addr +
(request_feature_addr - (uint8_t*)firmware_feature_list);
os_io_memcpy(request_feature_abs_addr, request_feature_addr,
firmware_feature_list->num_elements);
return pqisrc_config_table_update(softs,
PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES,
PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES);
}
/* Check firmware has enabled the feature specified in the respective bit position. */
inline boolean_t
pqisrc_is_firmware_feature_enabled(pqisrc_softstate_t *softs,
struct pqi_conf_table_firmware_features *firmware_feature_list, uint16_t bit_position)
{
uint16_t byte_index;
uint8_t *features_enabled_abs_addr;
byte_index = (bit_position / BITS_PER_BYTE) +
(firmware_feature_list->num_elements * 2);
features_enabled_abs_addr = softs->fw_features_section_abs_addr +
offsetof(struct pqi_conf_table_firmware_features,features_supported) + byte_index;
return *features_enabled_abs_addr &
(1 << (bit_position % BITS_PER_BYTE)) ? true : false;
}
static void
pqi_firmware_feature_status(struct pqisrc_softstate *softs,
struct pqi_firmware_feature *firmware_feature)
{
switch(firmware_feature->feature_bit) {
case PQI_FIRMWARE_FEATURE_OFA:
break;
case PQI_FIRMWARE_FEATURE_TIMEOUT_IN_RAID_IU_SUPPORT:
softs->timeout_in_passthrough = true;
break;
case PQI_FIRMWARE_FEATURE_TIMEOUT_IN_TMF_IU_SUPPORT:
softs->timeout_in_tmf = true;
break;
default:
DBG_NOTE("Nothing to do \n");
}
}
/* Firmware features supported by the driver */
static struct
pqi_firmware_feature pqi_firmware_features[] = {
{
.feature_name = "Support timeout for pass-through commands",
.feature_bit = PQI_FIRMWARE_FEATURE_TIMEOUT_IN_RAID_IU_SUPPORT,
.feature_status = pqi_firmware_feature_status,
},
{
.feature_name = "Support timeout for LUN Reset TMF",
.feature_bit = PQI_FIRMWARE_FEATURE_TIMEOUT_IN_TMF_IU_SUPPORT,
.feature_status = pqi_firmware_feature_status,
}
};
static void
pqisrc_process_firmware_features(pqisrc_softstate_t *softs)
{
int rc;
struct pqi_conf_table_firmware_features *firmware_feature_list;
unsigned int i;
unsigned int num_features_requested;
firmware_feature_list = (struct pqi_conf_table_firmware_features*)
softs->fw_features_section_abs_addr;
/* Check features and request those supported by firmware and driver.*/
for (i = 0, num_features_requested = 0;
i < ARRAY_SIZE(pqi_firmware_features); i++) {
/* Firmware support it ? */
if (pqi_is_firmware_feature_supported(firmware_feature_list,
pqi_firmware_features[i].feature_bit)) {
pqi_request_firmware_feature(firmware_feature_list,
pqi_firmware_features[i].feature_bit);
pqi_firmware_features[i].supported = true;
num_features_requested++;
DBG_NOTE("%s supported by driver, requesting firmware to enable it\n",
pqi_firmware_features[i].feature_name);
} else {
DBG_NOTE("%s supported by driver, but not by current firmware\n",
pqi_firmware_features[i].feature_name);
}
}
if (num_features_requested == 0)
return;
rc = pqisrc_set_host_requested_firmware_feature(softs, firmware_feature_list);
if (rc) {
DBG_ERR("Failed to update pqi config table\n");
return;
}
for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
if (pqi_is_firmware_feature_enabled(firmware_feature_list,
softs->fw_features_section_abs_addr, pqi_firmware_features[i].feature_bit)) {
pqi_firmware_features[i].enabled = true;
DBG_NOTE("Firmware feature %s enabled \n",pqi_firmware_features[i].feature_name);
if(pqi_firmware_features[i].feature_status)
pqi_firmware_features[i].feature_status(softs, &(pqi_firmware_features[i]));
}
}
}
/*
* Get the PQI configuration table parameters.
* Currently using for heart-beat counter scratch-pad register.
*/
int
pqisrc_process_config_table(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_FAILURE;
uint32_t config_table_size;
uint32_t section_off;
uint8_t *config_table_abs_addr __unused;
struct pqi_conf_table *conf_table;
struct pqi_conf_table_section_header *section_hdr;
config_table_size = softs->pqi_cap.conf_tab_sz;
if (config_table_size < sizeof(*conf_table) ||
config_table_size > PQI_CONF_TABLE_MAX_LEN) {
DBG_ERR("Invalid PQI conf table length of %u\n",
config_table_size);
return ret;
}
conf_table = os_mem_alloc(softs, config_table_size);
if (!conf_table) {
DBG_ERR("Failed to allocate memory for PQI conf table\n");
return ret;
}
if (config_table_size < sizeof(conf_table) ||
config_table_size > PQI_CONF_TABLE_MAX_LEN) {
DBG_ERR("Invalid PQI conf table length of %u\n",
config_table_size);
goto out;
}
config_table_abs_addr = (uint8_t *)(softs->pci_mem_base_vaddr +
softs->pqi_cap.conf_tab_off);
PCI_MEM_GET_BUF(softs, config_table_abs_addr,
softs->pqi_cap.conf_tab_off,
(uint8_t*)conf_table, config_table_size);
if (memcmp(conf_table->sign, PQI_CONF_TABLE_SIGNATURE,
sizeof(conf_table->sign)) != 0) {
DBG_ERR("Invalid PQI config signature\n");
goto out;
}
section_off = LE_32(conf_table->first_section_off);
while (section_off) {
if (section_off+ sizeof(*section_hdr) >= config_table_size) {
DBG_INFO("Reached end of PQI config table. Breaking off.\n");
break;
}
section_hdr = (struct pqi_conf_table_section_header *)((uint8_t *)conf_table + section_off);
switch (LE_16(section_hdr->section_id)) {
case PQI_CONF_TABLE_SECTION_GENERAL_INFO:
case PQI_CONF_TABLE_SECTION_FIRMWARE_ERRATA:
case PQI_CONF_TABLE_SECTION_DEBUG:
break;
case PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES:
softs->fw_features_section_off = softs->pqi_cap.conf_tab_off + section_off;
softs->fw_features_section_abs_addr = softs->pci_mem_base_vaddr + softs->fw_features_section_off;
pqisrc_process_firmware_features(softs);
break;
case PQI_CONF_TABLE_SECTION_HEARTBEAT:
softs->heartbeat_counter_off = softs->pqi_cap.conf_tab_off +
section_off +
offsetof(struct pqi_conf_table_heartbeat,
heartbeat_counter);
softs->heartbeat_counter_abs_addr = (uint64_t *)(softs->pci_mem_base_vaddr +
softs->heartbeat_counter_off);
ret = PQI_STATUS_SUCCESS;
break;
default:
DBG_INFO("unrecognized PQI config table section ID: 0x%x\n",
LE_16(section_hdr->section_id));
break;
}
section_off = LE_16(section_hdr->next_section_off);
}
out:
os_mem_free(softs, (void *)conf_table,config_table_size);
return ret;
}
/* Wait for PQI reset completion for the adapter*/
int
pqisrc_wait_for_pqi_reset_completion(pqisrc_softstate_t *softs)
@ -721,6 +450,7 @@ pqi_reset(pqisrc_softstate_t *softs)
val |= SIS_PQI_RESET_QUIESCE;
PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db,
LEGACY_SIS_IDBR, LE_32(val));
OS_SLEEP(1000); /* 1 ms delay for PCI W/R ordering issue */
ret = pqisrc_sis_wait_for_db_bit_to_clear(softs, SIS_PQI_RESET_QUIESCE);
if (ret) {
DBG_ERR("failed with error %d during quiesce\n", ret);
@ -734,6 +464,7 @@ pqi_reset(pqisrc_softstate_t *softs)
PCI_MEM_PUT32(softs, &softs->pqi_reg->dev_reset, PQI_DEV_RESET,
LE_32(pqi_reset_reg.all_bits));
OS_SLEEP(1000); /* 1 ms delay for PCI W/R ordering issue */
ret = pqisrc_wait_for_pqi_reset_completion(softs);
if (ret) {
@ -827,10 +558,9 @@ pqisrc_pqi_init(pqisrc_softstate_t *softs)
/* Create Operational queues */
ret = pqisrc_create_op_queues(softs);
if(ret) {
DBG_ERR("Failed to create op queue\n");
ret = PQI_STATUS_FAILURE;
goto err_create_opq;
}
DBG_ERR("Failed to create op queue\n");
goto err_create_opq;
}
softs->ctrl_online = true;
@ -851,13 +581,14 @@ pqisrc_pqi_init(pqisrc_softstate_t *softs)
return PQI_STATUS_FAILURE;
}
/* */
int
pqisrc_force_sis(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
if (SIS_IS_KERNEL_PANIC(softs)) {
DBG_INIT("Controller FW is not running");
DBG_ERR("Controller FW is not running\n");
return PQI_STATUS_FAILURE;
}
@ -888,18 +619,22 @@ pqisrc_force_sis(pqisrc_softstate_t *softs)
return ret;
}
static int
/* 5 mins timeout for quiesce */
#define PQI_QUIESCE_TIMEOUT 300000
int
pqisrc_wait_for_cmnd_complete(pqisrc_softstate_t *softs)
{
int count = 0;
int ret = PQI_STATUS_SUCCESS;
DBG_NOTE("softs->taglist.num_elem : %d",softs->taglist.num_elem);
DBG_NOTE("softs->taglist.num_elem : %u",softs->taglist.num_elem);
if (softs->taglist.num_elem == softs->max_outstanding_io)
return ret;
else {
DBG_WARN("%d commands pending\n",
DBG_WARN("%u commands pending\n",
softs->max_outstanding_io - softs->taglist.num_elem);
while(1) {
@ -928,7 +663,7 @@ pqisrc_wait_for_cmnd_complete(pqisrc_softstate_t *softs)
return ret;
}
static void
void
pqisrc_complete_internal_cmds(pqisrc_softstate_t *softs)
{
@ -938,7 +673,7 @@ pqisrc_complete_internal_cmds(pqisrc_softstate_t *softs)
for (tag = 1; tag <= softs->max_outstanding_io; tag++) {
rcb = &softs->rcb[tag];
if(rcb->req_pending && is_internal_req(rcb)) {
rcb->status = REQUEST_FAILED;
rcb->status = PQI_STATUS_TIMEOUT;
rcb->req_pending = false;
}
}
@ -951,7 +686,7 @@ pqisrc_complete_internal_cmds(pqisrc_softstate_t *softs)
void
pqisrc_pqi_uninit(pqisrc_softstate_t *softs)
{
int i, ret;
int ret;
DBG_FUNC("IN\n");
@ -975,25 +710,10 @@ pqisrc_pqi_uninit(pqisrc_softstate_t *softs)
softs->devlist_lockcreated = false;
}
for (i = 0; i < softs->num_op_raid_ibq; i++) {
/* OP RAID IB Q */
if(softs->op_raid_ib_q[i].lockcreated==true){
OS_UNINIT_PQILOCK(&softs->op_raid_ib_q[i].lock);
softs->op_raid_ib_q[i].lockcreated = false;
}
/* OP AIO IB Q */
if(softs->op_aio_ib_q[i].lockcreated==true){
OS_UNINIT_PQILOCK(&softs->op_aio_ib_q[i].lock);
softs->op_aio_ib_q[i].lockcreated = false;
}
}
/* Free Op queues */
os_dma_mem_free(softs, &softs->op_ibq_dma_mem);
os_dma_mem_free(softs, &softs->op_obq_dma_mem);
os_dma_mem_free(softs, &softs->event_q_dma_mem);
/* Free all queues */
pqisrc_destroy_op_ib_queues(softs);
pqisrc_destroy_op_ob_queues(softs);
pqisrc_destroy_event_queue(softs);
/* Free rcb */
pqisrc_free_rcb(softs, softs->max_outstanding_io + 1);
@ -1001,13 +721,8 @@ pqisrc_pqi_uninit(pqisrc_softstate_t *softs)
/* Free request id lists */
pqisrc_destroy_taglist(softs,&softs->taglist);
if(softs->admin_ib_queue.lockcreated==true) {
OS_UNINIT_PQILOCK(&softs->admin_ib_queue.lock);
softs->admin_ib_queue.lockcreated = false;
}
/* Free Admin Queue */
os_dma_mem_free(softs, &softs->admin_queue_dma_mem);
pqisrc_destroy_admin_queue(softs);
/* Switch back to SIS mode */
if (pqisrc_force_sis(softs)) {
@ -1017,6 +732,30 @@ pqisrc_pqi_uninit(pqisrc_softstate_t *softs)
DBG_FUNC("OUT\n");
}
/*
* Function to do any sanity checks for OS macros
*/
void
sanity_check_os_behavior(pqisrc_softstate_t *softs)
{
#ifdef OS_ATOMIC64_INC
OS_ATOMIC64_T atomic_test_var = 0;
OS_ATOMIC64_T atomic_ret = 0;
atomic_ret = OS_ATOMIC64_INC(&atomic_test_var);
ASSERT(atomic_ret == 1);
atomic_ret = OS_ATOMIC64_INC(&atomic_test_var);
ASSERT(atomic_ret == 2);
atomic_ret = OS_ATOMIC64_DEC(&atomic_test_var);
ASSERT(atomic_ret == 1);
#else
DBG_INIT("OS needs to define/implement atomic macros\n");
#endif
}
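/*
 * Sketch of one possible FreeBSD mapping for the macros exercised above
 * (an assumption for illustration only; the real definitions live in the
 * OS-specific headers):
 */
#if 0
#include <machine/atomic.h>
typedef volatile uint64_t OS_ATOMIC64_T;
#define OS_ATOMIC64_INC(p)	(atomic_fetchadd_64((p), 1) + 1)
#define OS_ATOMIC64_DEC(p)	(atomic_fetchadd_64((p), (uint64_t)-1) - 1)
#endif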
/*
* Function to initialize the adapter settings.
*/
@ -1024,24 +763,53 @@ int
pqisrc_init(pqisrc_softstate_t *softs)
{
int ret = 0;
int i = 0, j = 0;
uint32_t ctrl_type;
DBG_FUNC("IN\n");
sanity_check_os_behavior(softs);
check_struct_sizes();
/*Get verbose flags, defined in OS code XX_debug.h or so*/
#ifdef DISABLE_ERR_RESP_VERBOSE
softs->err_resp_verbose = false;
#else
softs->err_resp_verbose = true;
#endif
/* prevent attachment of revA hardware. */
ctrl_type = PQI_GET_CTRL_TYPE(softs);
if (ctrl_type == PQI_CTRL_PRODUCT_ID_GEN2_REV_A) {
DBG_ERR("adapter at B.D.F=%u.%u.%u: unsupported RevA card.\n",
softs->bus_id, softs->device_id, softs->func_id);
ret = PQI_STATUS_FAILURE;
goto err_out;
}
/* Increment the global adapter ID and tie it to this BDF */
#ifdef OS_ATOMIC64_INC
static OS_ATOMIC64_T g_adapter_cnt = 0;
softs->adapter_num = (uint8_t)OS_ATOMIC64_INC(&g_adapter_cnt);
#else
static uint64_t g_adapter_cnt = 0;
softs->adapter_num = (uint8_t)++g_adapter_cnt;
#endif
DBG_NOTE("Initializing adapter %u\n", (uint32_t)softs->adapter_num);
ret = os_create_semaphore("scan_lock", 1, &softs->scan_lock);
if(ret != PQI_STATUS_SUCCESS){
DBG_ERR(" Failed to initialize scan lock\n");
goto err_out;
}
/* Init the Sync interface */
ret = pqisrc_sis_init(softs);
if (ret) {
DBG_ERR("SIS Init failed with error %d\n", ret);
goto err_out;
goto err_sis;
}
ret = os_create_semaphore("scan_lock", 1, &softs->scan_lock);
if(ret != PQI_STATUS_SUCCESS){
DBG_ERR(" Failed to initialize scan lock\n");
goto err_scan_lock;
}
/* Init the PQI interface */
ret = pqisrc_pqi_init(softs);
@ -1058,25 +826,25 @@ pqisrc_init(pqisrc_softstate_t *softs)
}
/* Report event configuration */
ret = pqisrc_report_event_config(softs);
if(ret){
DBG_ERR(" Failed to configure Report events\n");
ret = pqisrc_report_event_config(softs);
if(ret){
DBG_ERR(" Failed to configure Report events\n");
goto err_event;
}
/* Set event configuration*/
ret = pqisrc_set_event_config(softs);
if(ret){
DBG_ERR(" Failed to configure Set events\n");
goto err_event;
}
ret = pqisrc_set_event_config(softs);
if(ret){
DBG_ERR(" Failed to configure Set events\n");
goto err_event;
}
/* Check for PQI spanning */
ret = pqisrc_get_ctrl_fw_version(softs);
if(ret){
DBG_ERR(" Failed to get ctrl fw version\n");
goto err_fw_version;
}
if(ret){
DBG_ERR(" Failed to get ctrl fw version\n");
goto err_fw_version;
}
/* update driver version in to FW */
ret = pqisrc_write_driver_version_to_host_wellness(softs);
@ -1085,6 +853,12 @@ pqisrc_init(pqisrc_softstate_t *softs)
goto err_host_wellness;
}
/* Setup sense features */
ret = pqisrc_QuerySenseFeatures(softs);
if (ret) {
DBG_ERR("Failed to get sense features\n");
goto err_sense;
}
os_strlcpy(softs->devlist_lock_name, "devlist_lock", LOCKNAME_SIZE);
ret = os_init_spinlock(softs, &softs->devlist_lock, softs->devlist_lock_name);
@ -1104,12 +878,8 @@ pqisrc_init(pqisrc_softstate_t *softs)
softs->prev_heartbeat_count = CTRLR_HEARTBEAT_CNT(softs) - OS_FW_HEARTBEAT_TIMER_INTERVAL;
/* Init device list */
for(i = 0; i < PQI_MAX_DEVICES; i++)
for(j = 0; j < PQI_MAX_MULTILUN; j++)
softs->device_list[i][j] = NULL;
pqisrc_init_targetid_pool(softs);
memset(softs->dev_list, 0, sizeof(*softs->dev_list));
pqisrc_init_bitmap(softs);
DBG_FUNC("OUT\n");
return ret;
@ -1124,11 +894,12 @@ pqisrc_init(pqisrc_softstate_t *softs)
err_event:
err_host_wellness:
err_intr:
err_sense:
pqisrc_pqi_uninit(softs);
err_pqi:
os_destroy_semaphore(&softs->scan_lock);
err_scan_lock:
pqisrc_sis_uninit(softs);
err_sis:
os_destroy_semaphore(&softs->scan_lock);
err_out:
DBG_FUNC("OUT failed\n");
return ret;
@ -1162,16 +933,18 @@ pqisrc_flush_cache( pqisrc_softstate_t *softs,
memset(&request, 0, sizeof(request));
rval = pqisrc_build_send_raid_request(softs, &request, flush_buff,
sizeof(*flush_buff), SA_CACHE_FLUSH, 0,
(uint8_t *)RAID_CTLR_LUNID, NULL);
request.data_direction = SOP_DATA_DIR_FROM_DEVICE;
request.cmd.bmic_cdb.op_code = BMIC_WRITE;
request.cmd.bmic_cdb.cmd = BMIC_CACHE_FLUSH;
request.cmd.bmic_cdb.xfer_len = BE_16(sizeof(*flush_buff));
rval = pqisrc_prepare_send_ctrlr_request(softs, &request, flush_buff, sizeof(*flush_buff));
if (rval) {
DBG_ERR("error in build send raid req ret=%d\n", rval);
}
if (flush_buff)
os_mem_free(softs, (void *)flush_buff,
sizeof(pqisrc_bmic_flush_cache_t));
os_mem_free(softs, (void *)flush_buff, sizeof(pqisrc_bmic_flush_cache_t));
DBG_FUNC("OUT\n");

View File

@ -1,5 +1,5 @@
/*-
* Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
* Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -26,6 +26,7 @@
#include "smartpqi_includes.h"
/*
* Function to get processor count
*/
@ -34,6 +35,7 @@ os_get_processor_config(pqisrc_softstate_t *softs)
{
DBG_FUNC("IN\n");
softs->num_cpus_online = mp_ncpus;
bsd_set_hint_adapter_cpu_config(softs);
DBG_FUNC("OUT\n");
return PQI_STATUS_SUCCESS;
@ -86,10 +88,10 @@ os_get_intr_config(pqisrc_softstate_t *softs)
softs->intr_count = 1;
}
DBG_FUNC("OUT\n");
error = bsd_status_to_pqi_status(BSD_SUCCESS);
DBG_FUNC("OUT\n");
return error;
}
@ -117,7 +119,7 @@ shared_ithread_routine(void *arg)
DBG_FUNC("IN\n");
if (softs == NULL)
if (!softs)
return;
pqisrc_process_response_queue(softs, oq_id);
@ -138,7 +140,7 @@ common_ithread_routine(void *arg)
DBG_FUNC("IN\n");
if (!softs)
return;
pqisrc_process_response_queue(softs, oq_id);
@ -155,7 +157,7 @@ event_ithread_routine(void *arg)
DBG_FUNC("IN\n");
if (!softs)
return;
pqisrc_process_event_intr_src(softs, oq_id);
@ -170,10 +172,12 @@ int
register_legacy_intr(pqisrc_softstate_t *softs)
{
int error = BSD_SUCCESS;
device_t dev;
DBG_FUNC("IN\n");
dev = softs->os_specific.pqi_dev;
softs->os_specific.pqi_irq_rid[0] = 0;
softs->os_specific.pqi_irq[0] = bus_alloc_resource_any(dev, \
SYS_RES_IRQ, &softs->os_specific.pqi_irq_rid[0],
@ -216,12 +220,13 @@ register_msix_intr(pqisrc_softstate_t *softs)
int i = 0;
device_t dev = softs->os_specific.pqi_dev;
int msix_count = softs->intr_count;
size_t msix_size = sizeof(pqi_intr_ctx_t) * msix_count;
DBG_FUNC("IN\n");
softs->os_specific.msi_ctx = os_mem_alloc(softs, msix_size);
if (!softs->os_specific.msi_ctx) {
DBG_ERR("Memory allocation failed\n");
DBG_ERR("Memory allocation failed, Requested memory:%lu bytes\n", (unsigned long)msix_size);
return ENXIO;
}
@ -282,7 +287,7 @@ register_msix_intr(pqisrc_softstate_t *softs)
return error;
}
softs->os_specific.intr_registered[i] = TRUE;
/* Add interrupt handlers*/
for (i = 1; i < msix_count; ++i) {
softs->os_specific.pqi_irq_rid[i] = i+1;
softs->os_specific.pqi_irq[i] = \
@ -335,7 +340,7 @@ os_setup_intr(pqisrc_softstate_t *softs)
bsd_status = register_msix_intr(softs);
}
if (bsd_status)
DBG_WARN("interrupt registration is failed, error = %d\n", bsd_status);
pqi_status = bsd_status_to_pqi_status(bsd_status);
@ -422,8 +427,8 @@ os_destroy_intr(pqisrc_softstate_t *softs)
if (softs->os_specific.msi_enabled) {
pci_release_msi(dev);
softs->os_specific.msi_enabled = FALSE;
}
DBG_FUNC("OUT\n");
return PQI_STATUS_SUCCESS;

View File

@ -1,5 +1,5 @@
/*-
* Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -51,7 +51,7 @@ os_copy_from_user(struct pqisrc_softstate *softs, void *dest_buf,
}
/*
* Device open function for ioctl entry
*/
static int
smartpqi_open(struct cdev *cdev, int flags, int devtype,
@ -81,10 +81,10 @@ smartpqi_get_driver_info_ioctl(caddr_t udata, struct cdev *cdev)
DBG_FUNC("IN udata = %p cdev = %p\n", udata, cdev);
driver_info->major_version = PQISRC_DRIVER_MAJOR;
driver_info->minor_version = PQISRC_DRIVER_MINOR;
driver_info->release_version = PQISRC_DRIVER_RELEASE;
driver_info->build_revision = PQISRC_DRIVER_REVISION;
driver_info->max_targets = PQI_MAX_DEVICES - 1;
driver_info->max_io = softs->max_io_for_scsi_ml;
driver_info->max_transfer_length = softs->pqi_cap.max_transfer_size;
@ -117,6 +117,7 @@ smartpqi_get_pci_info_ioctl(caddr_t udata, struct cdev *cdev)
vendor = pci_get_vendor(dev);
device = pci_get_device(dev);
pci_info->chip_id = ((device << 16) & 0xffff0000) | vendor;
DBG_FUNC("OUT\n");
}
@ -175,6 +176,7 @@ smartpqi_ioctl(struct cdev *cdev, u_long cmd, caddr_t udata,
}
DBG_FUNC("OUT error = %d\n", bsd_status);
return bsd_status;
}
@ -232,7 +234,7 @@ destroy_char_dev(struct pqisrc_softstate *softs)
int
pqisrc_passthru_ioctl(struct pqisrc_softstate *softs, void *arg, int mode)
{
int ret;
char *drv_buf = NULL;
uint32_t tag = 0;
IOCTL_Command_struct *iocommand = (IOCTL_Command_struct *)arg;
@ -246,18 +248,18 @@ pqisrc_passthru_ioctl(struct pqisrc_softstate *softs, void *arg, int mode)
memset(&request, 0, sizeof(request));
memset(&error_info, 0, sizeof(error_info));
DBG_FUNC("IN");
DBG_FUNC("IN\n");
if (pqisrc_ctrl_offline(softs))
return PQI_STATUS_FAILURE;
if (!arg)
return PQI_STATUS_FAILURE;
if (iocommand->buf_size < 1 &&
iocommand->Request.Type.Direction != PQIIOCTL_NONE)
return PQI_STATUS_FAILURE;
if (iocommand->Request.CDBLen > sizeof(request.cmd.cdb))
return PQI_STATUS_FAILURE;
switch (iocommand->Request.Type.Direction) {
@ -272,26 +274,23 @@ pqisrc_passthru_ioctl(struct pqisrc_softstate *softs, void *arg, int mode)
if (iocommand->buf_size > 0) {
memset(&ioctl_dma_buf, 0, sizeof(struct dma_mem));
os_strlcpy(ioctl_dma_buf.tag, "Ioctl_PassthruCmd_Buffer", sizeof(ioctl_dma_buf.tag));
ioctl_dma_buf.size = iocommand->buf_size;
ioctl_dma_buf.align = PQISRC_DEFAULT_DMA_ALIGN;
/* allocate memory */
ret = os_dma_mem_alloc(softs, &ioctl_dma_buf);
if (ret) {
DBG_ERR("Failed to Allocate dma mem for Ioctl PassthruCmd Buffer : %d\n", ret);
goto out;
}
DBG_INFO("ioctl_dma_buf.dma_addr = %p\n",(void*)ioctl_dma_buf.dma_addr);
DBG_INFO("ioctl_dma_buf.virt_addr = %p\n",(void*)ioctl_dma_buf.virt_addr);
DBG_IO("ioctl_dma_buf.dma_addr = %p\n",(void*)ioctl_dma_buf.dma_addr);
DBG_IO("ioctl_dma_buf.virt_addr = %p\n",(void*)ioctl_dma_buf.virt_addr);
drv_buf = (char *)ioctl_dma_buf.virt_addr;
if (iocommand->Request.Type.Direction & PQIIOCTL_WRITE) {
ret = os_copy_from_user(softs, (void *)drv_buf, (void *)iocommand->buf, iocommand->buf_size, mode);
if (ret != 0) {
goto free_mem;
}
}
@ -302,7 +301,7 @@ pqisrc_passthru_ioctl(struct pqisrc_softstate *softs, void *arg, int mode)
PQI_REQUEST_HEADER_LENGTH;
memcpy(request.lun_number, iocommand->LUN_info.LunAddrBytes,
sizeof(request.lun_number));
memcpy(request.cmd.cdb, iocommand->Request.CDB, iocommand->Request.CDBLen);
request.additional_cdb_bytes_usage = PQI_ADDITIONAL_CDB_BYTES_0;
switch (iocommand->Request.Type.Direction) {
@ -330,7 +329,6 @@ pqisrc_passthru_ioctl(struct pqisrc_softstate *softs, void *arg, int mode)
tag = pqisrc_get_tag(&softs->taglist);
if (INVALID_ELEM == tag) {
DBG_ERR("Tag not available\n");
goto free_mem;
}
request.request_id = tag;
@ -352,8 +350,7 @@ pqisrc_passthru_ioctl(struct pqisrc_softstate *softs, void *arg, int mode)
goto err_out;
}
ret = pqisrc_wait_on_condition(softs, rcb, PQISRC_PASSTHROUGH_CMD_TIMEOUT);
if (ret != PQI_STATUS_SUCCESS) {
DBG_ERR("Passthru IOCTL cmd timed out !!\n");
goto err_out;
@ -386,29 +383,29 @@ pqisrc_passthru_ioctl(struct pqisrc_softstate *softs, void *arg, int mode)
iocommand->error_info.SenseLen = sense_data_length;
}
if (error_info.data_out_result == PQI_RAID_DATA_IN_OUT_UNDERFLOW) {
rcb->status = PQI_STATUS_SUCCESS;
}
}
if (rcb->status == PQI_STATUS_SUCCESS && iocommand->buf_size > 0 &&
(iocommand->Request.Type.Direction & PQIIOCTL_READ)) {
ret = os_copy_to_user(softs, (void*)iocommand->buf, (void*)drv_buf, iocommand->buf_size, mode);
if (ret != 0) {
DBG_ERR("Failed to copy the response\n");
goto err_out;
}
}
os_reset_rcb(rcb);
pqisrc_put_tag(&softs->taglist, request.request_id);
if (iocommand->buf_size > 0)
os_dma_mem_free(softs,&ioctl_dma_buf);
DBG_FUNC("OUT\n");
return PQI_STATUS_SUCCESS;
err_out:
os_reset_rcb(rcb);
pqisrc_put_tag(&softs->taglist, request.request_id);

View File

@ -1,5 +1,5 @@
/*-
* Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -30,7 +30,7 @@
/* IOCTL passthrough macros and structures */
#define SENSEINFOBYTES 32 /* note that this value may vary
between host implementations */
/* transfer direction */
#define PQIIOCTL_NONE 0x00

View File

@ -1,5 +1,5 @@
/*-
* Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -29,7 +29,6 @@
*/
#include "smartpqi_includes.h"
#include "smartpqi_prototypes.h"
CTASSERT(BSD_SUCCESS == PQI_STATUS_SUCCESS);
@ -60,6 +59,7 @@ struct pqi_ident
{0x9005, 0x028f, 0x103c, 0x652, PQI_HWIF_SRCV, "E208i-c SR Gen10"},
{0x9005, 0x028f, 0x103c, 0x654, PQI_HWIF_SRCV, "E208i-a SR Gen10"},
{0x9005, 0x028f, 0x103c, 0x655, PQI_HWIF_SRCV, "P408e-m SR Gen10"},
{0x9005, 0x028f, 0x9005, 0x659, PQI_HWIF_SRCV, "2100C8iOXS"},
/* (MSCC PM8221 8x12G based) */
{0x9005, 0x028f, 0x103c, 0x700, PQI_HWIF_SRCV, "P204i-c SR Gen10"},
@ -67,7 +67,7 @@ struct pqi_ident
{0x9005, 0x028f, 0x193d, 0x1104, PQI_HWIF_SRCV, "UN RAID P2404-Mf-4i-2GB"},
{0x9005, 0x028f, 0x193d, 0x1106, PQI_HWIF_SRCV, "UN RAID P2404-Mf-4i-1GB"},
{0x9005, 0x028f, 0x193d, 0x1108, PQI_HWIF_SRCV, "UN RAID P4408-Ma-8i-2GB"},
{0x9005, 0x028f, 0x193d, 0x1109, PQI_HWIF_SRCV, "UN RAID P4408-Mr-8i-2GB"},
/* (MSCC PM8204 8x12G based) */
{0x9005, 0x028f, 0x9005, 0x800, PQI_HWIF_SRCV, "SmartRAID 3154-8i"},
@ -81,14 +81,15 @@ struct pqi_ident
{0x9005, 0x028f, 0x152d, 0x8a22, PQI_HWIF_SRCV, "QS-8204-8i"},
{0x9005, 0x028f, 0x193d, 0xf460, PQI_HWIF_SRCV, "UN RAID P460-M4"},
{0x9005, 0x028f, 0x193d, 0xf461, PQI_HWIF_SRCV, "UN RAID P460-B4"},
{0x9005, 0x028f, 0x1bd4, 0x004b, PQI_HWIF_SRCV, "PM8204-2GB"},
{0x9005, 0x028f, 0x1bd4, 0x004c, PQI_HWIF_SRCV, "PM8204-4GB"},
{0x9005, 0x028f, 0x193d, 0x1105, PQI_HWIF_SRCV, "UN RAID P4408-Mf-8i-2GB"},
{0x9005, 0x028f, 0x193d, 0x1107, PQI_HWIF_SRCV, "UN RAID P4408-Mf-8i-4GB"},
{0x9005, 0x028f, 0x1d8d, 0x800, PQI_HWIF_SRCV, "Fiberhome SmartRAID AIS-8204-8i"},
{0x9005, 0x028f, 0x9005, 0x0808, PQI_HWIF_SRCV, "SmartRAID 3101E-4i"},
{0x9005, 0x028f, 0x9005, 0x0809, PQI_HWIF_SRCV, "SmartRAID 3102E-8i"},
{0x9005, 0x028f, 0x9005, 0x080a, PQI_HWIF_SRCV, "SmartRAID 3152-8i/N"},
{0x9005, 0x028f, 0x1cc4, 0x0101, PQI_HWIF_SRCV, "Ramaxel FBGF-RAD PM8204"},
/* (MSCC PM8222 8x12G based) */
{0x9005, 0x028f, 0x9005, 0x900, PQI_HWIF_SRCV, "SmartHBA 2100-8i"},
@ -105,12 +106,13 @@ struct pqi_ident
{0x9005, 0x028f, 0x193d, 0x8461, PQI_HWIF_SRCV, "UN HBA H460-B1"},
{0x9005, 0x028f, 0x193d, 0xc460, PQI_HWIF_SRCV, "UN RAID P460-M2"},
{0x9005, 0x028f, 0x193d, 0xc461, PQI_HWIF_SRCV, "UN RAID P460-B2"},
{0x9005, 0x028f, 0x1bd4, 0x004a, PQI_HWIF_SRCV, "PM8222-SHBA"},
{0x9005, 0x028f, 0x13fe, 0x8312, PQI_HWIF_SRCV, "MIC-8312BridgeB"},
{0x9005, 0x028f, 0x1bd4, 0x004f, PQI_HWIF_SRCV, "PM8222-HBA"},
{0x9005, 0x028f, 0x1d8d, 0x908, PQI_HWIF_SRCV, "Fiberhome SmartHBA AIS-8222-8i"},
{0x9005, 0x028f, 0x1bd4, 0x006C, PQI_HWIF_SRCV, "RS0800M5E8i"},
{0x9005, 0x028f, 0x1bd4, 0x006D, PQI_HWIF_SRCV, "RS0800M5H8i"},
{0x9005, 0x028f, 0x1cc4, 0x0201, PQI_HWIF_SRCV, "Ramaxel FBGF-RAD PM8222"},
/* (SRCx MSCC FVB 24x12G based) */
{0x9005, 0x028f, 0x103c, 0x1001, PQI_HWIF_SRCV, "MSCC FVB"},
@ -124,17 +126,26 @@ struct pqi_ident
{0x9005, 0x028f, 0x9005, 0x1302, PQI_HWIF_SRCV, "SmartHBA 2100-8i8e"},
{0x9005, 0x028f, 0x9005, 0x1303, PQI_HWIF_SRCV, "SmartHBA 2100-24i"},
{0x9005, 0x028f, 0x105b, 0x1321, PQI_HWIF_SRCV, "8242-24i"},
{0x9005, 0x028f, 0x1bd4, 0x0045, PQI_HWIF_SRCV, "SMART-HBA 8242-24i"},
{0x9005, 0x028f, 0x1bd4, 0x006B, PQI_HWIF_SRCV, "RS0800M5H24i"},
{0x9005, 0x028f, 0x1bd4, 0x0070, PQI_HWIF_SRCV, "RS0800M5E24i"},
/* (MSCC PM8236 16x12G based) */
{0x9005, 0x028f, 0x152d, 0x8a24, PQI_HWIF_SRCV, "QS-8236-16i"},
{0x9005, 0x028f, 0x9005, 0x1380, PQI_HWIF_SRCV, "SmartRAID 3154-16i"},
{0x9005, 0x028f, 0x1bd4, 0x0046, PQI_HWIF_SRCV, "RAID 8236-16i"},
{0x9005, 0x028f, 0x1d8d, 0x806, PQI_HWIF_SRCV, "Fiberhome SmartRAID AIS-8236-16i"},
{0x9005, 0x028f, 0x1cf2, 0x0B27, PQI_HWIF_SRCV, "ZTE SmartROC3100 SDPSA/B-18i 4G"},
{0x9005, 0x028f, 0x1cf2, 0x0B45, PQI_HWIF_SRCV, "ZTE SmartROC3100 SDPSA/B_L-18i 2G"},
{0x9005, 0x028f, 0x1cf2, 0x5445, PQI_HWIF_SRCV, "ZTE SmartROC3100 RM241-18i 2G"},
{0x9005, 0x028f, 0x1cf2, 0x5446, PQI_HWIF_SRCV, "ZTE SmartROC3100 RM242-18i 4G"},
{0x9005, 0x028f, 0x1cf2, 0x5449, PQI_HWIF_SRCV, "ZTE SmartROC3100 RS241-18i 2G"},
{0x9005, 0x028f, 0x1cf2, 0x544A, PQI_HWIF_SRCV, "ZTE SmartROC3100 RS242-18i 4G"},
{0x9005, 0x028f, 0x1cf2, 0x544D, PQI_HWIF_SRCV, "ZTE SmartROC3100 RM241B-18i 2G"},
{0x9005, 0x028f, 0x1cf2, 0x544E, PQI_HWIF_SRCV, "ZTE SmartROC3100 RM242B-18i 4G"},
{0x9005, 0x028f, 0x1bd4, 0x006F, PQI_HWIF_SRCV, "RS0804M5R16i"},
/* (MSCC PM8237 24x12G based) */
{0x9005, 0x028f, 0x103c, 0x1100, PQI_HWIF_SRCV, "P816i-a SR Gen10"},
@ -145,18 +156,24 @@ struct pqi_ident
{0x9005, 0x028f, 0x9005, 0x1280, PQI_HWIF_SRCV, "HBA 1100-16i"},
{0x9005, 0x028f, 0x9005, 0x1281, PQI_HWIF_SRCV, "HBA 1100-16e"},
{0x9005, 0x028f, 0x105b, 0x1211, PQI_HWIF_SRCV, "8238-16i"},
{0x9005, 0x028f, 0x1bd4, 0x0048, PQI_HWIF_SRCV, "SMART-HBA 8238-16i"},
{0x9005, 0x028f, 0x9005, 0x1282, PQI_HWIF_SRCV, "SmartHBA 2100-16i"},
{0x9005, 0x028f, 0x1d8d, 0x916, PQI_HWIF_SRCV, "Fiberhome SmartHBA AIS-8238-16i"},
{0x9005, 0x028f, 0x1458, 0x1000, PQI_HWIF_SRCV, "GIGABYTE SmartHBA CLN1832"},
{0x9005, 0x028f, 0x1cf2, 0x0B29, PQI_HWIF_SRCV, "ZTE SmartIOC2100 SDPSA/B_I-18i"},
{0x9005, 0x028f, 0x1cf2, 0x5447, PQI_HWIF_SRCV, "ZTE SmartIOC2100 RM243-18i"},
{0x9005, 0x028f, 0x1cf2, 0x544B, PQI_HWIF_SRCV, "ZTE SmartIOC2100 RS243-18i"},
{0x9005, 0x028f, 0x1cf2, 0x544F, PQI_HWIF_SRCV, "ZTE SmartIOC2100 RM243B-18i"},
{0x9005, 0x028f, 0x1bd4, 0x0071, PQI_HWIF_SRCV, "RS0800M5H16i"},
{0x9005, 0x028f, 0x1bd4, 0x0072, PQI_HWIF_SRCV, "RS0800M5E16i"},
/* (MSCC PM8240 24x12G based) */
{0x9005, 0x028f, 0x152d, 0x8a36, PQI_HWIF_SRCV, "QS-8240-24i"},
{0x9005, 0x028f, 0x9005, 0x1200, PQI_HWIF_SRCV, "SmartRAID 3154-24i"},
{0x9005, 0x028f, 0x9005, 0x1201, PQI_HWIF_SRCV, "SmartRAID 3154-8i16e"},
{0x9005, 0x028f, 0x9005, 0x1202, PQI_HWIF_SRCV, "SmartRAID 3154-8i8e"},
{0x9005, 0x028f, 0x1bd4, 0x0047, PQI_HWIF_SRCV, "RAID 8240-24i"},
{0x9005, 0x028f, 0x1dfc, 0x3161, PQI_HWIF_SRCV, "NTCOM SAS3 RAID-24i"},
{0x9005, 0x028f, 0x1F0C, 0x3161, PQI_HWIF_SRCV, "NT RAID 3100-24i"},
/* Huawei ID's */
@ -166,22 +183,127 @@ struct pqi_ident
{0x9005, 0x028f, 0x19e5, 0xd22c, PQI_HWIF_SRCV, "SR455C-M 4G"},
{0x9005, 0x028f, 0x19e5, 0xd229, PQI_HWIF_SRCV, "SR155-M"},
{0x9005, 0x028f, 0x19e5, 0xd22b, PQI_HWIF_SRCV, "SR455C-ME 4G"},
/* (MSCC PM8252 8x12G based) */
{0x9005, 0x028f, 0x193d, 0x110b, PQI_HWIF_SRCV, "UN HBA H4508-Mf-8i"},
{0x9005, 0x028f, 0x1bd4, 0x0052, PQI_HWIF_SRCV, "MT0801M6E"},
{0x9005, 0x028f, 0x1bd4, 0x0054, PQI_HWIF_SRCV, "MT0800M6H"},
{0x9005, 0x028f, 0x1bd4, 0x0086, PQI_HWIF_SRCV, "RT0800M7E"},
{0x9005, 0x028f, 0x1bd4, 0x0087, PQI_HWIF_SRCV, "RT0800M7H"},
{0x9005, 0x028f, 0x1f51, 0x1001, PQI_HWIF_SRCV, "SmartHBA P6600-8i"},
{0x9005, 0x028f, 0x1f51, 0x1003, PQI_HWIF_SRCV, "SmartHBA P6600-8e"},
{0x9005, 0x028f, 0x9005, 0x1460, PQI_HWIF_SRCV, "HBA 1200"},
{0x9005, 0x028f, 0x9005, 0x1461, PQI_HWIF_SRCV, "SmartHBA 2200"},
{0x9005, 0x028f, 0x9005, 0x1462, PQI_HWIF_SRCV, "HBA 1200-8i"},
/* (MSCC PM8254 32x12G based) */
{0x9005, 0x028f, 0x1bd4, 0x0051, PQI_HWIF_SRCV, "MT0804M6R"},
{0x9005, 0x028f, 0x1bd4, 0x0053, PQI_HWIF_SRCV, "MT0808M6R"},
{0x9005, 0x028f, 0x1bd4, 0x0088, PQI_HWIF_SRCV, "RT0804M7R"},
{0x9005, 0x028f, 0x1bd4, 0x0089, PQI_HWIF_SRCV, "RT0808M7R"},
{0x9005, 0x028f, 0x1f51, 0x1002, PQI_HWIF_SRCV, "SmartRAID P7604-8i"},
{0x9005, 0x028f, 0x1f51, 0x1004, PQI_HWIF_SRCV, "SmartRAID P7604-8e"},
{0x9005, 0x028f, 0x9005, 0x14a0, PQI_HWIF_SRCV, "SmartRAID 3254-8i"},
{0x9005, 0x028f, 0x9005, 0x14a1, PQI_HWIF_SRCV, "SmartRAID 3204-8i"},
{0x9005, 0x028f, 0x9005, 0x14a2, PQI_HWIF_SRCV, "SmartRAID 3252-8i"},
{0x9005, 0x028f, 0x9005, 0x14a4, PQI_HWIF_SRCV, "SmartRAID 3254-8i /e"},
{0x9005, 0x028f, 0x9005, 0x14a5, PQI_HWIF_SRCV, "SmartRAID 3252-8i /e"},
{0x9005, 0x028f, 0x9005, 0x14a6, PQI_HWIF_SRCV, "SmartRAID 3204-8i /e"},
/* (MSCC PM8262 16x12G based) */
{0x9005, 0x028f, 0x9005, 0x14c0, PQI_HWIF_SRCV, "SmartHBA 2200-16i"},
{0x9005, 0x028f, 0x9005, 0x14c1, PQI_HWIF_SRCV, "HBA 1200-16i"},
{0x9005, 0x028f, 0x9005, 0x14c3, PQI_HWIF_SRCV, "HBA 1200-16e"},
{0x9005, 0x028f, 0x9005, 0x14c4, PQI_HWIF_SRCV, "HBA 1200-8e"},
{0x9005, 0x028f, 0x1f51, 0x1005, PQI_HWIF_SRCV, "SmartHBA P6600-16i"},
{0x9005, 0x028f, 0x1f51, 0x1007, PQI_HWIF_SRCV, "SmartHBA P6600-8i8e"},
{0x9005, 0x028f, 0x1f51, 0x1009, PQI_HWIF_SRCV, "SmartHBA P6600-16e"},
{0x9005, 0x028f, 0x1cf2, 0x54dc, PQI_HWIF_SRCV, "ZTE SmartIOC2200 RM346-16i"},
{0x9005, 0x028f, 0x1cf2, 0x0806, PQI_HWIF_SRCV, "ZTE SmartIOC2200 RS346-16i"},
/* (MSCC PM8264 16x12G based) */
{0x9005, 0x028f, 0x9005, 0x14b0, PQI_HWIF_SRCV, "SmartRAID 3254-16i"},
{0x9005, 0x028f, 0x9005, 0x14b1, PQI_HWIF_SRCV, "SmartRAID 3258-16i"},
{0x9005, 0x028f, 0x1f51, 0x1006, PQI_HWIF_SRCV, "SmartRAID P7608-16i"},
{0x9005, 0x028f, 0x1f51, 0x1008, PQI_HWIF_SRCV, "SmartRAID P7608-8i8e"},
{0x9005, 0x028f, 0x1f51, 0x100a, PQI_HWIF_SRCV, "SmartRAID P7608-16e"},
{0x9005, 0x028f, 0x1cf2, 0x54da, PQI_HWIF_SRCV, "ZTE SmartROC3200 RM344-16i 4G"},
{0x9005, 0x028f, 0x1cf2, 0x54db, PQI_HWIF_SRCV, "ZTE SmartROC3200 RM345-16i 8G"},
{0x9005, 0x028f, 0x1cf2, 0x0804, PQI_HWIF_SRCV, "ZTE SmartROC3200 RS344-16i 4G"},
{0x9005, 0x028f, 0x1cf2, 0x0805, PQI_HWIF_SRCV, "ZTE SmartROC3200 RS345-16i 8G"},
/* (MSCC PM8265 16x12G based) */
{0x9005, 0x028f, 0x1590, 0x02dc, PQI_HWIF_SRCV, "SR416i-a Gen10+"},
{0x9005, 0x028f, 0x9005, 0x1470, PQI_HWIF_SRCV, "SmartRAID 3200"},
{0x9005, 0x028f, 0x9005, 0x1471, PQI_HWIF_SRCV, "SmartRAID 3254-16i /e"},
{0x9005, 0x028f, 0x9005, 0x1472, PQI_HWIF_SRCV, "SmartRAID 3258-16i /e"},
{0x9005, 0x028f, 0x9005, 0x1473, PQI_HWIF_SRCV, "SmartRAID 3284-16io /e/uC"},
{0x9005, 0x028f, 0x9005, 0x1474, PQI_HWIF_SRCV, "SmartRAID 3254-16io /e"},
{0x9005, 0x028f, 0x9005, 0x1475, PQI_HWIF_SRCV, "SmartRAID 3254-16e /e"},
/* (MSCC PM8266 16x12G based) */
{0x9005, 0x028f, 0x1014, 0x0718, PQI_HWIF_SRCV, "IBM 4-Port 24G SAS"},
{0x9005, 0x028f, 0x9005, 0x1490, PQI_HWIF_SRCV, "HBA 1200p Ultra"},
{0x9005, 0x028f, 0x9005, 0x1491, PQI_HWIF_SRCV, "SmartHBA 2200p Ultra"},
{0x9005, 0x028f, 0x9005, 0x1402, PQI_HWIF_SRCV, "HBA Ultra 1200P-16i"},
{0x9005, 0x028f, 0x9005, 0x1441, PQI_HWIF_SRCV, "HBA Ultra 1200P-32i"},
/* (MSCC PM8268 16x12G based) */
{0x9005, 0x028f, 0x9005, 0x14d0, PQI_HWIF_SRCV, "SmartRAID Ultra 3258P-16i"},
/* (MSCC PM8269 16x12G based) */
{0x9005, 0x028f, 0x9005, 0x1400, PQI_HWIF_SRCV, "SmartRAID Ultra 3258P-16i /e"},
/* (MSCC PM8270 16x12G based) */
{0x9005, 0x028f, 0x9005, 0x1410, PQI_HWIF_SRCV, "HBA Ultra 1200P-16e"},
{0x9005, 0x028f, 0x9005, 0x1411, PQI_HWIF_SRCV, "HBA 1200 Ultra"},
{0x9005, 0x028f, 0x9005, 0x1412, PQI_HWIF_SRCV, "SmartHBA 2200 Ultra"},
{0x9005, 0x028f, 0x9005, 0x1463, PQI_HWIF_SRCV, "SmartHBA 2200-8io /e"},
{0x9005, 0x028f, 0x9005, 0x14c2, PQI_HWIF_SRCV, "SmartHBA 2200-16io /e"},
/* (MSCC PM8271 16x12G based) */
{0x9005, 0x028f, 0x9005, 0x14e0, PQI_HWIF_SRCV, "SmartIOC PM8271"},
/* (MSCC PM8272 16x12G based) */
{0x9005, 0x028f, 0x9005, 0x1420, PQI_HWIF_SRCV, "SmartRAID Ultra 3254-16e"},
/* (MSCC PM8273 16x12G based) */
{0x9005, 0x028f, 0x9005, 0x1430, PQI_HWIF_SRCV, "SmartRAID Ultra 3254-16e /e"},
/* (MSCC PM8274 16x12G based) */
{0x9005, 0x028f, 0x1e93, 0x1000, PQI_HWIF_SRCV, "ByteHBA JGH43024-8"},
{0x9005, 0x028f, 0x1e93, 0x1001, PQI_HWIF_SRCV, "ByteHBA JGH43034-8"},
{0x9005, 0x028f, 0x1e93, 0x1005, PQI_HWIF_SRCV, "ByteHBA JGH43014-8"},
/* (MSCC PM8275 16x12G based) */
{0x9005, 0x028f, 0x9005, 0x14f0, PQI_HWIF_SRCV, "SmartIOC PM8275"},
/* (MSCC PM8276 16x12G based) */
{0x9005, 0x028f, 0x9005, 0x1480, PQI_HWIF_SRCV, "SmartRAID 3200 Ultra"},
{0x9005, 0x028f, 0x1e93, 0x1002, PQI_HWIF_SRCV, "ByteHBA JGH44014-8"},
/* (MSCC PM8278 16x12G based) */
{0x9005, 0x028f, 0x9005, 0x1440, PQI_HWIF_SRCV, "SmartRAID Ultra 3258P-32i"},
/* (MSCC PM8279 32x12G based) */
{0x9005, 0x028f, 0x9005, 0x1450, PQI_HWIF_SRCV, "SmartRAID Ultra 3258P-32i /e"},
{0x9005, 0x028f, 0x1590, 0x0294, PQI_HWIF_SRCV, "SR932i-p Gen10+"},
{0x9005, 0x028f, 0x1590, 0x0381, PQI_HWIF_SRCV, "SR932i-p Gen11"},
{0x9005, 0x028f, 0x1590, 0x0382, PQI_HWIF_SRCV, "SR308i-p Gen11"},
{0x9005, 0x028f, 0x1590, 0x0383, PQI_HWIF_SRCV, "SR308i-o Gen11"},
{0x9005, 0x028f, 0x1590, 0x02db, PQI_HWIF_SRCV, "SR416ie-m Gen11"},
{0x9005, 0x028f, 0x1590, 0x032e, PQI_HWIF_SRCV, "SR416i-o Gen11"},
{0x9005, 0x028f, 0x9005, 0x1452, PQI_HWIF_SRCV, "SmartRAID 3200p Ultra"},
/* (MSCC HBA/SMARTHBA/CFF SmartRAID - Lenovo 8X12G 16X12G based) */
{0x9005, 0x028f, 0x1d49, 0x0220, PQI_HWIF_SRCV, "4350-8i SAS/SATA HBA"},
{0x9005, 0x028f, 0x1d49, 0x0221, PQI_HWIF_SRCV, "4350-16i SAS/SATA HBA"},
{0x9005, 0x028f, 0x1d49, 0x0520, PQI_HWIF_SRCV, "5350-8i"},
{0x9005, 0x028f, 0x1d49, 0x0522, PQI_HWIF_SRCV, "5350-8i INTR"},
{0x9005, 0x028f, 0x1d49, 0x0620, PQI_HWIF_SRCV, "9350-8i 2GB Flash"},
{0x9005, 0x028f, 0x1d49, 0x0621, PQI_HWIF_SRCV, "9350-8i 2GB Flash INTR"},
{0x9005, 0x028f, 0x1d49, 0x0622, PQI_HWIF_SRCV, "9350-16i 4GB Flash"},
{0x9005, 0x028f, 0x1d49, 0x0623, PQI_HWIF_SRCV, "9350-16i 4GB Flash INTR"},
{0, 0, 0, 0, 0, 0}
};
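/*
 * Reader's note (not part of the source): each table entry is
 * { vendor, device, subvendor, subdevice, hwif, description }.  All of
 * these parts share the generic PCI ID 0x9005/0x028f and are told apart
 * by their subsystem IDs, which is why pqi_find_ident() below falls back
 * to a vendor/device wildcard match when no subsystem pair matches.
 */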
@ -195,11 +317,26 @@ pqi_family_identifiers[] = {
/*
* Function to identify the installed adapter.
*/
static struct pqi_ident *
pqi_find_ident(device_t dev)
{
struct pqi_ident *m;
u_int16_t vendid, devid, sub_vendid, sub_devid;
static long AllowWildcards = 0xffffffff;
int result;
#ifdef DEVICE_HINT
if (AllowWildcards == 0xffffffff)
{
result = resource_long_value("smartpqi", 0, "allow_wildcards", &AllowWildcards);
/* the default case if the hint is not found is to allow wildcards */
if (result != DEVICE_HINT_SUCCESS) {
AllowWildcards = 1;
}
}
#endif
vendid = pci_get_vendor(dev);
devid = pci_get_device(dev);
@ -216,7 +353,16 @@ pqi_ident *pqi_find_ident(device_t dev)
for (m = pqi_family_identifiers; m->vendor != 0; m++) {
if ((m->vendor == vendid) && (m->device == devid)) {
if (AllowWildcards != 0)
{
DBG_NOTE("Controller device ID matched using wildcards\n");
return (m);
}
else
{
DBG_NOTE("Controller not probed because device ID wildcards are disabled\n")
return (NULL);
}
}
}
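/*
 * Illustrative hint (an example, not a shipped default): the wildcard
 * fallback above can be disabled from the loader by adding
 *
 *   hint.smartpqi.0.allow_wildcards="0"
 *
 * to /boot/device.hints, in which case a controller that matches only on
 * vendor/device ID is left unprobed.
 */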
@ -253,6 +399,97 @@ pqisrc_save_controller_info(struct pqisrc_softstate *softs)
}
static void read_device_hint_resource(struct pqisrc_softstate *softs,
char *keyword, uint32_t *value)
{
DBG_FUNC("IN\n");
device_t dev = softs->os_specific.pqi_dev;
if (resource_long_value("smartpqi", device_get_unit(dev), keyword, (long *)value) == DEVICE_HINT_SUCCESS) {
if (*value) {
/* set resource to 1 for disabling the
* firmware feature in device hint file. */
*value = 0;
}
else {
/* set resource to 0 for enabling the
* firmware feature in device hint file. */
*value = 1;
}
}
else {
/* Enabled by default */
*value = 1;
}
DBG_NOTE("SmartPQI Device Hint: %s, Is it enabled = %u\n", keyword, *value);
DBG_FUNC("OUT\n");
}
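/*
 * Example (illustrative): the boolean hints are phrased as "*_disable"
 * knobs, so read_device_hint_resource() inverts them.  A line such as
 *
 *   hint.smartpqi.0.stream_disable="1"
 *
 * stores 0 (feature off) in the matching softs->hint field, while "0" or
 * an absent hint stores 1 (feature on, the default).
 */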
static void read_device_hint_decimal_value(struct pqisrc_softstate *softs,
char *keyword, uint32_t *value)
{
DBG_FUNC("IN\n");
device_t dev = softs->os_specific.pqi_dev;
if (resource_long_value("smartpqi", device_get_unit(dev), keyword, (long *)value) == DEVICE_HINT_SUCCESS) {
/* Nothing to do here. Value reads
* directly from Device.Hint file */
}
else {
/* Set to max to determine the value */
*value = 0XFFFF;
}
DBG_FUNC("OUT\n");
}
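/*
 * Example (illustrative): numeric hints pass through unchanged, so
 *
 *   hint.smartpqi.0.queue_depth="256"
 *
 * stores 256, while an absent hint stores 0xFFFF, which the capability
 * code then treats as out of range, keeping the controller/driver default.
 */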
static void smartpqi_read_all_device_hint_file_entries(struct pqisrc_softstate *softs)
{
uint32_t value = 0;
DBG_FUNC("IN\n");
/* hint.smartpqi.0.stream_disable = "0" */
read_device_hint_resource(softs, STREAM_DETECTION, &value);
softs->hint.stream_status = value;
/* hint.smartpqi.0.sata_unique_wwn_disable = "0" */
read_device_hint_resource(softs, SATA_UNIQUE_WWN, &value);
softs->hint.sata_unique_wwn_status = value;
/* hint.smartpqi.0.aio_raid1_write_disable = "0" */
read_device_hint_resource(softs, AIO_RAID1_WRITE_BYPASS, &value);
softs->hint.aio_raid1_write_status = value;
/* hint.smartpqi.0.aio_raid5_write_disable = "0" */
read_device_hint_resource(softs, AIO_RAID5_WRITE_BYPASS, &value);
softs->hint.aio_raid5_write_status = value;
/* hint.smartpqi.0.aio_raid6_write_disable = "0" */
read_device_hint_resource(softs, AIO_RAID6_WRITE_BYPASS, &value);
softs->hint.aio_raid6_write_status = value;
/* hint.smartpqi.0.queue_depth = "0" */
read_device_hint_decimal_value(softs, ADAPTER_QUEUE_DEPTH, &value);
softs->hint.queue_depth = value;
/* hint.smartpqi.0.sg_count = "0" */
read_device_hint_decimal_value(softs, SCATTER_GATHER_COUNT, &value);
softs->hint.sg_segments = value;
/* hint.smartpqi.0.queue_count = "0" */
read_device_hint_decimal_value(softs, QUEUE_COUNT, &value);
softs->hint.cpu_count = value;
DBG_FUNC("IN\n");
}
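/*
 * Putting it together, an illustrative /boot/device.hints fragment for
 * unit 0 (names from the comments above; the values are examples only):
 *
 *   hint.smartpqi.0.stream_disable="0"
 *   hint.smartpqi.0.sata_unique_wwn_disable="0"
 *   hint.smartpqi.0.aio_raid1_write_disable="0"
 *   hint.smartpqi.0.queue_depth="256"
 *   hint.smartpqi.0.sg_count="64"
 *   hint.smartpqi.0.queue_count="8"
 */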
/*
* Allocate resources for our device, set up the bus interface.
* Initialize the PQI related functionality, scan devices, register sim to
@ -261,7 +498,7 @@ pqisrc_save_controller_info(struct pqisrc_softstate *softs)
static int
smartpqi_attach(device_t dev)
{
struct pqisrc_softstate *softs;
struct pqi_ident *id = NULL;
int error = BSD_SUCCESS;
u_int32_t command = 0, i = 0;
@ -368,6 +605,8 @@ smartpqi_attach(device_t dev)
softs->os_specific.sim_registered = FALSE;
softs->os_name = "FreeBSD ";
smartpqi_read_all_device_hint_file_entries(softs);
/* Initialize the PQI library */
error = pqisrc_init(softs);
if (error != PQI_STATUS_SUCCESS) {
@ -379,17 +618,17 @@ smartpqi_attach(device_t dev)
error = BSD_SUCCESS;
}
mtx_init(&softs->os_specific.cam_lock, "cam_lock", NULL, MTX_DEF);
softs->os_specific.mtx_init = TRUE;
mtx_init(&softs->os_specific.map_lock, "map_lock", NULL, MTX_DEF);
callout_init(&softs->os_specific.wellness_periodic, 1);
callout_init(&softs->os_specific.heartbeat_timeout_id, 1);
/*
* Create DMA tag for mapping buffers into controller-addressable space.
*/
if (bus_dma_tag_create(softs->os_specific.pqi_parent_dmat,/* parent */
PAGE_SIZE, 0, /* algnmnt, boundary */
BUS_SPACE_MAXADDR,/* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
@ -409,7 +648,7 @@ smartpqi_attach(device_t dev)
for( i = 1; i <= softs->pqi_cap.max_outstanding_io; i++, rcbp++ ) {
if ((error = bus_dmamap_create(softs->os_specific.pqi_buffer_dmat, 0, &rcbp->cm_datamap)) != 0) {
DBG_ERR("Cant create datamap for buf @"
"rcbp = %p maxio = %d error = %d\n",
"rcbp = %p maxio = %u error = %d\n",
rcbp, softs->pqi_cap.max_outstanding_io, error);
goto dma_out;
}
@ -425,6 +664,9 @@ smartpqi_attach(device_t dev)
error = ENXIO;
goto out;
}
else {
error = BSD_SUCCESS;
}
error = register_sim(softs, card_index);
if (error) {
@ -443,6 +685,7 @@ smartpqi_attach(device_t dev)
card_index, error);
goto out;
}
goto out;
dma_out:
@ -452,6 +695,7 @@ smartpqi_attach(device_t dev)
softs->os_specific.pqi_regs_res0);
out:
DBG_FUNC("OUT error = %d\n", error);
return(error);
}
@ -479,6 +723,8 @@ smartpqi_detach(device_t dev)
if (rval != PQI_STATUS_SUCCESS) {
DBG_ERR("Unable to flush adapter cache! rval = %d\n", rval);
rval = EIO;
} else {
rval = BSD_SUCCESS;
}
}
@ -558,10 +804,11 @@ smartpqi_shutdown(device_t dev)
}
DBG_FUNC("OUT\n");
return bsd_status;
}
/*
* PCI bus interface.
*/

View File

@ -1,5 +1,5 @@
/*-
* Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -168,9 +168,9 @@ os_dma_mem_free(pqisrc_softstate_t *softs, struct dma_mem *dma_mem)
void
*os_mem_alloc(pqisrc_softstate_t *softs, size_t size)
{
void *addr;
/* DBG_FUNC("IN\n"); */
/* DBG_FUNC("IN\n"); */
addr = malloc((unsigned long)size, M_SMARTPQI,
M_NOWAIT | M_ZERO);
@ -184,7 +184,7 @@ void
* Mem resource deallocation wrapper function
*/
void
os_mem_free(pqisrc_softstate_t *softs, void *addr, size_t size)
{
/* DBG_FUNC("IN\n"); */
@ -205,6 +205,6 @@ os_resource_free(pqisrc_softstate_t *softs)
if (softs->os_specific.pqi_regs_res0 != NULL)
bus_release_resource(softs->os_specific.pqi_dev,
SYS_RES_MEMORY,
softs->os_specific.pqi_regs_rid0,
softs->os_specific.pqi_regs_res0);
}

View File

@ -1,5 +1,5 @@
/*-
* Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -27,13 +27,13 @@
#include "smartpqi_includes.h"
/*
* Populate hostwellness time variables in bcd format from FreeBSD format.
*/
void
os_get_time(struct bmic_host_wellness_time *host_wellness_time)
{
struct timespec ts;
struct clocktime ct = {0};
getnanotime(&ts);
clock_ts_to_ct(&ts, &ct);
@ -111,8 +111,9 @@ int
os_init_spinlock(struct pqisrc_softstate *softs, struct mtx *lock,
char *lockname)
{
mtx_init(lock, lockname, NULL, MTX_SPIN);
return 0;
}
/*
@ -180,3 +181,132 @@ bsd_status_to_pqi_status(int bsd_status)
else
return PQI_STATUS_FAILURE;
}
/* Return true : the feature is disabled via device hints.
 * Return false : the feature is enabled via device hints, or the
 * hints do not decide the feature status (the default).
 */
boolean_t
check_device_hint_status(struct pqisrc_softstate *softs, unsigned int feature_bit)
{
DBG_FUNC("IN\n");
switch(feature_bit) {
case PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS:
if (!softs->hint.aio_raid1_write_status)
return true;
break;
case PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS:
if (!softs->hint.aio_raid5_write_status)
return true;
break;
case PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS:
if (!softs->hint.aio_raid6_write_status)
return true;
break;
case PQI_FIRMWARE_FEATURE_UNIQUE_SATA_WWN:
if (!softs->hint.sata_unique_wwn_status)
return true;
break;
default:
return false;
}
DBG_FUNC("OUT\n");
return false;
}
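/*
 * Illustrative call site (an assumption, not lifted from this commit):
 * the firmware-feature negotiation can consult the hints before enabling
 * a bypass feature, along the lines of
 *
 *   if (check_device_hint_status(softs, PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS))
 *           return;   (feature disabled via hint, leave it off)
 */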
static void
bsd_set_hint_adapter_queue_depth(struct pqisrc_softstate *softs)
{
uint32_t queue_depth = softs->pqi_cap.max_outstanding_io;
DBG_FUNC("IN\n");
if ((!softs->hint.queue_depth) || (softs->hint.queue_depth >
softs->pqi_cap.max_outstanding_io)) {
/* Nothing to do here. Supported queue depth
* is already set by controller/driver */
}
else if (softs->hint.queue_depth < PQISRC_MIN_OUTSTANDING_REQ) {
/* Nothing to do here. Supported queue depth
* is already set by controller/driver */
}
else {
/* Set Device.Hint queue depth here */
softs->pqi_cap.max_outstanding_io =
softs->hint.queue_depth;
}
DBG_NOTE("Adapter queue depth before hint set = %u, Queue depth after hint set = %u\n",
queue_depth, softs->pqi_cap.max_outstanding_io);
DBG_FUNC("OUT\n");
}
static void
bsd_set_hint_scatter_gather_config(struct pqisrc_softstate *softs)
{
uint32_t pqi_sg_segments = softs->pqi_cap.max_sg_elem;
DBG_FUNC("IN\n");
/* At least BSD_MIN_SG_SEGMENTS sg's are required for the hint to
 * work correctly; otherwise default to the sg count set by the
 * driver/controller. */
if ((!softs->hint.sg_segments) || (softs->hint.sg_segments >
softs->pqi_cap.max_sg_elem)) {
/* Nothing to do here. Supported sg count
* is already set by controller/driver. */
}
else if (softs->hint.sg_segments < BSD_MIN_SG_SEGMENTS)
{
/* Nothing to do here. Supported sg count
* is already set by controller/driver. */
}
else {
/* Set Device.Hint sg count here */
softs->pqi_cap.max_sg_elem = softs->hint.sg_segments;
}
DBG_NOTE("SG segments before hint set = %u, SG segments after hint set = %u\n",
pqi_sg_segments, softs->pqi_cap.max_sg_elem);
DBG_FUNC("OUT\n");
}
void
bsd_set_hint_adapter_cap(struct pqisrc_softstate *softs)
{
DBG_FUNC("IN\n");
bsd_set_hint_adapter_queue_depth(softs);
bsd_set_hint_scatter_gather_config(softs);
DBG_FUNC("OUT\n");
}
void
bsd_set_hint_adapter_cpu_config(struct pqisrc_softstate *softs)
{
DBG_FUNC("IN\n");
/* The online cpu count decides the number of queues the driver can
 * create, and the msi interrupt count as well.
 * If the hint file sets the cpu count to "zero", the driver gets "one"
 * queue and "one" legacy interrupt (the event queue is shared with the
 * operational IB queue).
 * See os_get_intr_config for the interrupt assignment. */
if (softs->hint.cpu_count > softs->num_cpus_online) {
/* Nothing to do here. Supported cpu count
* already fetched from hardware */
}
else {
/* Set Device.Hint cpu count here */
softs->num_cpus_online = softs->hint.cpu_count;
}
DBG_FUNC("OUT\n");
}

View File

@ -1,5 +1,5 @@
/*-
* Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -29,15 +29,19 @@
/* Function prototypes */
/*smartpqi_init.c */
int pqisrc_init(pqisrc_softstate_t *);
void pqisrc_uninit(pqisrc_softstate_t *);
void pqisrc_pqi_uninit(pqisrc_softstate_t *);
int pqisrc_process_config_table(pqisrc_softstate_t *);
int pqisrc_flush_cache(pqisrc_softstate_t *, enum pqisrc_flush_cache_event_type);
int pqisrc_wait_for_pqi_reset_completion(pqisrc_softstate_t *);
int pqisrc_wait_for_cmnd_complete(pqisrc_softstate_t *);
void pqisrc_complete_internal_cmds(pqisrc_softstate_t *);
void sanity_check_os_behavior(pqisrc_softstate_t *);
/* smartpqi_sis.c*/
int pqisrc_sis_init(pqisrc_softstate_t *);
void pqisrc_sis_uninit(pqisrc_softstate_t *);
int pqisrc_reenable_sis(pqisrc_softstate_t *);
@ -50,17 +54,21 @@ int pqisrc_sis_wait_for_db_bit_to_clear(pqisrc_softstate_t *, uint32_t);
void sis_disable_interrupt(pqisrc_softstate_t*);
/* smartpqi_queue.c */
int pqisrc_submit_admin_req(pqisrc_softstate_t *,
gen_adm_req_iu_t *, gen_adm_resp_iu_t *);
int pqisrc_create_admin_queue(pqisrc_softstate_t *);
int pqisrc_destroy_admin_queue(pqisrc_softstate_t *);
int pqisrc_create_op_queues(pqisrc_softstate_t *);
int pqisrc_allocate_and_init_inbound_q(pqisrc_softstate_t *, ib_queue_t *,
char *);
int pqisrc_allocate_and_init_outbound_q(pqisrc_softstate_t *, ob_queue_t *,
char *);
/* smartpqi_cmd.c */
int pqisrc_submit_cmnd(pqisrc_softstate_t *,ib_queue_t *,void *);
/* smartpqi_tag.c */
#ifndef LOCKFREE_STACK
int pqisrc_init_taglist(pqisrc_softstate_t *,pqi_taglist_t *,uint32_t);
void pqisrc_destroy_taglist(pqisrc_softstate_t *,pqi_taglist_t *);
@ -73,26 +81,37 @@ void pqisrc_put_tag(lockless_stack_t *,uint32_t);
uint32_t pqisrc_get_tag(lockless_stack_t *);
#endif /* LOCKFREE_STACK */
/* smartpqi_discovery.c */
void pqisrc_remove_device(pqisrc_softstate_t *, pqi_scsi_dev_t *);
boolean_t pqisrc_add_softs_entry(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device,
uint8_t *scsi3addr);
int pqisrc_get_ctrl_fw_version(pqisrc_softstate_t *);
int pqisrc_rescan_devices(pqisrc_softstate_t *);
int pqisrc_scan_devices(pqisrc_softstate_t *);
void pqisrc_process_raid_path_io_response(pqisrc_softstate_t *, uint16_t, struct pqi_io_response *);
void pqisrc_process_io_error_response(pqisrc_softstate_t *, int, uint16_t, struct pqi_io_response *);
void pqisrc_cleanup_devices(pqisrc_softstate_t *);
void pqisrc_device_mem_free(pqisrc_softstate_t *, pqi_scsi_dev_t *);
boolean_t pqisrc_is_external_raid_device(pqi_scsi_dev_t *device);
void pqisrc_free_device(pqisrc_softstate_t * softs,pqi_scsi_dev_t *device);
void pqisrc_init_bitmap(pqisrc_softstate_t *softs);
void pqisrc_remove_target_bit(pqisrc_softstate_t *softs, int target);
int pqisrc_find_avail_target(pqisrc_softstate_t *softs);
int pqisrc_find_device_list_index(pqisrc_softstate_t *softs,
pqi_scsi_dev_t *device);
int pqisrc_find_btl_list_index(pqisrc_softstate_t *softs,
int bus, int target, int lun);
int pqisrc_delete_softs_entry(pqisrc_softstate_t *softs,
pqi_scsi_dev_t *device);
int pqisrc_get_physical_logical_luns(pqisrc_softstate_t *softs, uint8_t cmd,
reportlun_data_ext_t **buff, size_t *data_length);
int pqisrc_send_scsi_inquiry(pqisrc_softstate_t *softs,
uint8_t *scsi3addr, uint16_t vpd_page, uint8_t *buff, int buf_len);
int pqisrc_simple_dma_alloc(pqisrc_softstate_t *, struct dma_mem *, size_t,
sgt_t *);
int pqisrc_prepare_send_raid(pqisrc_softstate_t *, pqisrc_raid_req_t *,
void *, size_t , uint8_t *, raid_path_error_info_elem_t *);
/* smartpqi_helper.c */
boolean_t pqisrc_ctrl_offline(pqisrc_softstate_t *);
void pqisrc_heartbeat_timer_handler(pqisrc_softstate_t *);
int pqisrc_wait_on_condition(pqisrc_softstate_t *softs, rcb_t *rcb,
@ -109,14 +128,13 @@ void pqisrc_configure_legacy_intx(pqisrc_softstate_t*, boolean_t);
void pqisrc_ctrl_diagnostic_options(pqisrc_softstate_t *);
void pqisrc_wait_for_device_commands_to_complete(pqisrc_softstate_t *,
pqi_scsi_dev_t *);
int pqisrc_QuerySenseFeatures(pqisrc_softstate_t *);
void check_device_pending_commands_to_complete(pqisrc_softstate_t *,
pqi_scsi_dev_t *);
uint32_t pqisrc_count_num_scsi_active_requests_on_dev(pqisrc_softstate_t *,
pqi_scsi_dev_t *);
void pqisrc_signal_event(pqisrc_softstate_t *softs, rcb_t *rcb);
/* smartpqi_response.c */
void pqisrc_process_internal_raid_response_success(pqisrc_softstate_t *,
rcb_t *);
void pqisrc_process_internal_raid_response_error(pqisrc_softstate_t *,
@ -129,39 +147,73 @@ void pqisrc_process_aio_response_error(pqisrc_softstate_t *,
void pqisrc_process_raid_response_error(pqisrc_softstate_t *,
rcb_t *, uint16_t);
void pqisrc_process_response_queue(pqisrc_softstate_t *, int);
void pqisrc_show_aio_error_info(pqisrc_softstate_t *softs, rcb_t *rcb,
aio_path_error_info_elem_t *aio_err);
void pqisrc_show_raid_error_info(pqisrc_softstate_t *softs, rcb_t *rcb,
raid_path_error_info_elem_t *aio_err);
boolean_t suppress_innocuous_error_prints(pqisrc_softstate_t *softs,
rcb_t *rcb);
uint8_t pqisrc_get_cmd_from_rcb(rcb_t *);
boolean_t pqisrc_is_innocuous_error(pqisrc_softstate_t *, rcb_t *, void *);
/* smartpqi_request.c */
int pqisrc_build_send_vendor_request(pqisrc_softstate_t *softs,
struct pqi_vendor_general_request *request);
int pqisrc_build_send_io(pqisrc_softstate_t *,rcb_t *);
int pqisrc_build_scsi_cmd_raidbypass(pqisrc_softstate_t *softs,
pqi_scsi_dev_t *device, rcb_t *rcb);
int pqisrc_send_tmf(pqisrc_softstate_t *, pqi_scsi_dev_t *,
rcb_t *, rcb_t *, int);
int pqisrc_write_current_time_to_host_wellness(pqisrc_softstate_t *softs);
int pqisrc_write_driver_version_to_host_wellness(pqisrc_softstate_t *softs);
extern inline void pqisrc_aio_build_cdb(aio_req_locator_t *, uint32_t,
rcb_t *, uint8_t *);
extern inline boolean_t pqisrc_aio_req_too_big(pqisrc_softstate_t *, pqi_scsi_dev_t *,
rcb_t *, aio_req_locator_t *, uint32_t);
void pqisrc_build_aio_common(pqisrc_softstate_t *, pqi_aio_req_t *,
rcb_t *, uint32_t);
void pqisrc_build_aio_R1_write(pqisrc_softstate_t *,
pqi_aio_raid1_write_req_t *, rcb_t *, uint32_t);
void pqisrc_build_aio_R5or6_write(pqisrc_softstate_t *,
pqi_aio_raid5or6_write_req_t *, rcb_t *, uint32_t);
void pqisrc_show_cdb(pqisrc_softstate_t *softs, char *msg, rcb_t *rcb, uint8_t *cdb);
void pqisrc_print_buffer(pqisrc_softstate_t *softs, char *msg, void *user_buf, uint32_t total_len, uint32_t flags);
void pqisrc_show_rcb_details(pqisrc_softstate_t *softs, rcb_t *rcb, char *msg, void *err_info);
void pqisrc_show_aio_io(pqisrc_softstate_t *, rcb_t *,
pqi_aio_req_t *, uint32_t);
void pqisrc_show_aio_common(pqisrc_softstate_t *, rcb_t *, pqi_aio_req_t *);
void pqisrc_show_aio_R1_write(pqisrc_softstate_t *, rcb_t *,
pqi_aio_raid1_write_req_t *);
void pqisrc_show_aio_R5or6_write(pqisrc_softstate_t *, rcb_t *,
pqi_aio_raid5or6_write_req_t *);
boolean_t pqisrc_cdb_is_write(uint8_t *);
void print_this_counter(pqisrc_softstate_t *softs, io_counters_t *pcounter, char *msg);
void print_all_counters(pqisrc_softstate_t *softs, uint32_t flags);
char *io_path_to_ascii(IO_PATH_T path);
void int_to_scsilun(uint64_t, uint8_t *);
boolean_t pqisrc_cdb_is_read(uint8_t *);
void pqisrc_build_aio_io(pqisrc_softstate_t *, rcb_t *, pqi_aio_req_t *, uint32_t);
uint8_t pqisrc_get_aio_data_direction(rcb_t *);
uint8_t pqisrc_get_raid_data_direction(rcb_t *);
void dump_tmf_details(pqisrc_softstate_t *, rcb_t *, char *);
io_type_t get_io_type_from_cdb(uint8_t *);
OS_ATOMIC64_T increment_this_counter(io_counters_t *, IO_PATH_T , io_type_t );
boolean_t
is_buffer_zero(void *, uint32_t );
/* smartpqi_event.c*/
int pqisrc_report_event_config(pqisrc_softstate_t *);
int pqisrc_set_event_config(pqisrc_softstate_t *);
int pqisrc_process_event_intr_src(pqisrc_softstate_t *,int);
void pqisrc_ack_all_events(void *arg);
void pqisrc_wait_for_rescan_complete(pqisrc_softstate_t *softs);
boolean_t pqisrc_update_scsi_sense(const uint8_t *, int,
struct sense_header_scsi *);
int pqisrc_prepare_send_ctrlr_request(pqisrc_softstate_t *softs, pqisrc_raid_req_t *request,
void *buff, size_t datasize);
int pqisrc_submit_management_req(pqisrc_softstate_t *,
pqi_event_config_request_t *);
@ -185,102 +237,86 @@ void pqisrc_decide_admin_queue_config(pqisrc_softstate_t *);
int pqisrc_allocate_and_init_adminq(pqisrc_softstate_t *);
int pqisrc_create_delete_adminq(pqisrc_softstate_t *, uint32_t);
void pqisrc_print_adminq_config(pqisrc_softstate_t *);
int pqisrc_delete_op_queue(pqisrc_softstate_t *, uint32_t, boolean_t);
void pqisrc_destroy_event_queue(pqisrc_softstate_t *);
void pqisrc_destroy_op_ib_queues(pqisrc_softstate_t *);
void pqisrc_destroy_op_ob_queues(pqisrc_softstate_t *);
int pqisrc_change_op_ibq_queue_prop(pqisrc_softstate_t *, ib_queue_t *,
uint32_t);
int pqisrc_create_op_obq(pqisrc_softstate_t *, ob_queue_t *);
int pqisrc_create_op_ibq(pqisrc_softstate_t *, ib_queue_t *);
int pqisrc_create_op_aio_ibq(pqisrc_softstate_t *, ib_queue_t *);
int pqisrc_create_op_raid_ibq(pqisrc_softstate_t *, ib_queue_t *);
int pqisrc_alloc_and_create_event_queue(pqisrc_softstate_t *);
int pqisrc_alloc_and_create_ib_queues(pqisrc_softstate_t *);
int pqisrc_alloc_and_create_ob_queues(pqisrc_softstate_t *);
int pqisrc_process_task_management_response(pqisrc_softstate_t *,
pqi_tmf_resp_t *);
/*Device outstanding Io count*/
uint64_t pqisrc_increment_device_active_io(pqisrc_softstate_t *,
pqi_scsi_dev_t *);
uint64_t pqisrc_decrement_device_active_io(pqisrc_softstate_t *,
pqi_scsi_dev_t *);
void pqisrc_init_device_active_io(pqisrc_softstate_t *,
pqi_scsi_dev_t *);
uint64_t pqisrc_read_device_active_io(pqisrc_softstate_t *,
pqi_scsi_dev_t *);
/* smartpqi_ioctl.c*/
int pqisrc_passthru_ioctl(struct pqisrc_softstate *, void *, int);
/* Functions Prototypes */
/* smartpqi_mem.c */
int os_dma_mem_alloc(pqisrc_softstate_t *,struct dma_mem *);
void os_dma_mem_free(pqisrc_softstate_t *,struct dma_mem *);
void *os_mem_alloc(pqisrc_softstate_t *,size_t);
void os_mem_free(pqisrc_softstate_t *,void *,size_t);
void os_resource_free(pqisrc_softstate_t *);
int os_dma_setup(pqisrc_softstate_t *);
int os_dma_destroy(pqisrc_softstate_t *);
void os_update_dma_attributes(pqisrc_softstate_t *);
/* smartpqi_intr.c */
int os_get_intr_config(pqisrc_softstate_t *);
int os_setup_intr(pqisrc_softstate_t *);
int os_destroy_intr(pqisrc_softstate_t *);
int os_get_processor_config(pqisrc_softstate_t *);
void os_free_intr_config(pqisrc_softstate_t *);
/* smartpqi_ioctl.c */
int os_copy_to_user(struct pqisrc_softstate *, void *,
void *, int, int);
int os_copy_from_user(struct pqisrc_softstate *, void *,
void *, int, int);
int create_char_dev(struct pqisrc_softstate *, int);
void destroy_char_dev(struct pqisrc_softstate *);
/* smartpqi_misc.c*/
int os_init_spinlock(struct pqisrc_softstate *, struct mtx *, char *);
void os_uninit_spinlock(struct mtx *);
int os_create_semaphore(const char *, int,struct sema *);
int os_destroy_semaphore(struct sema *);
void os_sema_lock(struct sema *);
void os_sema_unlock(struct sema *);
void bsd_set_hint_adapter_cap(struct pqisrc_softstate *);
void bsd_set_hint_adapter_cpu_config(struct pqisrc_softstate *);
int os_strlcpy(char *dst, char *src, int len);
void os_complete_outstanding_cmds_nodevice(pqisrc_softstate_t *);
void os_stop_heartbeat_timer(pqisrc_softstate_t *);
void os_start_heartbeat_timer(void *);
/* smartpqi_cam.c */
uint8_t os_get_task_attr(rcb_t *);
void smartpqi_target_rescan(struct pqisrc_softstate *);
void os_rescan_target(struct pqisrc_softstate *, pqi_scsi_dev_t *);
/* smartpqi_intr.c smartpqi_main.c */
void pqisrc_event_worker(void *, int);
void os_add_device(pqisrc_softstate_t *, pqi_scsi_dev_t *);
void os_remove_device(pqisrc_softstate_t *, pqi_scsi_dev_t *);
void os_io_response_success(rcb_t *);
void os_aio_response_error(rcb_t *, aio_path_error_info_elem_t *);
boolean_t check_device_hint_status(struct pqisrc_softstate *, unsigned int );
void smartpqi_adjust_queue_depth(struct cam_path *, uint32_t );
void os_raid_response_error(rcb_t *, raid_path_error_info_elem_t *);
void os_wellness_periodic(void *);
void os_reset_rcb( rcb_t *);
int register_sim(struct pqisrc_softstate *, int);
void deregister_sim(struct pqisrc_softstate *);
int check_for_scsi_opcode(uint8_t *, boolean_t *, uint64_t *,
uint32_t *);
int register_legacy_intr(pqisrc_softstate_t *);
int register_msix_intr(pqisrc_softstate_t *);
@ -292,5 +328,4 @@ void pqisrc_save_controller_info(struct pqisrc_softstate *);
/* Domain status conversion */
int bsd_status_to_pqi_status(int );
#endif

View File

@ -1,5 +1,5 @@
/*-
* Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -28,6 +28,7 @@
/*
* Submit an admin IU to the adapter.
* TODO : Admin command implemented using polling,
* Add interrupt support, if required
*/
int
@ -120,10 +121,10 @@ pqisrc_get_admin_queue_config(pqisrc_softstate_t *softs)
softs->admin_ib_queue.elem_size = ((val & 0xFF0000) >> 16) * 16;
softs->admin_ob_queue.elem_size = ((val & 0xFF000000) >> 24) * 16;
DBG_FUNC(" softs->admin_ib_queue.num_elem : %d\n",
softs->admin_ib_queue.num_elem);
DBG_FUNC(" softs->admin_ib_queue.elem_size : %d\n",
softs->admin_ib_queue.elem_size);
DBG_INIT(" admin ib: num_elem=%u elem_size=%u\n",
softs->admin_ib_queue.num_elem, softs->admin_ib_queue.elem_size);
DBG_INIT(" admin ob: num_elem=%u elem_size=%u\n",
softs->admin_ob_queue.num_elem, softs->admin_ob_queue.elem_size);
}
/*
@ -142,79 +143,66 @@ pqisrc_decide_admin_queue_config(pqisrc_softstate_t *softs)
}
/*
* Allocate DMA memory for inbound queue and initialize.
*/
int
pqisrc_allocate_and_init_inbound_q(pqisrc_softstate_t *softs, ib_queue_t *ib_q, char *tag)
{
struct dma_mem *dma_mem = &ib_q->alloc_dma;
uint32_t ib_array_size = 0;
uint32_t alloc_size = 0;
char *virt_addr = NULL;
dma_addr_t dma_addr = 0;
int ret = PQI_STATUS_SUCCESS;
ib_array_size = ib_q->num_elem * ib_q->elem_size;
ASSERT(ib_array_size > 0);
alloc_size = ib_array_size + PQI_CI_PI_ALIGN + PQI_ADDR_ALIGN; /* for IB CI and OB PI */
/* Allocate memory for the Q */
memset(dma_mem, 0, sizeof(*dma_mem));
os_strlcpy(dma_mem->tag, tag, sizeof(dma_mem->tag));
dma_mem->size = alloc_size;
dma_mem->align = PQI_ADDR_ALIGN;
ret = os_dma_mem_alloc(softs, &ib_q->alloc_dma);
if (ret) {
DBG_ERR("Failed to Allocate Admin Q ret : %d\n", ret);
DBG_ERR("Failed to Allocate Q tag=%s ret=%d\n", dma_mem->tag, ret);
goto err_out;
}
DBG_INIT("alloc tag=%s size=0x%x align=0x%x virt_addr=%p dma_addr=%p\n",
dma_mem->tag, dma_mem->size, dma_mem->align, dma_mem->virt_addr, (void*)dma_mem->dma_addr);
/* Setup the address */
virt_addr = dma_mem->virt_addr;
dma_addr = dma_mem->dma_addr;
ASSERT(!((uint64_t)virt_addr & PQI_ADDR_ALIGN_MASK));
ASSERT(!(dma_addr & PQI_ADDR_ALIGN_MASK));
ib_q->array_virt_addr = virt_addr;
ib_q->array_dma_addr = dma_addr;
ib_q->pi_local = 0;
/* update addr for the next user */
virt_addr += ib_array_size;
dma_addr += ib_array_size;
/* IB CI */
ASSERT(!(dma_addr & PQI_CI_PI_ALIGN_MASK));
ib_q->ci_virt_addr = (uint32_t*)virt_addr;
ib_q->ci_dma_addr = dma_addr;
/* update addr for the next user */
virt_addr += PQI_CI_PI_ALIGN;
DBG_INIT("softs->admin_ib_queue.ci_dma_addr : %p,softs->admin_ob_queue.pi_dma_addr :%p\n",
(void*)softs->admin_ib_queue.ci_dma_addr, (void*)softs->admin_ob_queue.pi_dma_addr );
DBG_INIT("ib_q: virt_addr=%p, ci_dma_addr=%p elem=%u size=%u\n",
ib_q->array_virt_addr, (void*)ib_q->ci_dma_addr, ib_q->num_elem, ib_array_size);
/* Verify we aren't out of bounds from allocation */
ASSERT(virt_addr <= ((char*)dma_mem->virt_addr + alloc_size));
DBG_FUNC("OUT\n");
return ret;
@ -224,6 +212,106 @@ pqisrc_allocate_and_init_adminq(pqisrc_softstate_t *softs)
return PQI_STATUS_FAILURE;
}
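/*
 * Resulting single-allocation layout for an inbound queue (a sketch of
 * the arithmetic above, not a separate structure):
 *
 *   [ element array: num_elem * elem_size ][ pad to PQI_CI_PI_ALIGN ][ CI (uint32_t) ]
 *
 * so the ring and its consumer index travel in one DMA buffer; the
 * outbound variant below lays out the producer index the same way.
 */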
/*
* Allocate DMA memory for outbound queue and initialize.
*/
int
pqisrc_allocate_and_init_outbound_q(pqisrc_softstate_t *softs, ob_queue_t *ob_q,
char *tag)
{
struct dma_mem *dma_mem = &ob_q->alloc_dma;
uint32_t ob_array_size = 0;
uint32_t alloc_size = 0;
char *virt_addr = NULL;
dma_addr_t dma_addr = 0;
int ret = PQI_STATUS_SUCCESS;
ob_array_size = ob_q->num_elem * ob_q->elem_size;
ASSERT(ob_array_size > 0);
alloc_size = ob_array_size + PQI_CI_PI_ALIGN + PQI_ADDR_ALIGN; /* for OB PI */
/* Allocate memory for the Q */
memset(dma_mem, 0, sizeof(*dma_mem));
os_strlcpy(dma_mem->tag, tag, sizeof(dma_mem->tag));
dma_mem->size = alloc_size;
dma_mem->align = PQI_ADDR_ALIGN;
ret = os_dma_mem_alloc(softs, &ob_q->alloc_dma);
if (ret) {
DBG_ERR("Failed to Allocate Q tag=%s ret=%d\n", dma_mem->tag, ret);
goto err_out;
}
DBG_INIT("alloc tag=%s size=0x%x align=0x%x virt_addr=%p dma_addr=%p\n",
dma_mem->tag, dma_mem->size, dma_mem->align, dma_mem->virt_addr, (void*)dma_mem->dma_addr);
/* Setup the address */
virt_addr = dma_mem->virt_addr;
dma_addr = dma_mem->dma_addr;
ASSERT(!((uint64_t)virt_addr & PQI_ADDR_ALIGN_MASK));
ASSERT(!(dma_addr & PQI_ADDR_ALIGN_MASK));
ob_q->array_virt_addr = virt_addr;
ob_q->array_dma_addr = dma_addr;
ob_q->ci_local = 0;
/* update addr for the next user */
virt_addr += ob_array_size;
dma_addr += ob_array_size;
/* OB PI */
ASSERT(!(dma_addr & PQI_CI_PI_ALIGN_MASK));
ob_q->pi_virt_addr = (uint32_t*)virt_addr;
ob_q->pi_dma_addr = dma_addr;
/* update addr to mark the end for the next user */
virt_addr += PQI_CI_PI_ALIGN;
DBG_INIT("ob_q: virt_addr=%p, pi_dma_addr=%p elem=%u size=%u\n",
ob_q->array_virt_addr, (void*)ob_q->pi_dma_addr, ob_q->num_elem, ob_array_size);
/* Verify we aren't out of bounds from allocation */
ASSERT(virt_addr <= ((char*)dma_mem->virt_addr + alloc_size));
DBG_FUNC("OUT\n");
return ret;
err_out:
DBG_FUNC("failed OUT\n");
return PQI_STATUS_FAILURE;
}
/*
* Allocate DMA memory for admin queue and initialize.
*/
int pqisrc_allocate_and_init_adminq(pqisrc_softstate_t *softs)
{
int ret;
ib_queue_t *admin_ib_q = &softs->admin_ib_queue;
ob_queue_t *admin_ob_q = &softs->admin_ob_queue;
ret = pqisrc_allocate_and_init_inbound_q(softs, admin_ib_q, "admin_queue");
if (!ret) {
admin_ib_q->q_id = PQI_ADMIN_IB_QUEUE_ID;
ret = pqisrc_allocate_and_init_outbound_q(softs, admin_ob_q, "admin_queue");
if(!ret)
admin_ob_q->q_id = PQI_ADMIN_OB_QUEUE_ID;
else {
if(softs->admin_ib_queue.lockcreated==true) {
OS_UNINIT_PQILOCK(&softs->admin_ib_queue.lock);
softs->admin_ib_queue.lockcreated = false;
}
if (softs->admin_ib_queue.alloc_dma.virt_addr)
os_dma_mem_free(softs, &softs->admin_ib_queue.alloc_dma);
}
}
else
DBG_ERR("Failed to create Admin Queue pair\n");
return ret;
}
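/*
 * Minimal usage sketch (assumed caller, consistent with the cleanup
 * above): the admin queue pair is built with one call, and the helper
 * already releases the inbound queue's alloc_dma if the outbound half
 * fails, so a caller only unwinds its own state:
 *
 *   if (pqisrc_allocate_and_init_adminq(softs) != PQI_STATUS_SUCCESS)
 *           goto err_out;
 */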
/*
* Subroutine used to create (or) delete the admin queue requested.
*/
@ -264,17 +352,17 @@ pqisrc_print_adminq_config(pqisrc_softstate_t *softs)
(void*)softs->admin_ib_queue.array_dma_addr);
DBG_INFO(" softs->admin_ib_queue.array_virt_addr : %p\n",
(void*)softs->admin_ib_queue.array_virt_addr);
DBG_INFO(" softs->admin_ib_queue.num_elem : %d\n",
DBG_INFO(" softs->admin_ib_queue.num_elem : %u\n",
softs->admin_ib_queue.num_elem);
DBG_INFO(" softs->admin_ib_queue.elem_size : %d\n",
DBG_INFO(" softs->admin_ib_queue.elem_size : %u\n",
softs->admin_ib_queue.elem_size);
DBG_INFO(" softs->admin_ob_queue.array_dma_addr : %p\n",
(void*)softs->admin_ob_queue.array_dma_addr);
DBG_INFO(" softs->admin_ob_queue.array_virt_addr : %p\n",
(void*)softs->admin_ob_queue.array_virt_addr);
DBG_INFO(" softs->admin_ob_queue.num_elem : %d\n",
DBG_INFO(" softs->admin_ob_queue.num_elem : %u\n",
softs->admin_ob_queue.num_elem);
DBG_INFO(" softs->admin_ob_queue.elem_size : %d\n",
DBG_INFO(" softs->admin_ob_queue.elem_size : %u\n",
softs->admin_ob_queue.elem_size);
DBG_INFO(" softs->admin_ib_queue.pi_register_abs : %p\n",
(void*)softs->admin_ib_queue.pi_register_abs);
@ -289,6 +377,7 @@ int
pqisrc_create_admin_queue(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
/* struct pqi_dev_adminq_cap *pqi_cap; */
uint32_t admin_q_param = 0;
DBG_FUNC("IN\n");
@ -371,8 +460,11 @@ pqisrc_create_admin_queue(pqisrc_softstate_t *softs)
return ret;
err_lock:
#if 0
pqisrc_create_delete_adminq(softs, PQI_ADMIN_QUEUE_CONF_FUNC_DEL_Q_PAIR);
#endif
err_q_create:
os_dma_mem_free(softs, &softs->admin_queue_dma_mem);
pqisrc_destroy_admin_queue(softs);
err_out:
DBG_FUNC("failed OUT\n");
return ret;
@ -425,13 +517,14 @@ pqisrc_destroy_event_queue(pqisrc_softstate_t *softs)
int ret = PQI_STATUS_SUCCESS;
ret = pqisrc_delete_op_queue(softs, softs->event_q.q_id, false);
if (ret) {
DBG_ERR("Failed to Delete Event Q %d\n", softs->event_q.q_id);
DBG_ERR("Failed to Delete Event Q %u\n", softs->event_q.q_id);
}
softs->event_q.created = false;
}
/* Free the memory */
os_dma_mem_free(softs, &softs->event_q_dma_mem);
if (softs->event_q.alloc_dma.virt_addr)
os_dma_mem_free(softs, &softs->event_q.alloc_dma);
DBG_FUNC("OUT\n");
}
@ -444,44 +537,42 @@ pqisrc_destroy_op_ib_queues(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
ib_queue_t *op_ib_q = NULL;
uint32_t total_op_ibq = softs->num_op_raid_ibq;
int i;
DBG_FUNC("IN\n");
for (i = 0; i < softs->num_op_raid_ibq; i++) {
/* OP RAID IB Q */
for (i = 0; i < total_op_ibq; i++) {
int repeat = 0;
/* RAID first */
op_ib_q = &softs->op_raid_ib_q[i];
release_queue:
if (op_ib_q->created == true) {
ret = pqisrc_delete_op_queue(softs, op_ib_q->q_id, true);
ret = pqisrc_delete_op_queue(softs, op_ib_q->q_id,
true);
if (ret) {
DBG_ERR("Failed to Delete Raid IB Q %d\n",op_ib_q->q_id);
DBG_ERR("Failed to Delete IB Q %u\n",
op_ib_q->q_id);
}
op_ib_q->created = false;
}
if(op_ib_q->lockcreated==true){
OS_UNINIT_PQILOCK(&op_ib_q->lock);
op_ib_q->lockcreated = false;
}
/* OP AIO IB Q */
op_ib_q = &softs->op_aio_ib_q[i];
if (op_ib_q->created == true) {
ret = pqisrc_delete_op_queue(softs, op_ib_q->q_id, true);
if (ret) {
DBG_ERR("Failed to Delete AIO IB Q %d\n",op_ib_q->q_id);
}
op_ib_q->created = false;
if (op_ib_q->lockcreated == true) {
OS_UNINIT_PQILOCK(&op_ib_q->lock);
op_ib_q->lockcreated = false;
}
if(op_ib_q->lockcreated==true){
OS_UNINIT_PQILOCK(&op_ib_q->lock);
op_ib_q->lockcreated = false;
}
/* Free the memory */
if (op_ib_q->alloc_dma.virt_addr)
os_dma_mem_free(softs, &op_ib_q->alloc_dma);
if (repeat < 1) {
repeat++;
op_ib_q = &softs->op_aio_ib_q[i];
goto release_queue;
}
}
/* Free the memory */
os_dma_mem_free(softs, &softs->op_ibq_dma_mem);
DBG_FUNC("OUT\n");
}
@ -493,23 +584,27 @@ pqisrc_destroy_op_ob_queues(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
int i;
ob_queue_t *op_ob_q = NULL;
DBG_FUNC("IN\n");
for (i = 0; i < softs->num_op_obq; i++) {
ob_queue_t *op_ob_q = NULL;
op_ob_q = &softs->op_ob_q[i];
if (op_ob_q->created == true) {
ret = pqisrc_delete_op_queue(softs, op_ob_q->q_id, false);
if (ret) {
DBG_ERR("Failed to Delete OB Q %d\n",op_ob_q->q_id);
DBG_ERR("Failed to Delete OB Q %u\n",op_ob_q->q_id);
}
op_ob_q->created = false;
}
/* Free the memory */
if (op_ob_q->alloc_dma.virt_addr)
os_dma_mem_free(softs, &op_ob_q->alloc_dma);
}
/* Free the memory */
os_dma_mem_free(softs, &softs->op_obq_dma_mem);
DBG_FUNC("OUT\n");
}
@ -522,11 +617,22 @@ pqisrc_destroy_admin_queue(pqisrc_softstate_t *softs)
int ret = PQI_STATUS_SUCCESS;
DBG_FUNC("IN\n");
if(softs->admin_ib_queue.lockcreated==true) {
OS_UNINIT_PQILOCK(&softs->admin_ib_queue.lock);
softs->admin_ib_queue.lockcreated = false;
}
#if 0
ret = pqisrc_create_delete_adminq(softs,
PQI_ADMIN_QUEUE_CONF_FUNC_DEL_Q_PAIR);
#endif
os_dma_mem_free(softs, &softs->admin_queue_dma_mem);
if (softs->admin_ib_queue.alloc_dma.virt_addr)
os_dma_mem_free(softs, &softs->admin_ib_queue.alloc_dma);
if (softs->admin_ob_queue.alloc_dma.virt_addr)
os_dma_mem_free(softs, &softs->admin_ob_queue.alloc_dma);
DBG_FUNC("OUT\n");
return ret;
@ -689,15 +795,8 @@ int
pqisrc_alloc_and_create_event_queue(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
uint32_t alloc_size = 0;
uint32_t num_elem;
char *virt_addr = NULL;
dma_addr_t dma_addr = 0;
uint64_t event_q_pi_dma_start_offset = 0;
uint32_t event_q_pi_virt_start_offset = 0;
char *event_q_pi_virt_start_addr = NULL;
ob_queue_t *event_q = NULL;
ob_queue_t *event_q = &softs->event_q;
DBG_FUNC("IN\n");
@ -710,47 +809,26 @@ pqisrc_alloc_and_create_event_queue(pqisrc_softstate_t *softs)
* for queue size calculation.
*/
#ifdef SHARE_EVENT_QUEUE_FOR_IO
num_elem = MIN(softs->num_elem_per_op_obq, PQISRC_NUM_EVENT_Q_ELEM);
num_elem = MIN(softs->num_elem_per_op_obq, PQISRC_MAX_EVENT_QUEUE_ELEM_NUM);
#else
num_elem = PQISRC_NUM_EVENT_Q_ELEM;
num_elem = PQISRC_MAX_EVENT_QUEUE_ELEM_NUM;
#endif
alloc_size = num_elem * PQISRC_EVENT_Q_ELEM_SIZE;
event_q_pi_dma_start_offset = alloc_size;
event_q_pi_virt_start_offset = alloc_size;
alloc_size += sizeof(uint32_t); /*For IBQ CI*/
event_q->num_elem = num_elem;
event_q->elem_size = PQISRC_EVENT_Q_ELEM_SIZE_BYTES;
ret = pqisrc_allocate_and_init_outbound_q(softs, event_q, "event_queue");
/* Allocate memory for event queues */
softs->event_q_dma_mem.tag = "event_queue";
softs->event_q_dma_mem.size = alloc_size;
softs->event_q_dma_mem.align = PQI_OPQ_ELEM_ARRAY_ALIGN;
ret = os_dma_mem_alloc(softs, &softs->event_q_dma_mem);
if (ret) {
DBG_ERR("Failed to Allocate Event Q ret : %d\n"
, ret);
DBG_ERR("Failed to Allocate EventQ\n");
goto err_out;
}
/* Set up the address */
virt_addr = softs->event_q_dma_mem.virt_addr;
dma_addr = softs->event_q_dma_mem.dma_addr;
event_q_pi_dma_start_offset += dma_addr;
event_q_pi_virt_start_addr = virt_addr + event_q_pi_virt_start_offset;
event_q = &softs->event_q;
ASSERT(!(dma_addr & PQI_ADDR_ALIGN_MASK_64));
FILL_QUEUE_ARRAY_ADDR(event_q,virt_addr,dma_addr);
event_q->q_id = PQI_OP_EVENT_QUEUE_ID;
event_q->num_elem = num_elem;
event_q->elem_size = PQISRC_EVENT_Q_ELEM_SIZE;
event_q->pi_dma_addr = event_q_pi_dma_start_offset;
event_q->pi_virt_addr = (uint32_t *)event_q_pi_virt_start_addr;
event_q->intr_msg_num = 0; /* vector zero for event */
ASSERT(!(event_q->pi_dma_addr & PQI_ADDR_ALIGN_MASK_4));
ret = pqisrc_create_op_obq(softs,event_q);
if (ret) {
DBG_ERR("Failed to Create EventQ %d\n",event_q->q_id);
DBG_ERR("Failed to Create EventQ %u\n",event_q->q_id);
goto err_out_create;
}
event_q->created = true;
@ -772,115 +850,62 @@ int
pqisrc_alloc_and_create_ib_queues(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
uint32_t alloc_size = 0;
char *virt_addr = NULL;
dma_addr_t dma_addr = 0;
uint32_t ibq_size = 0;
uint64_t ib_ci_dma_start_offset = 0;
char *ib_ci_virt_start_addr = NULL;
uint32_t ib_ci_virt_start_offset = 0;
uint32_t ibq_id = PQI_MIN_OP_IB_QUEUE_ID;
ib_queue_t *op_ib_q = NULL;
uint32_t num_op_ibq = softs->num_op_raid_ibq +
softs->num_op_aio_ibq;
uint32_t ibq_id = PQI_MIN_OP_IB_QUEUE_ID;
uint32_t total_op_ibq = softs->num_op_raid_ibq + softs->num_op_aio_ibq;
int i = 0;
char *string = NULL;
DBG_FUNC("IN\n");
/* Calculate memory requirements */
ibq_size = softs->num_elem_per_op_ibq * softs->ibq_elem_size;
alloc_size = num_op_ibq * ibq_size;
/* CI indexes starts after Queue element array */
ib_ci_dma_start_offset = alloc_size;
ib_ci_virt_start_offset = alloc_size;
alloc_size += num_op_ibq * sizeof(uint32_t); /*For IBQ CI*/
/* Allocate memory for IB queues */
softs->op_ibq_dma_mem.tag = "op_ib_queue";
softs->op_ibq_dma_mem.size = alloc_size;
softs->op_ibq_dma_mem.align = PQI_OPQ_ELEM_ARRAY_ALIGN;
ret = os_dma_mem_alloc(softs, &softs->op_ibq_dma_mem);
if (ret) {
DBG_ERR("Failed to Allocate Operational IBQ memory ret : %d\n",
ret);
goto err_out;
}
/* Set up the address */
virt_addr = softs->op_ibq_dma_mem.virt_addr;
dma_addr = softs->op_ibq_dma_mem.dma_addr;
ib_ci_dma_start_offset += dma_addr;
ib_ci_virt_start_addr = virt_addr + ib_ci_virt_start_offset;
ASSERT(softs->num_op_raid_ibq == softs->num_op_aio_ibq);
for (i = 0; i < softs->num_op_raid_ibq; i++) {
for (i = 0; i < total_op_ibq; i++) {
/* OP RAID IB Q */
op_ib_q = &softs->op_raid_ib_q[i];
ASSERT(!(dma_addr & PQI_ADDR_ALIGN_MASK_64));
FILL_QUEUE_ARRAY_ADDR(op_ib_q,virt_addr,dma_addr);
if (i % 2 == 0)
{
op_ib_q = &softs->op_raid_ib_q[i/2];
string = "raid";
}
else
{
op_ib_q = &softs->op_aio_ib_q[i/2];
string = "aio";
}
/* Allocate memory for IB queues */
op_ib_q->num_elem = softs->num_elem_per_op_ibq;
op_ib_q->elem_size = softs->max_ibq_elem_size;
ret = pqisrc_allocate_and_init_inbound_q(softs, op_ib_q, "op_ib_queue");
if (ret) {
DBG_ERR("Failed to Allocate Operational IBQ memory ret : %d\n",
ret);
goto err_out;
}
op_ib_q->q_id = ibq_id++;
snprintf(op_ib_q->lockname, LOCKNAME_SIZE, "raid_ibqlock%d", i);
snprintf(op_ib_q->lockname, LOCKNAME_SIZE, "%s_ibqlock_%d", string, i);
ret = OS_INIT_PQILOCK(softs, &op_ib_q->lock, op_ib_q->lockname);
if(ret){
/* TODO: error handling */
DBG_ERR("raid_ibqlock %d init failed\n", i);
op_ib_q->lockcreated = false;
goto err_lock;
if(ret){
/* TODO: error handling */
DBG_ERR("%s %d init failed\n", string, i);
op_ib_q->lockcreated = false;
goto err_lock;
}
op_ib_q->lockcreated = true;
op_ib_q->lockcreated = true;
op_ib_q->num_elem = softs->num_elem_per_op_ibq;
op_ib_q->elem_size = softs->ibq_elem_size;
op_ib_q->ci_dma_addr = ib_ci_dma_start_offset +
(2 * i * sizeof(uint32_t));
op_ib_q->ci_virt_addr = (uint32_t*)(ib_ci_virt_start_addr +
(2 * i * sizeof(uint32_t)));
ASSERT(!(op_ib_q->ci_dma_addr & PQI_ADDR_ALIGN_MASK_4));
ret = pqisrc_create_op_raid_ibq(softs, op_ib_q);
if (i % 2 == 0)
ret = pqisrc_create_op_raid_ibq(softs, op_ib_q);
else
ret = pqisrc_create_op_aio_ibq(softs, op_ib_q);
if (ret) {
DBG_ERR("[ %s ] Failed to Create OP Raid IBQ %d\n",
__func__, op_ib_q->q_id);
DBG_ERR("Failed to Create OP IBQ type=%s id=%u\n",
string, op_ib_q->q_id);
goto err_out_create;
}
op_ib_q->created = true;
/* OP AIO IB Q */
virt_addr += ibq_size;
dma_addr += ibq_size;
op_ib_q = &softs->op_aio_ib_q[i];
ASSERT(!(dma_addr & PQI_ADDR_ALIGN_MASK_64));
FILL_QUEUE_ARRAY_ADDR(op_ib_q,virt_addr,dma_addr);
op_ib_q->q_id = ibq_id++;
snprintf(op_ib_q->lockname, LOCKNAME_SIZE, "aio_ibqlock%d", i);
ret = OS_INIT_PQILOCK(softs, &op_ib_q->lock, op_ib_q->lockname);
if(ret){
/* TODO: error handling */
DBG_ERR("aio_ibqlock %d init failed\n", i);
op_ib_q->lockcreated = false;
goto err_lock;
}
op_ib_q->lockcreated = true;
op_ib_q->num_elem = softs->num_elem_per_op_ibq;
op_ib_q->elem_size = softs->ibq_elem_size;
op_ib_q->ci_dma_addr = ib_ci_dma_start_offset +
(((2 * i) + 1) * sizeof(uint32_t));
op_ib_q->ci_virt_addr = (uint32_t*)(ib_ci_virt_start_addr +
(((2 * i) + 1) * sizeof(uint32_t)));
ASSERT(!(op_ib_q->ci_dma_addr & PQI_ADDR_ALIGN_MASK_4));
ret = pqisrc_create_op_aio_ibq(softs, op_ib_q);
if (ret) {
DBG_ERR("Failed to Create OP AIO IBQ %d\n",op_ib_q->q_id);
goto err_out_create;
}
op_ib_q->created = true;
virt_addr += ibq_size;
dma_addr += ibq_size;
}
DBG_FUNC("OUT\n");
@ -888,8 +913,8 @@ pqisrc_alloc_and_create_ib_queues(pqisrc_softstate_t *softs)
err_lock:
err_out_create:
pqisrc_destroy_op_ib_queues(softs);
err_out:
pqisrc_destroy_op_ib_queues(softs);
DBG_FUNC("OUT failed %d\n", ret);
return PQI_STATUS_FAILURE;
}
@ -901,16 +926,8 @@ int
pqisrc_alloc_and_create_ob_queues(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
uint32_t alloc_size = 0;
char *virt_addr = NULL;
dma_addr_t dma_addr = 0;
uint32_t obq_size = 0;
uint64_t ob_pi_dma_start_offset = 0;
uint32_t ob_pi_virt_start_offset = 0;
char *ob_pi_virt_start_addr = NULL;
uint32_t obq_id = PQI_MIN_OP_OB_QUEUE_ID;
ob_queue_t *op_ob_q = NULL;
uint32_t num_op_obq = softs->num_op_obq;
int i = 0;
DBG_FUNC("IN\n");
@ -923,65 +940,41 @@ pqisrc_alloc_and_create_ob_queues(pqisrc_softstate_t *softs)
*/
ALIGN_BOUNDARY(softs->num_elem_per_op_obq, 4);
obq_size = softs->num_elem_per_op_obq * softs->obq_elem_size;
alloc_size += num_op_obq * obq_size;
/* PI indexes starts after Queue element array */
ob_pi_dma_start_offset = alloc_size;
ob_pi_virt_start_offset = alloc_size;
alloc_size += num_op_obq * sizeof(uint32_t); /*For OBQ PI*/
/* Allocate memory for OB queues */
softs->op_obq_dma_mem.tag = "op_ob_queue";
softs->op_obq_dma_mem.size = alloc_size;
softs->op_obq_dma_mem.align = PQI_OPQ_ELEM_ARRAY_ALIGN;
ret = os_dma_mem_alloc(softs, &softs->op_obq_dma_mem);
if (ret) {
DBG_ERR("Failed to Allocate Operational OBQ memory ret : %d\n",
ret);
goto err_out;
}
/* Set up the address */
virt_addr = softs->op_obq_dma_mem.virt_addr;
dma_addr = softs->op_obq_dma_mem.dma_addr;
ob_pi_dma_start_offset += dma_addr;
ob_pi_virt_start_addr = virt_addr + ob_pi_virt_start_offset;
DBG_INFO("softs->num_op_obq %d\n",softs->num_op_obq);
DBG_INIT("softs->num_op_obq %u max_obq_elem_size=%u\n",softs->num_op_obq, softs->max_obq_elem_size);
for (i = 0; i < softs->num_op_obq; i++) {
op_ob_q = &softs->op_ob_q[i];
ASSERT(!(dma_addr & PQI_ADDR_ALIGN_MASK_64));
FILL_QUEUE_ARRAY_ADDR(op_ob_q,virt_addr,dma_addr);
/* Allocate memory for OB queues */
op_ob_q->num_elem = softs->num_elem_per_op_obq;
op_ob_q->elem_size = PQISRC_OP_OBQ_ELEM_SIZE_BYTES;
ret = pqisrc_allocate_and_init_outbound_q(softs, op_ob_q, "op_ob_queue");
if (ret) {
DBG_ERR("Failed to Allocate Operational OBQ memory ret : %d\n",
ret);
goto err_out;
}
op_ob_q->q_id = obq_id++;
if(softs->share_opq_and_eventq == true)
op_ob_q->intr_msg_num = i;
else
op_ob_q->intr_msg_num = i + 1; /* msg num zero for event */
op_ob_q->num_elem = softs->num_elem_per_op_obq;
op_ob_q->elem_size = softs->obq_elem_size;
op_ob_q->pi_dma_addr = ob_pi_dma_start_offset +
(i * sizeof(uint32_t));
op_ob_q->pi_virt_addr = (uint32_t*)(ob_pi_virt_start_addr +
(i * sizeof(uint32_t)));
ASSERT(!(op_ob_q->pi_dma_addr & PQI_ADDR_ALIGN_MASK_4));
ret = pqisrc_create_op_obq(softs,op_ob_q);
ret = pqisrc_create_op_obq(softs, op_ob_q);
if (ret) {
DBG_ERR("Failed to Create OP OBQ %d\n",op_ob_q->q_id);
DBG_ERR("Failed to Create OP OBQ %u\n",op_ob_q->q_id);
goto err_out_create;
}
op_ob_q->created = true;
virt_addr += obq_size;
dma_addr += obq_size;
}
DBG_FUNC("OUT\n");
return ret;
err_out_create:
pqisrc_destroy_op_ob_queues(softs);
err_out:
pqisrc_destroy_op_ob_queues(softs);
DBG_FUNC("OUT failed %d\n", ret);
return PQI_STATUS_FAILURE;
}

File diff suppressed because it is too large


@ -1,5 +1,5 @@
/*-
* Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
* Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -32,12 +32,29 @@
void
pqisrc_process_internal_raid_response_success(pqisrc_softstate_t *softs,rcb_t *rcb)
{
DBG_FUNC("IN");
DBG_FUNC("IN\n");
rcb->status = REQUEST_SUCCESS;
rcb->status = PQI_STATUS_SUCCESS;
rcb->req_pending = false;
DBG_FUNC("OUT");
DBG_FUNC("OUT\n");
}
/* Safely determine whether a CDB is available and, if so, return the SCSI
   opcode, or the BMIC command byte when a BMIC opcode is detected */
uint8_t
pqisrc_get_cmd_from_rcb(rcb_t *rcb)
{
uint8_t opcode = 0xFF;
if (rcb && rcb->cdbp)
{
opcode = rcb->cdbp[0];
if (IS_BMIC_OPCODE(opcode))
return rcb->cdbp[6];
}
return opcode;
}
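
The byte-6 special case matches the bmic_cdb layout added to
pqisrc_raid_request later in this change: byte 0 carries the BMIC read/write
opcode and byte 6 the vendor-specific command. A small, self-contained
illustration; the 0x26 opcode and 0x61 command value are illustrative
stand-ins for whatever IS_BMIC_OPCODE() accepts:

#include <stdint.h>
#include <stdio.h>

#define EX_BMIC_READ    0x26    /* illustrative BMIC opcode */

int
main(void)
{
        /* Byte 0 = BMIC opcode; byte 6 = vendor-specific BMIC command. */
        uint8_t cdb[16] = { EX_BMIC_READ, 0, 0, 0, 0, 0, 0x61 };
        uint8_t opcode = cdb[0];
        uint8_t cmd = (opcode == EX_BMIC_READ) ? cdb[6] : opcode;

        printf("reported cmd = 0x%02x\n", cmd); /* prints 0x61 */
        return (0);
}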
/*
@ -49,33 +66,40 @@ pqisrc_process_internal_raid_response_error(pqisrc_softstate_t *softs,
{
raid_path_error_info_elem_t error_info;
DBG_FUNC("IN");
DBG_FUNC("IN\n");
rcb->error_info = (char *) (softs->err_buf_dma_mem.virt_addr) +
(err_idx * PQI_ERROR_BUFFER_ELEMENT_LENGTH);
memcpy(&error_info, rcb->error_info, sizeof(error_info));
DBG_INFO("error_status 0x%x data_in_result 0x%x data_out_result 0x%x\n",
error_info.status, error_info.data_in_result, error_info.data_out_result);
rcb->status = REQUEST_FAILED;
rcb->status = PQI_STATUS_TIMEOUT;
switch (error_info.data_out_result) {
case PQI_RAID_DATA_IN_OUT_GOOD:
if (error_info.status == PQI_RAID_DATA_IN_OUT_GOOD)
rcb->status = REQUEST_SUCCESS;
rcb->status = PQI_STATUS_SUCCESS;
break;
case PQI_RAID_DATA_IN_OUT_UNDERFLOW:
if (error_info.status == PQI_RAID_DATA_IN_OUT_GOOD ||
error_info.status == PQI_RAID_STATUS_CHECK_CONDITION)
rcb->status = REQUEST_SUCCESS;
rcb->status = PQI_STATUS_SUCCESS;
break;
default:
DBG_WARN("error_status 0x%x data_in_result 0x%x data_out_result 0x%x cmd rcb tag 0x%x\n",
error_info.status, error_info.data_in_result, error_info.data_out_result, rcb->tag);
}
if (rcb->status != PQI_STATUS_SUCCESS)
{
DBG_INFO("error_status=0x%x data_in=0x%x data_out=0x%x detail=0x%x\n",
error_info.status, error_info.data_in_result, error_info.data_out_result,
pqisrc_get_cmd_from_rcb(rcb));
}
rcb->req_pending = false;
DBG_FUNC("OUT");
DBG_FUNC("OUT\n");
}
/*
@ -84,11 +108,11 @@ pqisrc_process_internal_raid_response_error(pqisrc_softstate_t *softs,
void
pqisrc_process_io_response_success(pqisrc_softstate_t *softs, rcb_t *rcb)
{
DBG_FUNC("IN");
DBG_FUNC("IN\n");
os_io_response_success(rcb);
DBG_FUNC("OUT");
DBG_FUNC("OUT\n");
}
static void
@ -120,6 +144,23 @@ pqisrc_extract_sense_data(sense_data_u_t *sense_data, uint8_t *key, uint8_t *asc
}
}
/* Suppress common errors unless verbose debug flag is on */
boolean_t
suppress_innocuous_error_prints(pqisrc_softstate_t *softs, rcb_t *rcb)
{
uint8_t opcode = rcb->cdbp ? rcb->cdbp[0] : 0xFF;
if ((opcode == SCSI_INQUIRY || /* 0x12 */
opcode == SCSI_MODE_SENSE || /* 0x1a */
opcode == SCSI_REPORT_LUNS || /* 0xa0 */
opcode == SCSI_LOG_SENSE || /* 0x4d */
opcode == SCSI_ATA_PASSTHRU16) /* 0x85 */
&& (softs->err_resp_verbose == false))
return true;
return false;
}
static void
pqisrc_show_sense_data_simple(pqisrc_softstate_t *softs, rcb_t *rcb, sense_data_u_t *sense_data)
{
@ -136,6 +177,9 @@ pqisrc_show_sense_data_simple(pqisrc_softstate_t *softs, rcb_t *rcb, sense_data_
void
pqisrc_show_sense_data_full(pqisrc_softstate_t *softs, rcb_t *rcb, sense_data_u_t *sense_data)
{
if (suppress_innocuous_error_prints(softs, rcb))
return;
pqisrc_print_buffer(softs, "sense data", sense_data, 32, 0);
pqisrc_show_sense_data_simple(softs, rcb, sense_data);
@ -144,6 +188,105 @@ pqisrc_show_sense_data_full(pqisrc_softstate_t *softs, rcb_t *rcb, sense_data_u_
}
/* dumps the aio error info and sense data then breaks down the output */
void
pqisrc_show_aio_error_info(pqisrc_softstate_t *softs, rcb_t *rcb, aio_path_error_info_elem_t *aio_err)
{
DBG_NOTE("\n");
DBG_NOTE("aio err: status=0x%x serv_resp=0x%x data_pres=0x%x data_len=0x%x\n",
aio_err->status, aio_err->service_resp, aio_err->data_pres, aio_err->data_len);
pqisrc_print_buffer(softs, "aio err info", aio_err,
offsetof(aio_path_error_info_elem_t, data), PRINT_FLAG_HDR_COLUMN);
pqisrc_show_sense_data_full(softs, rcb, &aio_err->sense_data);
}
/* dumps the raid error info and sense data then breaks down the output */
void
pqisrc_show_raid_error_info(pqisrc_softstate_t *softs, rcb_t *rcb, raid_path_error_info_elem_t *raid_err)
{
DBG_NOTE("\n");
DBG_NOTE("raid err: data_in=0x%x out=0x%x status=0x%x sense_len=0x%x resp_len=0x%x\n",
raid_err->data_in_result, raid_err->data_out_result,
raid_err->status, raid_err->sense_data_len, raid_err->resp_data_len);
pqisrc_print_buffer(softs, "raid err info", raid_err,
offsetof(raid_path_error_info_elem_t, data), PRINT_FLAG_HDR_COLUMN);
pqisrc_show_sense_data_full(softs, rcb, &raid_err->sense_data);
}
/* return true if this is an innocuous error */
boolean_t
pqisrc_is_innocuous_error(pqisrc_softstate_t *softs, rcb_t *rcb, void *err_info)
{
uint8_t opcode = rcb->cdbp ? rcb->cdbp[0] : 0xFF;
/* These SCSI cmds frequently cause "underrun" and other minor "error"
conditions while determining log page length, support, etc. */
if (opcode != SCSI_INQUIRY && /* 0x12 */
opcode != SCSI_MODE_SENSE && /* 0x1a */
opcode != SCSI_REPORT_LUNS && /* 0xa0 */
opcode != SCSI_LOG_SENSE && /* 0x4d */
opcode != SCSI_ATA_PASSTHRU16) /* 0x85 */
{
return false;
}
/* treat all cmds above as innocuous unless verbose flag is set. */
if (softs->err_resp_verbose == false)
return true;
if (rcb->path == AIO_PATH)
{
aio_path_error_info_elem_t *aio_err = err_info;
uint8_t key, asc, ascq;
/* Byte[0]=Status=0x51, Byte[1]=service_resp=0x01 */
if (aio_err->status == PQI_AIO_STATUS_UNDERRUN &&
aio_err->service_resp == PQI_AIO_SERV_RESPONSE_FAILURE)
{
return true;
}
/* get the key info so we can apply more filters... */
pqisrc_extract_sense_data(&aio_err->sense_data, &key, &asc, &ascq);
/* Seeing a lot of "invalid field in CDB" for REPORT LUNS on the AIO path.
Example CDB = a0 00 11 00 00 00 00 00 20 08 00 00
So filter out the full dump info for now. Also wonder if we should
just send REPORT LUNS to raid path? */
if (opcode == SCSI_REPORT_LUNS &&
key == 5 && asc == 0x24)
{
pqisrc_show_sense_data_simple(softs, rcb, &aio_err->sense_data);
return true;
}
/* may want to return true here eventually? */
}
else
{
raid_path_error_info_elem_t *raid_err = err_info;
/* Byte[1]=data_out=0x01 */
if (raid_err->data_out_result == PQI_RAID_DATA_IN_OUT_UNDERFLOW)
return true;
/* We get these a lot: leave a tiny breadcrumb about the error,
but don't do a full spew about it */
if (raid_err->status == PQI_AIO_STATUS_CHECK_CONDITION)
{
pqisrc_show_sense_data_simple(softs, rcb, &raid_err->sense_data);
return true;
}
}
return false;
}
/*
* Process the error info for AIO in the case of failure.
*/
@ -153,20 +296,30 @@ pqisrc_process_aio_response_error(pqisrc_softstate_t *softs,
{
aio_path_error_info_elem_t *err_info = NULL;
DBG_FUNC("IN");
DBG_FUNC("IN\n");
ASSERT(rcb->path == AIO_PATH);
err_info = (aio_path_error_info_elem_t*)
softs->err_buf_dma_mem.virt_addr +
err_idx;
if(err_info == NULL) {
DBG_ERR("err_info structure is NULL err_idx :%x", err_idx);
DBG_ERR("err_info structure is NULL err_idx :%x\n", err_idx);
return;
}
/* filter out certain underrun/success "errors" from printing */
if (!pqisrc_is_innocuous_error(softs, rcb, err_info)) {
if (softs->err_resp_verbose == true)
pqisrc_show_rcb_details(softs, rcb,
"aio error", err_info);
}
os_aio_response_error(rcb, err_info);
DBG_FUNC("OUT");
DBG_FUNC("OUT\n");
}
/*
@ -178,20 +331,31 @@ pqisrc_process_raid_response_error(pqisrc_softstate_t *softs,
{
raid_path_error_info_elem_t *err_info = NULL;
DBG_FUNC("IN");
DBG_FUNC("IN\n");
ASSERT(rcb->path == RAID_PATH);
err_info = (raid_path_error_info_elem_t*)
softs->err_buf_dma_mem.virt_addr +
err_idx;
if(err_info == NULL) {
DBG_ERR("err_info structure is NULL err_idx :%x", err_idx);
DBG_ERR("err_info structure is NULL err_idx :%x\n", err_idx);
return;
}
/* filter out certain underrun/success "errors" from printing */
if (!pqisrc_is_innocuous_error(softs, rcb, err_info)) {
if( softs->err_resp_verbose == true )
pqisrc_show_rcb_details(softs, rcb,
"raid error", err_info);
}
os_raid_response_error(rcb, err_info);
DBG_FUNC("OUT");
DBG_FUNC("OUT\n");
}
/*
@ -201,7 +365,7 @@ int
pqisrc_process_task_management_response(pqisrc_softstate_t *softs,
pqi_tmf_resp_t *tmf_resp)
{
int ret = REQUEST_SUCCESS;
int ret = PQI_STATUS_SUCCESS;
uint32_t tag = (uint32_t)tmf_resp->req_id;
rcb_t *rcb = &softs->rcb[tag];
@ -212,18 +376,19 @@ pqisrc_process_task_management_response(pqisrc_softstate_t *softs,
switch (tmf_resp->resp_code) {
case SOP_TASK_MANAGEMENT_FUNCTION_COMPLETE:
case SOP_TASK_MANAGEMENT_FUNCTION_SUCCEEDED:
ret = REQUEST_SUCCESS;
ret = PQI_STATUS_SUCCESS;
break;
default:
DBG_WARN("TMF Failed, Response code : 0x%x\n", tmf_resp->resp_code);
ret = REQUEST_FAILED;
DBG_ERR("Tag #0x%08x TMF Failed, Response code : 0x%x\n",
rcb->tag, tmf_resp->resp_code);
ret = PQI_STATUS_TIMEOUT;
break;
}
rcb->status = ret;
rcb->req_pending = false;
DBG_FUNC("OUT");
DBG_FUNC("OUT\n");
return ret;
}
@ -231,7 +396,7 @@ static int
pqisrc_process_vendor_general_response(pqi_vendor_general_response_t *response)
{
int ret = REQUEST_SUCCESS;
int ret = PQI_STATUS_SUCCESS;
switch(response->status) {
case PQI_VENDOR_RESPONSE_IU_SUCCESS:
@ -239,7 +404,7 @@ pqisrc_process_vendor_general_response(pqi_vendor_general_response_t *response)
case PQI_VENDOR_RESPONSE_IU_UNSUCCESS:
case PQI_VENDOR_RESPONSE_IU_INVALID_PARAM:
case PQI_VENDOR_RESPONSE_IU_INSUFF_RESRC:
ret = REQUEST_FAILED;
ret = PQI_STATUS_TIMEOUT;
break;
}
@ -256,21 +421,22 @@ pqisrc_process_response_queue(pqisrc_softstate_t *softs, int oq_id)
ob_queue_t *ob_q;
struct pqi_io_response *response;
uint32_t oq_pi, oq_ci;
pqi_scsi_dev_t *dvp = NULL;
pqi_scsi_dev_t *dvp = NULL;
DBG_FUNC("IN");
DBG_FUNC("IN\n");
ob_q = &softs->op_ob_q[oq_id - 1]; /* zero for event Q */
oq_ci = ob_q->ci_local;
oq_pi = *(ob_q->pi_virt_addr);
DBG_INFO("ci : %d pi : %d qid : %d\n", oq_ci, oq_pi, ob_q->q_id);
DBG_IO("ci : %u pi : %u qid : %u\n", oq_ci, oq_pi, ob_q->q_id);
while (1) {
boolean_t os_scsi_cmd = false;
rcb_t *rcb = NULL;
uint32_t tag = 0;
uint32_t offset;
boolean_t os_scsi_cmd = false;
if (oq_pi == oq_ci)
break;
@ -282,7 +448,7 @@ pqisrc_process_response_queue(pqisrc_softstate_t *softs, int oq_id)
rcb = &softs->rcb[tag];
/* Make sure we are processing a valid response. */
if ((rcb->tag != tag) || (rcb->req_pending == false)) {
DBG_ERR("No such request pending with tag : %x", tag);
DBG_ERR("No such request pending with tag : %x rcb->tag : %x", tag, rcb->tag);
oq_ci = (oq_ci + 1) % ob_q->num_elem;
break;
}
@ -291,13 +457,21 @@ pqisrc_process_response_queue(pqisrc_softstate_t *softs, int oq_id)
* pqisrc_wait_on_condition(softs,rcb,timeout).
*/
if (rcb->timedout) {
DBG_WARN("timed out request completing from firmware, driver already completed it with failure , free the tag %d\n", tag);
DBG_WARN("timed out request completing from firmware, driver already completed it with failure , free the tag 0x%x\n", tag);
oq_ci = (oq_ci + 1) % ob_q->num_elem;
os_reset_rcb(rcb);
pqisrc_put_tag(&softs->taglist, tag);
break;
}
if (rcb->host_wants_to_abort_this)
{
DBG_INFO("cmd that was aborted came back. tag=%u\n", rcb->tag);
}
if (rcb->is_abort_cmd_from_host)
{
DBG_INFO("abort cmd came back. tag=%u\n", rcb->tag);
}
if (IS_OS_SCSICMD(rcb)) {
dvp = rcb->dvp;
if (dvp)
@ -306,8 +480,7 @@ pqisrc_process_response_queue(pqisrc_softstate_t *softs, int oq_id)
DBG_WARN("Received IO completion for the Null device!!!\n");
}
DBG_INFO("response.header.iu_type : %x \n", response->header.iu_type);
DBG_IO("response.header.iu_type : %x \n", response->header.iu_type);
switch (response->header.iu_type) {
case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
@ -315,7 +488,6 @@ pqisrc_process_response_queue(pqisrc_softstate_t *softs, int oq_id)
rcb->success_cmp_callback(softs, rcb);
if (os_scsi_cmd)
pqisrc_decrement_device_active_io(softs, dvp);
break;
case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
@ -345,6 +517,6 @@ pqisrc_process_response_queue(pqisrc_softstate_t *softs, int oq_id)
ob_q->ci_local = oq_ci;
PCI_MEM_PUT32(softs, ob_q->ci_register_abs,
ob_q->ci_register_offset, ob_q->ci_local );
DBG_FUNC("OUT");
ob_q->ci_register_offset, ob_q->ci_local );
DBG_FUNC("OUT\n");
}


@ -1,5 +1,5 @@
/*-
* Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
* Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -39,6 +39,7 @@ sis_disable_msix(pqisrc_softstate_t *softs)
db_reg &= ~SIS_ENABLE_MSIX;
PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db,
LEGACY_SIS_IDBR, db_reg);
OS_SLEEP(1000); /* 1 ms delay for PCI W/R ordering issue */
DBG_FUNC("OUT\n");
}
@ -55,6 +56,7 @@ sis_enable_intx(pqisrc_softstate_t *softs)
db_reg |= SIS_ENABLE_INTX;
PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db,
LEGACY_SIS_IDBR, db_reg);
OS_SLEEP(1000); /* 1 ms delay for PCI W/R ordering issue */
if (pqisrc_sis_wait_for_db_bit_to_clear(softs,SIS_ENABLE_INTX)
!= PQI_STATUS_SUCCESS) {
DBG_ERR("Failed to wait for enable intx db bit to clear\n");
@ -74,6 +76,7 @@ sis_disable_intx(pqisrc_softstate_t *softs)
db_reg &= ~SIS_ENABLE_INTX;
PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db,
LEGACY_SIS_IDBR, db_reg);
OS_SLEEP(1000); /* 1 ms delay for PCI W/R ordering issue */
DBG_FUNC("OUT\n");
}
@ -90,7 +93,7 @@ sis_disable_interrupt(pqisrc_softstate_t *softs)
break;
case INTR_TYPE_MSI:
case INTR_TYPE_MSIX:
sis_disable_msix(softs);
sis_disable_msix(softs);
break;
default:
DBG_ERR("Inerrupt mode none!\n");
@ -124,6 +127,7 @@ pqisrc_reenable_sis(pqisrc_softstate_t *softs)
PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db,
LEGACY_SIS_IDBR, LE_32(REENABLE_SIS));
OS_SLEEP(1000); /* 1 ms delay for PCI W/R ordering issue */
COND_WAIT(((PCI_MEM_GET32(softs, &softs->ioa_reg->ioa_to_host_db, LEGACY_SIS_ODBR_R) &
REENABLE_SIS) == 0), timeout)
@ -284,6 +288,21 @@ pqisrc_get_sis_pqi_cap(pqisrc_softstate_t *softs)
softs->pqi_cap.max_sg_elem = mb[1];
softs->pqi_cap.max_transfer_size = mb[2];
softs->pqi_cap.max_outstanding_io = mb[3];
if (softs->pqi_cap.max_outstanding_io >
PQISRC_MAX_OUTSTANDING_REQ) {
DBG_WARN("Controller-supported max outstanding "
"commands %u reduced to %d to align with "
"driver-supported max.\n",
softs->pqi_cap.max_outstanding_io,
PQISRC_MAX_OUTSTANDING_REQ);
softs->pqi_cap.max_outstanding_io =
PQISRC_MAX_OUTSTANDING_REQ;
}
#ifdef DEVICE_HINT
bsd_set_hint_adapter_cap(softs);
#endif
softs->pqi_cap.conf_tab_off = mb[4];
softs->pqi_cap.conf_tab_sz = mb[5];
@ -295,6 +314,11 @@ pqisrc_get_sis_pqi_cap(pqisrc_softstate_t *softs)
softs->pqi_cap.max_transfer_size);
DBG_INIT("max_outstanding_io = %x\n",
softs->pqi_cap.max_outstanding_io);
/* DBG_INIT("config_table_offset = %x\n",
softs->pqi_cap.conf_tab_off);
DBG_INIT("config_table_size = %x\n",
softs->pqi_cap.conf_tab_sz);
*/
}
DBG_FUNC("OUT\n");
@ -318,7 +342,7 @@ pqisrc_init_struct_base(pqisrc_softstate_t *softs)
memset(&init_struct_mem, 0, sizeof(struct dma_mem));
init_struct_mem.size = sizeof(struct init_base_struct);
init_struct_mem.align = PQISRC_INIT_STRUCT_DMA_ALIGN;
init_struct_mem.tag = "init_struct";
os_strlcpy(init_struct_mem.tag, "init_struct", sizeof(init_struct_mem.tag));
ret = os_dma_mem_alloc(softs, &init_struct_mem);
if (ret) {
DBG_ERR("Failed to Allocate error buffer ret : %d\n",
@ -337,7 +361,7 @@ pqisrc_init_struct_base(pqisrc_softstate_t *softs)
/* Allocate error buffer */
softs->err_buf_dma_mem.align = PQISRC_ERR_BUF_DMA_ALIGN;
softs->err_buf_dma_mem.tag = "error_buffer";
os_strlcpy(softs->err_buf_dma_mem.tag, "error_buffer", sizeof(softs->err_buf_dma_mem.tag));
ret = os_dma_mem_alloc(softs, &softs->err_buf_dma_mem);
if (ret) {
DBG_ERR("Failed to Allocate error buffer ret : %d\n",
@ -422,7 +446,7 @@ pqisrc_sis_init(pqisrc_softstate_t *softs)
if (ext_prop & SIS_SUPPORT_PQI_RESET_QUIESCE)
softs->pqi_reset_quiesce_allowed = true;
/* Send GET_COMM_PREFERRED_SETTINGS (26h) */
/* Send GET_COMM_PREFERRED_SETTINGS (26h); TODO: is it required? */
ret = pqisrc_get_preferred_settings(softs);
if (ret) {
DBG_ERR("Failed to get adapter pref settings\n");


@ -1,5 +1,5 @@
/*-
* Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
* Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -27,6 +27,9 @@
#ifndef _PQI_STRUCTURES_H
#define _PQI_STRUCTURES_H
#include "smartpqi_defines.h"
struct bmic_host_wellness_driver_version {
uint8_t start_tag[4];
uint8_t driver_version_tag[2];
@ -36,6 +39,7 @@ struct bmic_host_wellness_driver_version {
}OS_ATTRIBUTE_PACKED;
struct bmic_host_wellness_time {
uint8_t start_tag[4];
uint8_t time_tag[2];
@ -53,6 +57,7 @@ struct bmic_host_wellness_time {
}OS_ATTRIBUTE_PACKED;
/* As per PQI Spec pqi-2r00a , 6.2.2. */
/* device capability register , for admin q table 24 */
@ -187,15 +192,17 @@ typedef union pqi_reset_reg {
/* Memory descriptor for DMA memory allocation */
typedef struct dma_mem {
void *virt_addr;
dma_addr_t dma_addr;
dma_addr_t dma_addr;
uint32_t size;
uint32_t align;
char *tag;
bus_dma_tag_t dma_tag;
bus_dmamap_t dma_map;
char tag[32];
bus_dma_tag_t dma_tag;
bus_dmamap_t dma_map;
}dma_mem_t;
/* Lock should be 8 byte aligned */
/* Lock should be 8-byte aligned.
TODO: does the alignment need to apply to the lock alone?
*/
#ifndef LOCKFREE_STACK
@ -433,27 +440,6 @@ struct pqi_event {
uint32_t additional_event_id;
};
typedef struct pqi_vendor_general_request {
iu_header_t header;
uint16_t response_queue_id;
uint8_t work_area[2];
uint16_t request_id;
uint16_t function_code;
union {
struct {
uint16_t first_section;
uint16_t last_section;
uint8_t reserved1[48];
} OS_ATTRIBUTE_PACKED config_table_update;
struct {
uint64_t buffer_address;
uint32_t buffer_length;
uint8_t reserved2[40];
} OS_ATTRIBUTE_PACKED ofa_memory_allocation;
} data;
} OS_ATTRIBUTE_PACKED pqi_vendor_general_request_t;
typedef struct pqi_vendor_general_response {
iu_header_t header;
uint16_t reserved1;
@ -474,29 +460,30 @@ typedef struct op_q_params
} OS_ATTRIBUTE_PACKED op_q_params;
/* "Fixed Format Sense Data" (0x70 or 0x71) (Table 45 in SPC5) */
typedef struct sense_data_fixed {
uint8_t response_code : 7; // Byte 0, 0x70 or 0x71
uint8_t valid : 1; // Byte 0, bit 7
uint8_t byte_1; // Byte 1
uint8_t sense_key : 4; // Byte 2, bit 0-3 (Key)
uint8_t byte_2_other : 4; // Byte 2, bit 4-7
uint32_t information; // Byte 3-6, big-endian like block # in CDB
uint8_t addtnl_length; // Byte 7
uint8_t cmd_specific[4]; // Byte 8-11
uint8_t sense_code; // Byte 12 (ASC)
uint8_t sense_qual; // Byte 13 (ASCQ)
uint8_t fru_code; // Byte 14
uint8_t sense_key_specific[3]; // Byte 15-17
uint8_t addtnl_sense[1]; // Byte 18+
uint8_t response_code : 7; /* Byte 0, 0x70 or 0x71 */
uint8_t valid : 1; /* Byte 0, bit 7 */
uint8_t byte_1; /* Byte 1 */
uint8_t sense_key : 4; /* Byte 2, bit 0-3 (Key) */
uint8_t byte_2_other : 4; /* Byte 2, bit 4-7 */
uint32_t information; /* Byte 3-6, big-endian like block # in CDB */
uint8_t addtnl_length; /* Byte 7 */
uint8_t cmd_specific[4]; /* Byte 8-11 */
uint8_t sense_code; /* Byte 12 (ASC) */
uint8_t sense_qual; /* Byte 13 (ASCQ) */
uint8_t fru_code; /* Byte 14 */
uint8_t sense_key_specific[3]; /* Byte 15-17 */
uint8_t addtnl_sense[1]; /* Byte 18+ */
} OS_ATTRIBUTE_PACKED sense_data_fixed_t;
/* Generic Sense Data Descriptor (Table 29 in SPC5) */
typedef struct descriptor_entry
{
uint8_t desc_type; // Byte 9/0
uint8_t desc_type_length; // Byte 10/1
uint8_t desc_type; /* Byte 9/0 */
uint8_t desc_type_length; /* Byte 10/1 */
union
{
/* Sense data descriptor specific */
@ -504,10 +491,10 @@ typedef struct descriptor_entry
/* Information (Type 0) (Table 31 is SPC5) */
struct {
uint8_t byte_2_rsvd : 7; // Byte 11/2
uint8_t valid : 1; // Byte 11/2, bit 7
uint8_t byte_3; // Byte 12/3
uint8_t information[8]; // Byte 13-20/4-11
uint8_t byte_2_rsvd : 7; /* Byte 11/2 */
uint8_t valid : 1; /* Byte 11/2, bit 7 */
uint8_t byte_3; /* Byte 12/3 */
uint8_t information[8]; /* Byte 13-20/4-11 */
} OS_ATTRIBUTE_PACKED type_0;
}u;
@ -515,15 +502,15 @@ typedef struct descriptor_entry
/* "Descriptor Format Sense Data" (0x72 or 0x73) (Table 28 in SPC5) */
typedef struct sense_data_descriptor {
uint8_t response_code : 7; // Byte 0, 0x72 or 0x73
uint8_t byte_0_rsvd: 1; // Byte 0, bit 7
uint8_t sense_key : 4; // Byte 1, bit 0-3 (Key)
uint8_t byte_1_other : 4; // Byte 1, bit 4-7
uint8_t sense_code; // Byte 2 (ASC)
uint8_t sense_qual; // Byte 3 (ASCQ)
uint8_t byte4_6[3]; // Byte 4-6
uint8_t more_length; // Byte 7
descriptor_entry_t descriptor_list; // Bytes 8+
uint8_t response_code : 7; /* Byte 0, 0x72 or 0x73 */
uint8_t byte_0_rsvd: 1; /* Byte 0, bit 7 */
uint8_t sense_key : 4; /* Byte 1, bit 0-3 (Key) */
uint8_t byte_1_other : 4; /* Byte 1, bit 4-7 */
uint8_t sense_code; /* Byte 2 (ASC) */
uint8_t sense_qual; /* Byte 3 (ASCQ) */
uint8_t byte4_6[3]; /* Byte 4-6 */
uint8_t more_length; /* Byte 7 */
descriptor_entry_t descriptor_list; /* Bytes 8+ */
} OS_ATTRIBUTE_PACKED sense_data_descriptor_t;
@ -535,20 +522,18 @@ typedef union sense_data_u
} sense_data_u_t;
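
The fixed and descriptor layouts place the key/ASC/ASCQ triplet at different
offsets, which is the dispatch pqisrc_extract_sense_data() performs on this
union. A standalone sketch of the same dispatch using raw SPC-5 byte offsets
rather than the packed structs:

#include <stdint.h>

/* Fixed format (0x70/0x71): key @ byte 2, ASC @ 12, ASCQ @ 13.
 * Descriptor format (0x72/0x73): key @ byte 1, ASC @ 2, ASCQ @ 3. */
static void
ex_extract_sense(const uint8_t *sense, uint8_t *key, uint8_t *asc,
    uint8_t *ascq)
{
        uint8_t rc = sense[0] & 0x7f;

        if (rc == 0x70 || rc == 0x71) {         /* fixed */
                *key = sense[2] & 0x0f;
                *asc = sense[12];
                *ascq = sense[13];
        } else if (rc == 0x72 || rc == 0x73) {  /* descriptor */
                *key = sense[1] & 0x0f;
                *asc = sense[2];
                *ascq = sense[3];
        } else {
                *key = *asc = *ascq = 0;
        }
}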
/* Driver will use this structure to interpret the error
info element returned from a failed request */
typedef struct raid_path_error_info_elem {
uint8_t data_in_result; /* !< Byte 0. See SOP spec Table 77. */
uint8_t data_out_result; /* !< Byte 1. See SOP spec Table 78. */
uint8_t reserved[3]; /* !< Bytes 2-4. */
uint8_t status; /* !< Byte 5. See SAM-5 specification "Status" codes Table 40. Defined in Storport.h */
uint16_t status_qual; /* !< Bytes 6-7. See SAM-5 specification Table 43. */
uint16_t sense_data_len; /* !< Bytes 8-9. See SOP specification table 79. */
uint16_t resp_data_len; /* !< Bytes 10-11. See SOP specification table 79. */
uint32_t data_in_transferred; /* !< Bytes 12-15. If "dada_in_result = 0x01 (DATA_IN BUFFER UNDERFLOW)", Indicates the number of contiguous bytes starting with offset zero in Data-In buffer else Ignored. */
uint32_t data_out_transferred; /* !< Bytes 16-19. If "data_out_result = 0x01 (DATA_OUT BUFFER UNDERFLOW)", Indicates the number of contiguous bytes starting with offset zero in Data-Out buffer else Ignored. */
uint8_t data_in_result; /* !< Byte 0. See SOP spec Table 77. */
uint8_t data_out_result; /* !< Byte 1. See SOP spec Table 78. */
uint8_t reserved[3]; /* !< Bytes 2-4. */
uint8_t status; /* !< Byte 5. See SAM-5 specification "Status" codes Table 40.*/
uint16_t status_qual; /* !< Bytes 6-7. See SAM-5 specification Table 43. */
uint16_t sense_data_len; /* !< Bytes 8-9. See SOP specification table 79. */
uint16_t resp_data_len; /* !< Bytes 10-11. See SOP specification table 79. */
uint32_t data_in_transferred; /* !< Bytes 12-15. If "data_in_result = 0x01 (DATA_IN BUFFER UNDERFLOW)", Indicates the number of contiguous bytes starting with offset zero in Data-In buffer else Ignored. */
uint32_t data_out_transferred;/* !< Bytes 16-19. If "data_out_result = 0x01 (DATA_OUT BUFFER UNDERFLOW)", Indicates the number of contiguous bytes starting with offset zero in Data-Out buffer else Ignored. */
union
{
sense_data_u_t sense_data;
@ -560,22 +545,26 @@ typedef struct raid_path_error_info_elem {
typedef enum error_data_present
{
DATA_PRESENT_NO_DATA = 0, /* !< No data present in Data buffer. */
DATA_PRESENT_RESPONSE_DATA = 1, /* !< Response data is present in Data buffer. */
DATA_PRESENT_SENSE_DATA = 2 /* !< Sense data is present in Data buffer. */
DATA_PRESENT_NO_DATA = 0, /* !< No data present in Data buffer. */
DATA_PRESENT_RESPONSE_DATA = 1, /* !< Response data is present in Data buffer. */
DATA_PRESENT_SENSE_DATA = 2 /* !< Sense data is present in Data buffer. */
} error_data_present_t;
typedef struct aio_path_error_info_elem
{
uint8_t status; /* !< Byte 0. See SAM-5 specification "SCSI Status" codes Table 40. Defined in Storport.h */
uint8_t service_resp; /* !< Byte 1. SCSI Service Response. */
uint8_t data_pres; /* !< Byte 2. Bits [7:2] reserved. Bits [1:0] - 0=No data, 1=Response data, 2=Sense data. */
uint8_t reserved1; /* !< Byte 3. Reserved. */
uint32_t resd_count; /* !< Bytes 4-7. The residual data length in bytes. Need the original transfer size and if Status is OverRun or UnderRun. */
uint16_t data_len; /* !< Bytes 8-9. The amount of Sense data or Response data returned in Response/Sense Data buffer. */
uint16_t reserved2; /* !< Bytes 10. Reserved. */
uint8_t data[256]; /* !< Bytes 11-267. Response data buffer or Sense data buffer but not both. */
uint8_t padding[8]; /* !< Bytes 268-275. Padding to make AIO_PATH_ERROR_INFO_ELEMENT = RAID_PATH_ERROR_INFO_ELEMENT */
uint8_t status; /* !< Byte 0. See SAM-5 specification "SCSI Status" codes Table 40.*/
uint8_t service_resp; /* !< Byte 1. SCSI Service Response. */
uint8_t data_pres; /* !< Byte 2. Bits [7:2] reserved. Bits [1:0] - 0=No data, 1=Response data, 2=Sense data. */
uint8_t reserved1; /* !< Byte 3. Reserved. */
uint32_t resd_count; /* !< Bytes 4-7. The residual data length in bytes. Need the original transfer size and if Status is OverRun or UnderRun. */
uint16_t data_len; /* !< Bytes 8-9. The amount of Sense data or Response data returned in Response/Sense Data buffer. */
uint16_t reserved2; /* !< Bytes 10-11. Reserved. */
union
{
sense_data_u_t sense_data; /* */
uint8_t data[256]; /* !< Bytes 12-267. Response data buffer or Sense data buffer but not both. */
};
uint8_t padding[8]; /* !< Bytes 268-275. Padding to make AIO_PATH_ERROR_INFO_ELEMENT = RAID_PATH_ERROR_INFO_ELEMENT */
}OS_ATTRIBUTE_PACKED aio_path_error_info_elem_t;
struct init_base_struct {
@ -603,6 +592,7 @@ typedef struct ib_queue {
boolean_t lockcreated;
char lockname[LOCKNAME_SIZE];
OS_PQILOCK_T lock OS_ATTRIBUTE_ALIGNED(8);
struct dma_mem alloc_dma;
}ib_queue_t;
typedef struct ob_queue {
@ -618,6 +608,7 @@ typedef struct ob_queue {
uint32_t *pi_virt_addr;
dma_addr_t pi_dma_addr;
boolean_t created;
struct dma_mem alloc_dma;
}ob_queue_t;
typedef struct pqisrc_sg_desc{
@ -670,6 +661,23 @@ typedef struct pqi_device_capabilities {
/* IO path */
typedef struct iu_cmd_flags
{
uint8_t data_dir : 2;
uint8_t partial : 1;
uint8_t mem_type : 1;
uint8_t fence : 1;
uint8_t encrypt_enable : 1;
uint8_t res2 : 2;
}OS_ATTRIBUTE_PACKED iu_cmd_flags_t;
typedef struct iu_attr_prio
{
uint8_t task_attr : 3;
uint8_t cmd_prio : 4;
uint8_t res3 : 1;
}OS_ATTRIBUTE_PACKED iu_attr_prio_t;
typedef struct pqi_aio_req {
iu_header_t header;
uint16_t response_queue_id;
@ -678,15 +686,8 @@ typedef struct pqi_aio_req {
uint8_t res1[2];
uint32_t nexus;
uint32_t buf_len;
uint8_t data_dir : 2;
uint8_t partial : 1;
uint8_t mem_type : 1;
uint8_t fence : 1;
uint8_t encrypt_enable : 1;
uint8_t res2 : 2;
uint8_t task_attr : 3;
uint8_t cmd_prio : 4;
uint8_t res3 : 1;
iu_cmd_flags_t cmd_flags;
iu_attr_prio_t attr_prio;
uint16_t encrypt_key_index;
uint32_t encrypt_twk_low;
uint32_t encrypt_twk_high;
@ -699,6 +700,55 @@ typedef struct pqi_aio_req {
sgt_t sg_desc[4];
}OS_ATTRIBUTE_PACKED pqi_aio_req_t;
typedef struct pqi_aio_raid1_write_req {
iu_header_t header;
uint16_t response_queue_id;
uint8_t work_area[2];
uint16_t req_id;
uint16_t volume_id; /* ID of raid volume */
uint32_t nexus_1; /* 1st drive in RAID 1 */
uint32_t nexus_2; /* 2nd drive in RAID 1 */
uint32_t nexus_3; /* 3rd drive in RAID 1 */
uint32_t buf_len;
iu_cmd_flags_t cmd_flags;
iu_attr_prio_t attr_prio;
uint16_t encrypt_key_index;
uint8_t cdb[16];
uint16_t err_idx;
uint8_t num_sg;
uint8_t cdb_len;
uint8_t num_drives; /* drives in raid1 (2 or 3) */
uint8_t reserved_bytes[3];
uint32_t encrypt_twk_low;
uint32_t encrypt_twk_high;
sgt_t sg_desc[4];
}OS_ATTRIBUTE_PACKED pqi_aio_raid1_write_req_t;
typedef struct pqi_aio_raid5or6_write_req {
iu_header_t header;
uint16_t response_queue_id;
uint8_t work_area[2];
uint16_t req_id;
uint16_t volume_id; /* ID of raid volume */
uint32_t data_it_nexus; /* IT nexus of data drive */
uint32_t p_parity_it_nexus;/* IT nexus of P parity disk */
uint32_t q_parity_it_nexus;/* IT nexus of Q parity disk (R6) */
uint32_t buf_len;
iu_cmd_flags_t cmd_flags;
iu_attr_prio_t attr_prio;
uint16_t encrypt_key_index;
uint8_t cdb[16];
uint16_t err_idx;
uint8_t num_sg;
uint8_t cdb_len;
uint8_t xor_multiplier; /* for generating RAID 6 Q parity */
uint8_t reserved[3];
uint32_t encrypt_twk_low;
uint32_t encrypt_twk_high;
uint64_t row; /* logical lba / blocks per row */
uint8_t reserved2[8]; /* changed to reserved; used to be stripe_lba */
sgt_t sg_desc[3]; /* only 3 entries for R5/6 */
}OS_ATTRIBUTE_PACKED pqi_aio_raid5or6_write_req_t;
typedef struct pqisrc_raid_request {
iu_header_t header;
@ -722,28 +772,43 @@ typedef struct pqisrc_raid_request {
uint8_t reserved4 : 2;
uint8_t additional_cdb_bytes_usage : 3;
uint8_t reserved5 : 3;
uint8_t cdb[16];
uint8_t reserved[12];
union
{
uint8_t cdb[16];
struct
{
uint8_t op_code; /* Byte 0. SCSI opcode (0x26 or 0x27) */
uint8_t lun_lower; /* Byte 1 */
uint32_t detail; /* Byte 2-5 */
uint8_t cmd; /* Byte 6. Vendor specific op code. */
uint16_t xfer_len; /* Byte 7-8 */
uint8_t lun_upper; /* Byte 9 */
uint8_t unused[6]; /* Bytes 10-15. */
}OS_ATTRIBUTE_PACKED bmic_cdb;
}OS_ATTRIBUTE_PACKED cmd;
uint8_t reserved[11];
uint8_t ml_device_lun_number;
uint32_t timeout_in_sec;
sgt_t sg_descriptors[4];
} OS_ATTRIBUTE_PACKED pqisrc_raid_req_t;
}OS_ATTRIBUTE_PACKED pqisrc_raid_req_t;
typedef struct pqi_raid_tmf_req {
iu_header_t header;
uint16_t resp_qid;
uint8_t work_area[2];
uint16_t req_id;
uint16_t nexus;
uint8_t res1[2];
uint16_t timeout_in_sec;
uint8_t lun[8];
uint16_t protocol_spec;
uint16_t obq_id_to_manage;
uint16_t req_id_to_manage;
uint8_t tmf;
uint8_t res2 : 7;
uint8_t fence : 1;
iu_header_t header;
uint16_t resp_qid;
uint8_t work_area[2];
uint16_t req_id;
uint16_t nexus;
uint8_t res1[1];
uint8_t ml_device_lun_number;
uint16_t timeout_in_sec;
uint8_t lun[8];
uint16_t protocol_spec;
uint16_t obq_id_to_manage;
uint16_t req_id_to_manage;
uint8_t tmf;
uint8_t res2 : 7;
uint8_t fence : 1;
} OS_ATTRIBUTE_PACKED pqi_raid_tmf_req_t;
typedef struct pqi_aio_tmf_req {
@ -788,6 +853,12 @@ struct pqi_enc_info {
uint32_t encrypt_tweak_upper;
};
typedef uint32_t os_ticks_t;
struct pqi_stream_data {
uint64_t next_lba;
os_ticks_t last_accessed;
};
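
pqi_stream_data is the per-LUN state behind the enable_stream_detection flag
added below: a write whose starting LBA equals a tracked stream's next_lba is
treated as a continuation of that sequential stream. A hedged sketch of the
bookkeeping; the slot count and the LRU recycling policy are assumptions, and
only the next_lba/last_accessed fields come from the structure itself:

#include <stdint.h>

#define EX_STREAMS_PER_LUN 8    /* stand-in for NUM_STREAMS_PER_LUN */

struct ex_stream {
        uint64_t next_lba;
        uint64_t last_accessed; /* os_ticks_t in the driver */
};

/* Return 1 if the write continues a tracked stream; otherwise recycle the
 * least-recently-used slot to start tracking it and return 0. */
static int
ex_is_sequential_write(struct ex_stream *s, uint64_t lba, uint32_t blk_cnt,
    uint64_t now)
{
        int i, lru = 0;

        for (i = 0; i < EX_STREAMS_PER_LUN; i++) {
                if (s[i].next_lba == lba) {
                        s[i].next_lba = lba + blk_cnt;
                        s[i].last_accessed = now;
                        return (1);
                }
                if (s[i].last_accessed < s[lru].last_accessed)
                        lru = i;
        }
        s[lru].next_lba = lba + blk_cnt;
        s[lru].last_accessed = now;
        return (0);
}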
typedef struct pqi_scsi_device {
device_type_t devtype; /* as reported by INQUIRY command */
@ -810,13 +881,13 @@ typedef struct pqi_scsi_device {
uint8_t device_gone : 1;
uint8_t new_device : 1;
uint8_t volume_offline : 1;
uint8_t scsi_rescan : 1;
uint8_t is_nvme : 1;
uint8_t scsi_rescan : 1;
uint8_t vendor[8]; /* bytes 8-15 of inquiry data */
uint8_t model[16]; /* bytes 16-31 of inquiry data */
uint64_t sas_address;
uint8_t raid_level;
uint16_t queue_depth; /* max. queue_depth for this device */
uint16_t advertised_queue_depth;
uint32_t ioaccel_handle;
uint8_t volume_status;
uint8_t active_path_index;
@ -827,7 +898,7 @@ typedef struct pqi_scsi_device {
int offload_config; /* I/O accel RAID offload configured */
int offload_enabled; /* I/O accel RAID offload enabled */
int offload_enabled_pending;
int offload_to_mirror; /* Send next I/O accelerator RAID
int *offload_to_mirror; /* Send next I/O accelerator RAID
offload request to mirror drive. */
struct raid_map *raid_map; /* I/O accelerator RAID map */
@ -838,34 +909,14 @@ typedef struct pqi_scsi_device {
boolean_t path_destroyed;
boolean_t firmware_queue_depth_set;
OS_ATOMIC64_T active_requests;
struct pqisrc_softstate *softs;
boolean_t schedule_rescan;
boolean_t in_remove;
struct pqi_stream_data stream_data[NUM_STREAMS_PER_LUN];
boolean_t is_multi_lun;
}pqi_scsi_dev_t;
typedef struct pqisrc_softstate pqisrc_softstate_t;
typedef struct pqi_firmware_feature pqi_firmware_feature_t;
typedef void (*feature_status_fn)(pqisrc_softstate_t *softs,
pqi_firmware_feature_t *firmware_feature);
struct pqi_firmware_feature {
char *feature_name;
unsigned int feature_bit;
boolean_t supported;
boolean_t enabled;
feature_status_fn feature_status;
};
struct pqi_conf_table_firmware_features {
struct pqi_conf_table_section_header header;
uint16_t num_elements;
uint8_t features_supported[];
};
struct pqi_conf_table_section_info {
struct pqisrc_softstate *softs;
void *section;
uint32_t section_offset;
void *section_addr;
};
struct sense_header_scsi { /* See SPC-3 section 4.5 */
uint8_t response_code; /* permit: 0x0, 0x70, 0x71, 0x72, 0x73 */
uint8_t sense_key;
@ -877,8 +928,6 @@ struct sense_header_scsi { /* See SPC-3 section 4.5 */
uint8_t additional_length; /* always 0 for fixed sense format */
}OS_ATTRIBUTE_PACKED;
typedef struct report_lun_header {
uint32_t list_length;
uint8_t extended_response;
@ -924,7 +973,7 @@ typedef struct reportlun_queue_depth_entry {
typedef struct reportlun_queue_depth_data {
reportlun_header_t header;
reportlun_queue_depth_entry_t lun_entries[1]; /* lun list with Queue Depth values for each lun */
reportlun_queue_depth_entry_t lun_entries[1]; /* lun list with Queue Depth values for each lun */
}OS_ATTRIBUTE_PACKED reportlun_queue_depth_data_t;
typedef struct raidmap_data {
@ -958,6 +1007,59 @@ typedef struct raid_map {
raidmap_data_t dev_data[RAID_MAP_MAX_ENTRIES];
}OS_ATTRIBUTE_PACKED pqisrc_raid_map_t;
typedef struct aio_row {
uint32_t blks_per_row; /* blocks per row */
uint64_t first; /* first row */
uint64_t last; /* last row */
uint32_t offset_first; /* offset in first row */
uint32_t offset_last; /* offset in last row */
uint16_t data_disks; /* number of data disks per row */
uint16_t total_disks; /* data + parity disks per row. */
}OS_ATTRIBUTE_PACKED pqisrc_aio_row_t;
typedef struct aio_column {
uint32_t first; /* 1st column of req */
uint32_t last; /* last column of req */
}OS_ATTRIBUTE_PACKED pqisrc_aio_column_t;
typedef struct aio_block {
uint64_t first; /* 1st block number of req */
uint64_t last; /* last block number of req */
uint32_t cnt; /* total blocks in req */
uint64_t disk_block; /* block number of phys disk */
}OS_ATTRIBUTE_PACKED pqisrc_aio_block_t;
typedef struct aio_r5or6_loc {
struct aio_row row; /* row information */
struct aio_column col; /* column information */
}OS_ATTRIBUTE_PACKED pqisrc_aio_r5or6_loc_t;
typedef struct aio_map {
uint32_t row;
uint32_t idx; /* index into array of handles */
uint16_t layout_map_count;
}OS_ATTRIBUTE_PACKED pqisrc_aio_map_t;
typedef struct aio_disk_group {
uint32_t first; /* first group */
uint32_t last; /* last group */
uint32_t cur; /* current group */
}OS_ATTRIBUTE_PACKED pqisrc_aio_disk_group_t;
typedef struct aio_req_locator {
uint8_t raid_level;
struct raid_map *raid_map; /* relevant raid map */
struct aio_block block; /* block range and count */
struct aio_row row; /* row range and offset info */
struct aio_column col; /* first/last column info */
struct aio_r5or6_loc r5or6; /* Raid 5/6-specific bits */
struct aio_map map; /* map row, count, and index */
struct aio_disk_group group; /* first, last, and curr group */
boolean_t is_write;
uint32_t stripesz;
uint16_t strip_sz;
int offload_to_mirror;
}OS_ATTRIBUTE_PACKED aio_req_locator_t;
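
aio_req_locator gathers the intermediate values of a conventional RAID
geometry walk: rows come from dividing the LBA range by blocks-per-row, and
columns from the strip-sized offset inside a row. A self-contained sketch of
the first/last arithmetic under that reading, using pure arithmetic and no
driver types:

#include <stdint.h>

struct ex_loc {
        uint64_t first_row, last_row;
        uint32_t offset_first, offset_last;
        uint32_t first_col, last_col;
};

/* Locate a block range on a layout with data_disks drives per row and
 * strip_sz blocks per strip, so blks_per_row = data_disks * strip_sz. */
static struct ex_loc
ex_locate(uint64_t first_blk, uint64_t last_blk, uint16_t data_disks,
    uint32_t strip_sz)
{
        struct ex_loc l;
        uint32_t blks_per_row = (uint32_t)data_disks * strip_sz;

        l.first_row = first_blk / blks_per_row;
        l.last_row = last_blk / blks_per_row;
        l.offset_first = (uint32_t)(first_blk % blks_per_row);
        l.offset_last = (uint32_t)(last_blk % blks_per_row);
        l.first_col = l.offset_first / strip_sz;
        l.last_col = l.offset_last / strip_sz;
        return (l);
}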
typedef struct bmic_ident_ctrl {
uint8_t conf_ld_count;
@ -1042,6 +1144,55 @@ typedef struct bmic_identify_physical_device {
uint8_t padding[112];
}OS_ATTRIBUTE_PACKED bmic_ident_physdev_t;
typedef struct bmic_sense_feature {
uint8_t opcode;
uint8_t reserved1[1];
uint8_t page;
uint8_t sub_page;
uint8_t reserved2[2];
uint8_t cmd;
uint16_t transfer_length;
uint8_t reserved3[7];
}OS_ATTRIBUTE_PACKED bmic_sense_feature_t;
typedef struct bmic_sense_feature_buffer_header {
uint8_t page;
uint8_t sub_page;
uint16_t buffer_length;
} OS_ATTRIBUTE_PACKED bmic_sense_feature_buffer_header_t;
typedef struct bmic_sense_feature_page_header {
uint8_t page;
uint8_t sub_page;
uint16_t total_length; /** Total length of the page.
* The length is the same whether the request buffer is too short or not.
* When printing out the page, only print the buffer length. */
} OS_ATTRIBUTE_PACKED bmic_sense_feature_page_header_t;
typedef struct bmic_sense_feature_page_io {
struct bmic_sense_feature_page_header header;
uint8_t flags1;
} OS_ATTRIBUTE_PACKED bmic_sense_feature_page_io_t;
typedef struct bmic_sense_feature_page_io_aio_subpage {
struct bmic_sense_feature_page_header header;
uint8_t fw_aio_read_support;
uint8_t driver_aio_read_support;
uint8_t fw_aio_write_support;
uint8_t driver_aio_write_support;
uint16_t max_aio_rw_xfer_crypto_sas_sata; /* in kb */
uint16_t max_aio_rw_xfer_crypto_nvme; /* in kb */
uint16_t max_aio_write_raid5_6; /* in kb */
uint16_t max_aio_write_raid1_10_2drv; /* in kb */
uint16_t max_aio_write_raid1_10_3drv; /* in kb */
} OS_ATTRIBUTE_PACKED bmic_sense_feature_page_io_aio_subpage_t;
typedef struct bmic_sense_feature_aio_buffer {
struct bmic_sense_feature_buffer_header header;
struct bmic_sense_feature_page_io_aio_subpage aio_subpage;
} OS_ATTRIBUTE_PACKED bmic_sense_feature_aio_buffer_t;
typedef struct pqisrc_bmic_flush_cache {
uint8_t disable_cache;
uint8_t power_action;
@ -1067,7 +1218,7 @@ typedef void (*error_callback)(struct pqisrc_softstate *, struct request_contain
typedef struct request_container_block {
void *req;
void *error_info;
REQUEST_STATUS_T status;
int status;
uint32_t tag;
sgt_t *sg_chain_virt;
dma_addr_t sg_chain_dma;
@ -1076,34 +1227,71 @@ typedef struct request_container_block {
struct pqisrc_softstate *softs;
success_callback success_cmp_callback;
error_callback error_cmp_callback;
uint8_t *cdbp;
uint8_t *cdbp; /* points to either the bypass_cdb below or original host cdb */
uint8_t bypass_cdb[16]; /* bypass cmds will use this cdb memory */
int cmdlen;
uint32_t bcount; /* buffer size in byte */
uint32_t ioaccel_handle;
boolean_t encrypt_enable;
struct pqi_enc_info enc_info;
uint32_t row_num;
uint32_t blocks_per_row;
uint32_t raid_map_index;
uint32_t raid_map_row;
ib_queue_t *req_q;
int path;
IO_PATH_T path;
int resp_qid;
boolean_t req_pending;
uint32_t it_nexus[PQISRC_MAX_SUPPORTED_MIRRORS];
boolean_t timedout;
int tm_req;
int aio_retry;
boolean_t is_abort_cmd_from_host; /* true if this is a TMF abort */
boolean_t host_wants_to_abort_this; /* set to true to ID the request targeted by TMF */
uint64_t submit_time_user_secs; /* host submit time in user seconds */
uint64_t host_timeout_ms; /* original host timeout value in msec */
int cm_flags;
void *cm_data; /* pointer to data in kernel space */
bus_dmamap_t cm_datamap;
uint32_t nseg;
union ccb *cm_ccb;
sgt_t *sgt; /* sg table */
}rcb_t;
typedef struct tid_pool {
int tid[PQI_MAX_PHYSICALS];
int index;
}tid_pool_t;
typedef struct bit_map {
boolean_t bit_vector[MAX_TARGET_BIT];
}bit_map_t;
struct pqisrc_softstate {
typedef enum _io_type
{
UNKNOWN_IO_TYPE, /* IO Type is TBD or cannot be determined */
NON_RW_IO_TYPE, /* IO Type is non-Read/Write opcode (could separate BMIC, etc. if we wanted) */
READ_IO_TYPE, /* IO Type is SCSI Read */
WRITE_IO_TYPE, /* IO Type is SCSI Write */
} io_type_t;
typedef enum _counter_types
{
UNKNOWN_COUNTER,
HBA_COUNTER,
RAID0_COUNTER,
RAID1_COUNTER,
RAID5_COUNTER,
RAID6_COUNTER,
MAX_IO_COUNTER,
} counter_types_t;
typedef struct _io_counters
{
OS_ATOMIC64_T raid_read_cnt;
OS_ATOMIC64_T raid_write_cnt;
OS_ATOMIC64_T aio_read_cnt;
OS_ATOMIC64_T aio_write_cnt;
OS_ATOMIC64_T raid_non_read_write;
OS_ATOMIC64_T aio_non_read_write;
} io_counters_t;
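
The io_type and counter_types enums pair with io_counters_t in a two-step
accounting scheme: classify the request's RAID level to pick a slot in
counters[], then bump the field matching the path and opcode class. A minimal
sketch, with plain uint64_t standing in for the driver's OS_ATOMIC64_T:

#include <stdint.h>

typedef struct {
        uint64_t raid_read_cnt, raid_write_cnt;
        uint64_t aio_read_cnt, aio_write_cnt;
        uint64_t raid_non_read_write, aio_non_read_write;
} ex_io_counters_t;

enum ex_path { EX_RAID_PATH, EX_AIO_PATH };
enum ex_io_type { EX_NON_RW, EX_READ, EX_WRITE };

static void
ex_count_io(ex_io_counters_t *c, enum ex_path path, enum ex_io_type t)
{
        if (path == EX_AIO_PATH) {
                if (t == EX_READ)
                        c->aio_read_cnt++;
                else if (t == EX_WRITE)
                        c->aio_write_cnt++;
                else
                        c->aio_non_read_write++;
        } else {
                if (t == EX_READ)
                        c->raid_read_cnt++;
                else if (t == EX_WRITE)
                        c->raid_write_cnt++;
                else
                        c->raid_non_read_write++;
        }
}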
typedef struct pqisrc_softstate {
OS_SPECIFIC_T os_specific;
struct ioa_registers *ioa_reg;
struct pqi_registers *pqi_reg;
@ -1120,11 +1308,7 @@ struct pqisrc_softstate {
uint16_t subsysid; /* sub system id */
controller_state_t ctlr_state;
struct dma_mem err_buf_dma_mem;
struct dma_mem admin_queue_dma_mem;
struct dma_mem op_ibq_dma_mem;
struct dma_mem op_obq_dma_mem;
struct dma_mem event_q_dma_mem;
struct dma_mem sg_dma_desc[PQISRC_MAX_OUTSTANDING_REQ];
struct dma_mem sg_dma_desc[PQISRC_MAX_OUTSTANDING_REQ + 1];
ib_queue_t admin_ib_queue;
ob_queue_t admin_ob_queue;
ob_queue_t event_q;
@ -1138,12 +1322,14 @@ struct pqisrc_softstate {
uint32_t num_op_obq;
uint32_t num_elem_per_op_ibq;
uint32_t num_elem_per_op_obq;
uint32_t ibq_elem_size;
uint32_t obq_elem_size;
uint32_t max_ibq_elem_size;
uint32_t max_obq_elem_size;
pqi_dev_cap_t pqi_dev_cap;
uint16_t max_ib_iu_length_per_fw;
uint16_t max_ib_iu_length;
unsigned max_sg_per_iu;
uint16_t max_ib_iu_length; /* should be 1152 */
uint16_t max_spanning_elems; /* should be 9 spanning elements */
unsigned max_sg_per_single_iu_element; /* should be 8 */
unsigned max_sg_per_spanning_cmd; /* should be 68, 67 with AIO writes */
uint8_t ib_spanning_supported : 1;
uint8_t ob_spanning_supported : 1;
pqi_event_config_t event_config;
@ -1151,6 +1337,7 @@ struct pqisrc_softstate {
int intr_type;
int intr_count;
int num_cpus_online;
int num_devs;
boolean_t share_opq_and_eventq;
rcb_t *rcb;
#ifndef LOCKFREE_STACK
@ -1162,27 +1349,117 @@ struct pqisrc_softstate {
OS_LOCK_T devlist_lock OS_ATTRIBUTE_ALIGNED(8);
char devlist_lock_name[LOCKNAME_SIZE];
pqi_scsi_dev_t *device_list[PQI_MAX_DEVICES][PQI_MAX_MULTILUN];
pqi_scsi_dev_t *dev_list[PQI_MAX_DEVICES];
OS_SEMA_LOCK_T scan_lock;
uint8_t lun_count[PQI_MAX_DEVICES];
uint64_t target_sas_addr[PQI_MAX_EXT_TARGETS];
uint64_t phys_list_pos;
uint64_t prev_heartbeat_count;
uint64_t *heartbeat_counter_abs_addr;
uint64_t heartbeat_counter_off;
uint8_t *fw_features_section_abs_addr;
uint64_t fw_features_section_off;
uint32_t bus_id;
uint32_t device_id;
uint32_t func_id;
uint8_t adapter_num; /* globally unique adapter number */
char *os_name;
boolean_t ctrl_online;
uint8_t pqi_reset_quiesce_allowed : 1;
boolean_t ctrl_in_pqi_mode;
tid_pool_t tid_pool;
bit_map_t bit_map;
uint32_t adapterQDepth;
uint32_t dma_mem_consumed;
boolean_t adv_aio_capable;
boolean_t aio_raid1_write_bypass;
boolean_t aio_raid5_write_bypass;
boolean_t aio_raid6_write_bypass;
boolean_t enable_stream_detection;
uint16_t max_aio_write_raid5_6; /* bytes */
uint16_t max_aio_write_raid1_10_2drv; /* bytes */
uint16_t max_aio_write_raid1_10_3drv; /* bytes */
uint16_t max_aio_rw_xfer_crypto_nvme; /* bytes */
uint16_t max_aio_rw_xfer_crypto_sas_sata; /* bytes */
io_counters_t counters[MAX_IO_COUNTER];
boolean_t log_io_counters;
boolean_t ld_rescan;
#ifdef PQI_NEED_RESCAN_TIMER_FOR_RBOD_HOTPLUG
reportlun_data_ext_t *log_dev_list;
size_t log_dev_data_length;
uint32_t num_ptraid_targets;
#endif
boolean_t timeout_in_passthrough;
boolean_t timeout_in_tmf;
};
boolean_t sata_unique_wwn;
boolean_t page83id_in_rpl;
boolean_t err_resp_verbose;
#ifdef DEVICE_HINT
device_hint hint;
#endif
}pqisrc_softstate_t;
struct pqi_config_table {
uint8_t signature[8]; /* "CFGTABLE" */
uint32_t first_section_offset; /* offset in bytes from the base */
/* address of this table to the */
/* first section */
}OS_ATTRIBUTE_PACKED;
struct pqi_config_table_section_header {
uint16_t section_id; /* as defined by the */
/* PQI_CONFIG_TABLE_SECTION_* */
/* manifest constants above */
uint16_t next_section_offset; /* offset in bytes from base */
/* address of the table of the */
/* next section or 0 if last entry */
}OS_ATTRIBUTE_PACKED;
struct pqi_config_table_general_info {
struct pqi_config_table_section_header header;
uint32_t section_length; /* size of this section in bytes */
/* including the section header */
uint32_t max_outstanding_requests; /* max. outstanding */
/* commands supported by */
/* the controller */
uint32_t max_sg_size; /* max. transfer size of a single */
/* command */
uint32_t max_sg_per_request; /* max. number of scatter-gather */
/* entries supported in a single */
/* command */
}OS_ATTRIBUTE_PACKED;
struct pqi_config_table_firmware_features {
struct pqi_config_table_section_header header;
uint16_t num_elements;
uint8_t features_supported[];
/* u8 features_requested_by_host[]; */
/* u8 features_enabled[]; */
/* The 2 fields below are only valid if the MAX_KNOWN_FEATURE bit is set. */
/* uint16_t firmware_max_known_feature; */
/* uint16_t host_max_known_feature; */
}OS_ATTRIBUTE_PACKED;
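The table begins with the "CFGTABLE" signature and chains its sections through next_section_offset; both offsets are measured from the table base, and a next offset of 0 terminates the list. A walker built from just the structures above (the helper itself is illustrative, not driver code):

/* Hypothetical walker: find a config-table section by ID. Offsets are
 * relative to the table base; a next_section_offset of 0 ends the chain. */
static struct pqi_config_table_section_header *
find_section(struct pqi_config_table *table, uint16_t id)
{
	uint8_t *base = (uint8_t *)table;
	uint32_t off = table->first_section_offset;
	struct pqi_config_table_section_header *sec;

	while (off != 0) {
		sec = (struct pqi_config_table_section_header *)(base + off);
		if (sec->section_id == id)
			return (sec);
		off = sec->next_section_offset;
	}
	return (NULL);
}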
typedef struct pqi_vendor_general_request {
iu_header_t header; /* bytes 0-3 */
uint16_t response_id; /* bytes 4-5 */
uint16_t work; /* bytes 6-7 */
uint16_t request_id;
uint16_t function_code;
union {
struct {
uint16_t first_section;
uint16_t last_section;
uint8_t reserved[48];
} OS_ATTRIBUTE_PACKED config_table_update;
struct {
uint64_t buffer_address;
uint32_t buffer_length;
uint8_t reserved[40];
} OS_ATTRIBUTE_PACKED ofa_memory_allocation;
} data;
}OS_ATTRIBUTE_PACKED pqi_vendor_general_request_t;
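A config-table-update request tells the firmware which range of sections the host modified. The sketch below is illustrative only: the union layout comes from the typedef above, but the iu_header_t member names and both PQI_* constants are assumed placeholders, not identifiers verified against this driver.

/* Illustrative only: field names in iu_header_t and the two PQI_*
 * constants are assumed, not taken from this source. */
static void
build_config_update(pqi_vendor_general_request_t *rq, uint16_t req_id,
    uint16_t first, uint16_t last)
{
	memset(rq, 0, sizeof(*rq));
	rq->header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;	/* assumed */
	rq->header.iu_length = sizeof(*rq) - sizeof(iu_header_t);
	rq->request_id = req_id;
	rq->function_code = PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE; /* assumed */
	rq->data.config_table_update.first_section = first;
	rq->data.config_table_update.last_section = last;
}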
typedef struct vpd_logical_volume_status {
uint8_t peripheral_info;


@ -1,5 +1,5 @@
/*-
* Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
* Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -36,7 +36,7 @@ pqisrc_put_tag(pqi_taglist_t *taglist, uint32_t elem)
{
OS_ACQUIRE_SPINLOCK(&(taglist->lock));
DBG_FUNC("IN\n");
/* DBG_FUNC("IN\n");*/
ASSERT(taglist->num_elem < taglist->max_elem);
@ -48,7 +48,7 @@ pqisrc_put_tag(pqi_taglist_t *taglist, uint32_t elem)
OS_RELEASE_SPINLOCK(&taglist->lock);
DBG_FUNC("OUT\n");
/* DBG_FUNC("OUT\n");*/
}
/*
@ -101,14 +101,14 @@ pqisrc_init_taglist(pqisrc_softstate_t *softs, pqi_taglist_t *taglist,
goto err_out;
}
os_strlcpy(taglist->lockname, "tag_lock", LOCKNAME_SIZE);
ret = os_init_spinlock(softs, &taglist->lock, taglist->lockname);
if(ret){
DBG_ERR("tag lock initialization failed\n");
taglist->lockcreated=false;
goto err_lock;
}
taglist->lockcreated = true;
/* indices 1 to max_elem are considered as valid tags */
for (i=1; i <= max_elem; i++) {
@ -120,8 +120,8 @@ pqisrc_init_taglist(pqisrc_softstate_t *softs, pqi_taglist_t *taglist,
return ret;
err_lock:
os_mem_free(softs, (char *)taglist->elem_array,
(taglist->max_elem * sizeof(uint32_t)));
taglist->elem_array = NULL;
err_out:
DBG_FUNC("OUT failed\n");
@ -139,10 +139,10 @@ pqisrc_destroy_taglist(pqisrc_softstate_t *softs, pqi_taglist_t *taglist)
(taglist->max_elem * sizeof(uint32_t)));
taglist->elem_array = NULL;
if(taglist->lockcreated==true){
os_uninit_spinlock(&taglist->lock);
taglist->lockcreated = false;
}
DBG_FUNC("OUT\n");
}
@ -215,7 +215,7 @@ pqisrc_put_tag(lockless_stack_t *stack, uint32_t index)
union head_list cur_head, new_head;
DBG_FUNC("IN\n");
DBG_INFO("push tag :%d\n",index);
DBG_INFO("push tag :%u\n",index);
if (index >= stack->max_elem) {
ASSERT(false);
@ -264,7 +264,7 @@ pqisrc_get_tag(lockless_stack_t *stack)
stack->next_index_array[cur_head.top.index] = 0;
stack->num_elem--;
DBG_INFO("pop tag: %d\n",cur_head.top.index);
DBG_INFO("pop tag: %u\n",cur_head.top.index);
DBG_FUNC("OUT\n");
return cur_head.top.index; /*tag*/
}
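The hunks above loop on a compare-and-swap of a packed head word; the sequence half of head_list is what makes a stale snapshot fail the CAS, which sidesteps the ABA problem. The same Treiber-stack idea in self-contained C11, simplified and not the driver's actual code (index 0 doubles as the empty sentinel, matching the "indices 1 to max_elem are considered as valid tags" convention above):

#include <stdatomic.h>
#include <stdint.h>

/* Simplified lock-free tag stack. The 64-bit head packs {sequence, top
 * index}; the sequence is bumped on every successful push/pop so that a
 * reused index cannot satisfy a stale compare-exchange. */
#define MAX_TAGS	256

static _Atomic uint64_t head;			/* high 32: seq, low 32: top */
static uint32_t next_idx[MAX_TAGS + 1];		/* next_idx[i]: tag below i */

static void
push_tag(uint32_t tag)				/* tag must be 1..MAX_TAGS */
{
	uint64_t old, new;

	do {
		old = atomic_load(&head);
		next_idx[tag] = (uint32_t)old;		/* link to old top */
		new = (((old >> 32) + 1) << 32) | tag;	/* bump seq, set top */
	} while (!atomic_compare_exchange_weak(&head, &old, new));
}

static uint32_t
pop_tag(void)					/* returns 0 when empty */
{
	uint64_t old, new;
	uint32_t top;

	do {
		old = atomic_load(&head);
		top = (uint32_t)old;
		if (top == 0)
			return (0);
		new = (((old >> 32) + 1) << 32) | next_idx[top];
	} while (!atomic_compare_exchange_weak(&head, &old, new));
	return (top);
}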


@ -4,7 +4,7 @@ KMOD = smartpqi
.PATH: ${SRCTOP}/sys/dev/${KMOD}
SRCS=smartpqi_mem.c smartpqi_intr.c smartpqi_main.c smartpqi_cam.c smartpqi_ioctl.c smartpqi_misc.c smartpqi_sis.c smartpqi_init.c smartpqi_queue.c smartpqi_tag.c smartpqi_cmd.c smartpqi_request.c smartpqi_response.c smartpqi_event.c smartpqi_helper.c smartpqi_discovery.c
SRCS=smartpqi_mem.c smartpqi_intr.c smartpqi_main.c smartpqi_cam.c smartpqi_ioctl.c smartpqi_misc.c smartpqi_sis.c smartpqi_init.c smartpqi_queue.c smartpqi_tag.c smartpqi_cmd.c smartpqi_request.c smartpqi_response.c smartpqi_event.c smartpqi_helper.c smartpqi_discovery.c smartpqi_features.c
SRCS+= device_if.h bus_if.h pci_if.h opt_scsi.h opt_cam.h