smartpqi(4):

- Microsemi SCSI driver for PQI controllers.
- Found on newer model HP servers.
- Restrict to AMD64 only as per developer request.

The driver provides support for the new generation of PQI controllers
from Microsemi. This driver is the first SCSI driver to implement the PQI
queuing model and it will replace the aacraid driver for Adaptec Series 9
controllers.  Controllers supported by the driver include:

    HPE Gen10 Smart Array Controller Family
    OEM Controllers based on the Microsemi Chipset.

Submitted by:   deepak.ukey@microsemi.com
Relnotes:       yes
Sponsored by:   Microsemi
Differential Revision:   https://reviews.freebsd.org/D14514
Sean Bruno 2018-04-26 16:59:06 +00:00
parent 27d86a1ca4
commit 1e66f787c8
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=333019
28 changed files with 11815 additions and 0 deletions


@@ -476,6 +476,7 @@ MAN= aac.4 \
sio.4 \
sis.4 \
sk.4 \
smartpqi.4 \
smb.4 \
smbus.4 \
smp.4 \

share/man/man4/smartpqi.4 (new file, 102 lines)

@@ -0,0 +1,102 @@
.\" Copyright (c) 2018 Murthy Bhat
.\" All rights reserved.
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
.\" are met:
.\" 1. Redistributions of source code must retain the above copyright
.\" notice, this list of conditions and the following disclaimer.
.\" 2. Redistributions in binary form must reproduce the above copyright
.\" notice, this list of conditions and the following disclaimer in the
.\" documentation and/or other materials provided with the distribution.
.\"
.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
.\" $FreeBSD$ stable/10/share/man/man4/smartpqi.4 195614 2017-01-11 08:10:18Z jkim $
.Dd April 6, 2018
.Dt SMARTPQI 4
.Os
.Sh NAME
.Nm smartpqi
.Nd Microsemi smartpqi SCSI driver for PQI controllers
.Sh SYNOPSIS
To compile this driver into the kernel,
place the following lines in your
kernel configuration file:
.Bd -ragged -offset indent
.Cd device pci
.Cd device scbus
.Cd device smartpqi
.Ed
.Pp
Alternatively, to load the driver as a
module at boot time, place the following line in
.Xr loader.conf 5 :
.Bd -literal -offset indent
smartpqi_load="YES"
.Ed
.Sh DESCRIPTION
The
.Nm
SCSI driver provides support for the new generation of PQI controllers from
Microsemi.
The
.Nm
driver is the first SCSI driver to implement the PQI queuing model.
.Pp
The
.Nm
driver will replace the
.Xr aacraid 4
driver for Adaptec Series 9 controllers.
.Pp
The
.Pa /dev/smartpqi?
device nodes provide access to the management interface of the controller.
One node exists per installed card.
.Sh HARDWARE
Controllers supported by the
.Nm
driver include:
.Pp
.Bl -bullet -compact
.It
HPE Gen10 Smart Array Controller Family
.It
OEM Controllers based on the Microsemi Chipset
.El
.Sh FILES
.Bl -tag -width /dev/smartpqi? -compact
.It Pa /dev/smartpqi?
smartpqi management interface
.El
.Sh SEE ALSO
.Xr kld 4 ,
.Xr linux 4 ,
.Xr pass 4 ,
.Xr scsi 4 ,
.Xr xpt 4 ,
.Xr loader.conf 5 ,
.Xr camcontrol 8 ,
.Xr kldload 8
.Rs
.%T "Microsemi Website"
.%U http://www.microsemi.com/
.Re
.Sh HISTORY
The
.Nm
driver first appeared in
.Fx 11.1 .
.Sh AUTHORS
.An Murthy Bhat
.Aq murthy.bhat@microsemi.com
.Sh BUGS
The controller is not actually paused on suspend/resume.


@@ -166,6 +166,7 @@ device iir # Intel Integrated RAID
device ips # IBM (Adaptec) ServeRAID
device mly # Mylex AcceleRAID/eXtremeRAID
device twa # 3ware 9000 series PATA/SATA RAID
device smartpqi # Microsemi smartpqi driver
device tws # LSI 3ware 9750 SATA+SAS 6Gb/s RAID controller
# RAID controllers


@@ -1716,6 +1716,13 @@ device mfip # LSI MegaRAID SAS passthrough, requires CAM
options MFI_DEBUG
device mrsas # LSI/Avago MegaRAID SAS/SATA, 6Gb/s and 12Gb/s
#
# Microsemi smartpqi controllers.
# These controllers have a SCSI-like interface, and require the
# CAM infrastructure.
#
device smartpqi
#
# 3ware ATA RAID
#


@@ -463,6 +463,22 @@ dev/sio/sio_isa.c optional sio isa
dev/sio/sio_pccard.c optional sio pccard
dev/sio/sio_pci.c optional sio pci
dev/sio/sio_puc.c optional sio puc
dev/smartpqi/smartpqi_cam.c optional smartpqi
dev/smartpqi/smartpqi_cmd.c optional smartpqi
dev/smartpqi/smartpqi_discovery.c optional smartpqi
dev/smartpqi/smartpqi_event.c optional smartpqi
dev/smartpqi/smartpqi_helper.c optional smartpqi
dev/smartpqi/smartpqi_init.c optional smartpqi
dev/smartpqi/smartpqi_intr.c optional smartpqi
dev/smartpqi/smartpqi_ioctl.c optional smartpqi
dev/smartpqi/smartpqi_main.c optional smartpqi
dev/smartpqi/smartpqi_mem.c optional smartpqi
dev/smartpqi/smartpqi_misc.c optional smartpqi
dev/smartpqi/smartpqi_queue.c optional smartpqi
dev/smartpqi/smartpqi_request.c optional smartpqi
dev/smartpqi/smartpqi_response.c optional smartpqi
dev/smartpqi/smartpqi_sis.c optional smartpqi
dev/smartpqi/smartpqi_tag.c optional smartpqi
dev/speaker/spkr.c optional speaker
dev/syscons/apm/apm_saver.c optional apm_saver apm
dev/syscons/scterm-teken.c optional sc

File diff suppressed because it is too large.


@@ -0,0 +1,76 @@
/*-
* Copyright (c) 2018 Microsemi Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* $FreeBSD$ */
#include "smartpqi_includes.h"
/*
* Function to submit the request to the adapter.
*/
int
pqisrc_submit_cmnd(pqisrc_softstate_t *softs, ib_queue_t *ib_q, void *req)
{
char *slot = NULL;
uint32_t offset;
iu_header_t *hdr = (iu_header_t *)req;
uint32_t iu_len = hdr->iu_length + 4; /* header size */
int i = 0;
DBG_FUNC("IN\n");
PQI_LOCK(&ib_q->lock);
/* Check queue full */
if ((ib_q->pi_local + 1) % ib_q->num_elem == *(ib_q->ci_virt_addr)) {
DBG_WARN("OUT Q full\n");
PQI_UNLOCK(&ib_q->lock);
return PQI_STATUS_QFULL;
}
/* Get the slot */
offset = ib_q->pi_local * ib_q->elem_size;
slot = ib_q->array_virt_addr + offset;
/* Copy the IU */
memcpy(slot, req, iu_len);
DBG_INFO("IU : \n");
for (i = 0; i < iu_len; i++)
DBG_INFO(" IU [ %d ] : %x\n", i, *((unsigned char *)(slot + i)));
/* Update the local PI */
ib_q->pi_local = (ib_q->pi_local + 1) % ib_q->num_elem;
DBG_INFO("ib_q->pi_local : %x IU size : %d\n",
ib_q->pi_local, hdr->iu_length);
DBG_INFO("*ib_q->ci_virt_addr: %x\n",
*(ib_q->ci_virt_addr));
/* Inform the fw about the new IU */
PCI_MEM_PUT32(softs, ib_q->pi_register_abs, ib_q->pi_register_offset, ib_q->pi_local);
PQI_UNLOCK(&ib_q->lock);
DBG_FUNC("OUT\n");
return PQI_STATUS_SUCCESS;
}
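The queue-full path above returns PQI_STATUS_QFULL rather than blocking, so callers are expected to retry. A minimal caller sketch, assuming the request's iu_header_t is already filled in (the wrapper name and retry parameters are illustrative, not part of the driver):

/*
 * Hypothetical retry wrapper around pqisrc_submit_cmnd(); retries a few
 * times while the inbound queue is full, sleeping briefly so the
 * firmware can consume elements and advance the consumer index.
 */
static int
submit_with_retry(pqisrc_softstate_t *softs, ib_queue_t *ib_q, void *req)
{
	int retries = 3;
	int ret;

	do {
		ret = pqisrc_submit_cmnd(softs, ib_q, req);
		if (ret != PQI_STATUS_QFULL)
			break;
		OS_SLEEP(500);	/* microseconds, as elsewhere in the driver */
	} while (--retries > 0);

	return ret;
}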

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -0,0 +1,439 @@
/*-
* Copyright (c) 2018 Microsemi Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* $FreeBSD$ */
#include"smartpqi_includes.h"
/*
* Function to rescan the devices connected to adapter.
*/
int
pqisrc_rescan_devices(pqisrc_softstate_t *softs)
{
int ret;
DBG_FUNC("IN\n");
os_sema_lock(&softs->scan_lock);
ret = pqisrc_scan_devices(softs);
os_sema_unlock(&softs->scan_lock);
DBG_FUNC("OUT\n");
return ret;
}
/*
* Subroutine to acknowledge the events processed by the driver to the adapter.
*/
static void
pqisrc_acknowledge_event(pqisrc_softstate_t *softs,
struct pqi_event *event)
{
pqi_event_acknowledge_request_t request;
ib_queue_t *ib_q = &softs->op_raid_ib_q[0];
int tmo = PQISRC_EVENT_ACK_RESP_TIMEOUT;
memset(&request, 0, sizeof(request));
DBG_FUNC("IN\n");
request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
request.header.iu_length = (sizeof(pqi_event_acknowledge_request_t) -
PQI_REQUEST_HEADER_LENGTH);
request.event_type = event->event_type;
request.event_id = event->event_id;
request.additional_event_id = event->additional_event_id;
/* Submit Event Acknowledge */
pqisrc_submit_cmnd(softs, ib_q, &request);
/*
* We have to special-case this type of request because the firmware
* does not generate an interrupt when this type of request completes.
* Therefore, we have to poll until we see that the firmware has
* consumed the request before we move on.
*/
COND_WAIT(((ib_q->pi_local) == *(ib_q->ci_virt_addr)), tmo);
if (tmo <= 0) {
DBG_ERR("wait for event acknowledge timed out\n");
DBG_ERR("tmo : %d\n",tmo);
}
DBG_FUNC(" OUT\n");
}
/*
* Acknowledge processed events to the adapter.
*/
void
pqisrc_ack_all_events(void *arg1)
{
int i;
struct pqi_event *pending_event;
pqisrc_softstate_t *softs = (pqisrc_softstate_t*)arg1;
DBG_FUNC(" IN\n");
pending_event = &softs->pending_events[0];
for (i=0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
if (pending_event->pending == true) {
pending_event->pending = false;
pqisrc_acknowledge_event(softs, pending_event);
}
pending_event++;
}
/* Rescan devices except for heartbeat event */
if ((pqisrc_rescan_devices(softs)) != PQI_STATUS_SUCCESS) {
DBG_ERR(" Failed to Re-Scan devices\n ");
}
DBG_FUNC(" OUT\n");
}
/*
* Get event index from event type to validate the type of event.
*/
static int
pqisrc_event_type_to_event_index(unsigned event_type)
{
int index;
switch (event_type) {
case PQI_EVENT_TYPE_HOTPLUG:
index = PQI_EVENT_HOTPLUG;
break;
case PQI_EVENT_TYPE_HARDWARE:
index = PQI_EVENT_HARDWARE;
break;
case PQI_EVENT_TYPE_PHYSICAL_DEVICE:
index = PQI_EVENT_PHYSICAL_DEVICE;
break;
case PQI_EVENT_TYPE_LOGICAL_DEVICE:
index = PQI_EVENT_LOGICAL_DEVICE;
break;
case PQI_EVENT_TYPE_AIO_STATE_CHANGE:
index = PQI_EVENT_AIO_STATE_CHANGE;
break;
case PQI_EVENT_TYPE_AIO_CONFIG_CHANGE:
index = PQI_EVENT_AIO_CONFIG_CHANGE;
break;
default:
index = -1;
break;
}
return index;
}
/*
* Function used to process the events supported by the adapter.
*/
int
pqisrc_process_event_intr_src(pqisrc_softstate_t *softs, int obq_id)
{
uint32_t obq_pi, obq_ci;
pqi_event_response_t response;
ob_queue_t *event_q;
struct pqi_event *pending_event;
boolean_t need_delayed_work = false;
DBG_FUNC(" IN\n");
OS_ATOMIC64_INC(softs, num_intrs);
event_q = &softs->event_q;
obq_ci = event_q->ci_local;
obq_pi = *(event_q->pi_virt_addr);
DBG_INFO("Initial Event_q ci : %d Event_q pi : %d\n", obq_ci, obq_pi);
while(1) {
int event_index;
DBG_INFO("queue_id : %d ci : %d pi : %d\n",obq_id, obq_ci, obq_pi);
if (obq_pi == obq_ci)
break;
need_delayed_work = true;
/* Copy the response */
memcpy(&response, event_q->array_virt_addr + (obq_ci * event_q->elem_size),
sizeof(pqi_event_response_t));
DBG_INFO("response.header.iu_type : 0x%x \n", response.header.iu_type);
DBG_INFO("response.event_type : 0x%x \n", response.event_type);
event_index = pqisrc_event_type_to_event_index(response.event_type);
if (event_index >= 0) {
if(response.request_acknowledge) {
pending_event = &softs->pending_events[event_index];
pending_event->pending = true;
pending_event->event_type = response.event_type;
pending_event->event_id = response.event_id;
pending_event->additional_event_id = response.additional_event_id;
}
}
obq_ci = (obq_ci + 1) % event_q->num_elem;
}
/* Update CI */
event_q->ci_local = obq_ci;
PCI_MEM_PUT32(softs, event_q->ci_register_abs,
event_q->ci_register_offset, event_q->ci_local);
/* Add events to the task queue for acknowledgement */
if (need_delayed_work == true) {
os_eventtaskqueue_enqueue(softs);
}
DBG_FUNC("OUT\n");
return PQI_STATUS_SUCCESS;
}
/*
* Function used to send a general management request to adapter.
*/
int pqisrc_submit_management_req(pqisrc_softstate_t *softs,
pqi_event_config_request_t *request)
{
int ret = PQI_STATUS_SUCCESS;
ib_queue_t *op_ib_q = &softs->op_raid_ib_q[0];
rcb_t *rcb = NULL;
DBG_FUNC(" IN\n");
/* Get the tag */
request->request_id = pqisrc_get_tag(&softs->taglist);
if (INVALID_ELEM == request->request_id) {
DBG_ERR("Tag not available\n");
ret = PQI_STATUS_FAILURE;
goto err_out;
}
rcb = &softs->rcb[request->request_id];
rcb->req_pending = true;
rcb->tag = request->request_id;
/* Submit command on operational raid ib queue */
ret = pqisrc_submit_cmnd(softs, op_ib_q, request);
if (ret != PQI_STATUS_SUCCESS) {
DBG_ERR(" Unable to submit command\n");
goto err_cmd;
}
ret = pqisrc_wait_on_condition(softs, rcb);
if (ret != PQI_STATUS_SUCCESS) {
DBG_ERR("Management request timed out !!\n");
goto err_cmd;
}
os_reset_rcb(rcb);
pqisrc_put_tag(&softs->taglist,request->request_id);
DBG_FUNC("OUT\n");
return ret;
err_cmd:
os_reset_rcb(rcb);
pqisrc_put_tag(&softs->taglist,request->request_id);
err_out:
DBG_FUNC(" failed OUT : %d\n", ret);
return ret;
}
/*
* Build and send the general management request.
*/
static int
pqi_event_configure(pqisrc_softstate_t *softs,
pqi_event_config_request_t *request,
dma_mem_t *buff)
{
int ret = PQI_STATUS_SUCCESS;
DBG_FUNC(" IN\n");
request->header.comp_feature = 0x00;
request->header.iu_length = sizeof(pqi_event_config_request_t) -
PQI_REQUEST_HEADER_LENGTH; /* excluding IU header length */
/* Op OQ id where the response is to be delivered */
request->response_queue_id = softs->op_ob_q[0].q_id;
request->buffer_length = buff->size;
request->sg_desc.addr = buff->dma_addr;
request->sg_desc.length = buff->size;
request->sg_desc.zero = 0;
request->sg_desc.type = SGL_DESCRIPTOR_CODE_LAST_ALTERNATIVE_SGL_SEGMENT;
/* Submit management request IU */
ret = pqisrc_submit_management_req(softs, request);
if (ret)
goto err_out;
DBG_FUNC(" OUT\n");
return ret;
err_out:
DBG_FUNC("Failed OUT\n");
return ret;
}
/*
* Prepare REPORT EVENT CONFIGURATION IU to request that
* event configuration information be reported.
*/
int pqisrc_report_event_config(pqisrc_softstate_t *softs)
{
int ret, i;
pqi_event_config_request_t request;
pqi_event_config_t *event_config_p;
dma_mem_t buf_report_event;
/* bytes to be allocated for report event config data-in buffer */
uint32_t alloc_size = sizeof(pqi_event_config_t);
memset(&request, 0, sizeof(request));
DBG_FUNC(" IN\n");
memset(&buf_report_event, 0, sizeof(struct dma_mem));
buf_report_event.tag = "pqi_report_event_buf";
buf_report_event.size = alloc_size;
buf_report_event.align = PQISRC_DEFAULT_DMA_ALIGN;
/* allocate memory */
ret = os_dma_mem_alloc(softs, &buf_report_event);
if (ret) {
DBG_ERR("Failed to Allocate report event config buffer : %d\n", ret);
goto err_out;
}
DBG_INFO("buf_report_event.dma_addr = %p \n",(void*)buf_report_event.dma_addr);
DBG_INFO("buf_report_event.virt_addr = %p \n",(void*)buf_report_event.virt_addr);
request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
/* Event configuration */
ret = pqi_event_configure(softs, &request, &buf_report_event);
if (ret)
goto free_mem;
event_config_p = (pqi_event_config_t*)buf_report_event.virt_addr;
softs->event_config.num_event_descriptors = MIN(event_config_p->num_event_descriptors,
PQI_MAX_EVENT_DESCRIPTORS);
for (i = 0; i < softs->event_config.num_event_descriptors; i++) {
softs->event_config.descriptors[i].event_type =
event_config_p->descriptors[i].event_type;
}
/* free the allocated memory*/
os_dma_mem_free(softs, &buf_report_event);
DBG_FUNC(" OUT\n");
return ret;
free_mem:
os_dma_mem_free(softs, &buf_report_event);
err_out:
DBG_FUNC("Failed OUT\n");
return PQI_STATUS_FAILURE;
}
/*
* Prepare SET EVENT CONFIGURATION IU to request that
* event configuration parameters be set.
*/
int pqisrc_set_event_config(pqisrc_softstate_t *softs)
{
int ret, i;
pqi_event_config_request_t request;
pqi_event_config_t *event_config_p;
dma_mem_t buf_set_event;
/* bytes to be allocated for set event config data-out buffer */
uint32_t alloc_size = sizeof(pqi_event_config_t);
memset(&request, 0, sizeof(request));
DBG_FUNC(" IN\n");
memset(&buf_set_event, 0, sizeof(struct dma_mem));
buf_set_event.tag = "pqi_set_event_buf";
buf_set_event.size = alloc_size;
buf_set_event.align = PQISRC_DEFAULT_DMA_ALIGN;
/* allocate memory */
ret = os_dma_mem_alloc(softs, &buf_set_event);
if (ret) {
DBG_ERR("Failed to Allocate set event config buffer : %d\n", ret);
goto err_out;
}
DBG_INFO("buf_set_event.dma_addr = %p\n",(void*)buf_set_event.dma_addr);
DBG_INFO("buf_set_event.virt_addr = %p\n",(void*)buf_set_event.virt_addr);
request.header.iu_type = PQI_REQUEST_IU_SET_EVENT_CONFIG;
request.iu_specific.global_event_oq_id = softs->event_q.q_id;
/*pointer to data-out buffer*/
event_config_p = (pqi_event_config_t *)buf_set_event.virt_addr;
event_config_p->num_event_descriptors = softs->event_config.num_event_descriptors;
for (i = 0; i < softs->event_config.num_event_descriptors; i++) {
event_config_p->descriptors[i].event_type =
softs->event_config.descriptors[i].event_type;
if (pqisrc_event_type_to_event_index(event_config_p->descriptors[i].event_type) != -1)
event_config_p->descriptors[i].oq_id = softs->event_q.q_id;
else
event_config_p->descriptors[i].oq_id = 0; /* event not supported */
}
/* Event configuration */
ret = pqi_event_configure(softs, &request, &buf_set_event);
if (ret)
goto free_mem;
os_dma_mem_free(softs, &buf_set_event);
DBG_FUNC(" OUT\n");
return ret;
free_mem:
os_dma_mem_free(softs, &buf_set_event);
err_out:
DBG_FUNC("Failed OUT\n");
return PQI_STATUS_FAILURE;
}


@@ -0,0 +1,291 @@
/*-
* Copyright (c) 2018 Microsemi Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* $FreeBSD$ */
#include "smartpqi_includes.h"
/*
* Function used to validate the adapter health.
*/
boolean_t pqisrc_ctrl_offline(pqisrc_softstate_t *softs)
{
DBG_FUNC("IN\n");
DBG_FUNC("OUT\n");
return !softs->ctrl_online;
}
/*
* Function used to take exposed devices to OS as offline.
*/
void pqisrc_take_devices_offline(pqisrc_softstate_t *softs)
{
pqi_scsi_dev_t *device = NULL;
int i,j;
DBG_FUNC("IN\n");
for(i = 0; i < PQI_MAX_DEVICES; i++) {
for(j = 0; j < PQI_MAX_MULTILUN; j++) {
if(softs->device_list[i][j] == NULL)
continue;
device = softs->device_list[i][j];
pqisrc_remove_device(softs, device);
}
}
DBG_FUNC("OUT\n");
}
/*
* Function used to take adapter offline.
*/
void pqisrc_take_ctrl_offline(pqisrc_softstate_t *softs)
{
DBG_FUNC("IN\n");
softs->ctrl_online = false;
pqisrc_trigger_nmi_sis(softs);
os_complete_outstanding_cmds_nodevice(softs);
pqisrc_take_devices_offline(softs);
DBG_FUNC("OUT\n");
}
/*
* Timer handler for the adapter heart-beat.
*/
void pqisrc_heartbeat_timer_handler(pqisrc_softstate_t *softs)
{
uint64_t num_intrs;
uint8_t take_offline = false;
DBG_FUNC("IN\n");
num_intrs = OS_ATOMIC64_READ(softs, num_intrs);
if (PQI_NEW_HEARTBEAT_MECHANISM(softs)) {
if (CTRLR_HEARTBEAT_CNT(softs) == softs->prev_heartbeat_count) {
take_offline = true;
goto take_ctrl_offline;
}
softs->prev_heartbeat_count = CTRLR_HEARTBEAT_CNT(softs);
DBG_INFO("CTRLR_HEARTBEAT_CNT(softs) = %lx \
softs->prev_heartbeat_count = %lx\n",
CTRLR_HEARTBEAT_CNT(softs), softs->prev_heartbeat_count);
} else {
if (num_intrs == softs->prev_num_intrs) {
softs->num_heartbeats_requested++;
if (softs->num_heartbeats_requested > PQI_MAX_HEARTBEAT_REQUESTS) {
take_offline = true;
goto take_ctrl_offline;
}
softs->pending_events[PQI_EVENT_HEARTBEAT].pending = true;
pqisrc_ack_all_events((void*)softs);
} else {
softs->num_heartbeats_requested = 0;
}
softs->prev_num_intrs = num_intrs;
}
take_ctrl_offline:
if (take_offline){
DBG_ERR("controller is offline\n");
pqisrc_take_ctrl_offline(softs);
os_stop_heartbeat_timer(softs);
}
DBG_FUNC("OUT\n");
}
/*
* Conditional variable management routine for internal commands.
*/
int pqisrc_wait_on_condition(pqisrc_softstate_t *softs, rcb_t *rcb)
{
int ret = PQI_STATUS_SUCCESS;
uint32_t loop_cnt = 0;
DBG_FUNC("IN\n");
while (rcb->req_pending == true) {
OS_SLEEP(500); /* Micro sec */
/*
 * Polling is needed on FreeBSD because the ithread routine is not
 * scheduled during early boot; poll until interrupts are enabled
 * (the 'if (cold)' check distinguishes boot time, before interrupts
 * are enabled, from normal operation).
 */
IS_POLLING_REQUIRED(softs);
if (loop_cnt++ == PQISRC_CMD_TIMEOUT_CNT) {
DBG_ERR("ERR: Requested cmd timed out !!!\n");
ret = PQI_STATUS_TIMEOUT;
break;
}
if (pqisrc_ctrl_offline(softs)) {
DBG_ERR("Controller is Offline");
ret = PQI_STATUS_FAILURE;
break;
}
}
rcb->req_pending = true;
DBG_FUNC("OUT\n");
return ret;
}
/* Function used to validate the device wwid. */
boolean_t pqisrc_device_equal(pqi_scsi_dev_t *dev1,
pqi_scsi_dev_t *dev2)
{
return dev1->wwid == dev2->wwid;
}
/* Function used to validate the device scsi3addr. */
boolean_t pqisrc_scsi3addr_equal(uint8_t *scsi3addr1, uint8_t *scsi3addr2)
{
return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}
/* Function used to validate hba_lunid */
boolean_t pqisrc_is_hba_lunid(uint8_t *scsi3addr)
{
return pqisrc_scsi3addr_equal(scsi3addr, (uint8_t*)RAID_CTLR_LUNID);
}
/* Function used to validate type of device */
boolean_t pqisrc_is_logical_device(pqi_scsi_dev_t *device)
{
return !device->is_physical_device;
}
/* Function used to sanitize inquiry string */
void pqisrc_sanitize_inquiry_string(unsigned char *s, int len)
{
boolean_t terminated = false;
DBG_FUNC("IN\n");
for (; len > 0; (--len, ++s)) {
if (*s == 0)
terminated = true;
if (terminated || *s < 0x20 || *s > 0x7e)
*s = ' ';
}
DBG_FUNC("OUT\n");
}
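A hedged usage sketch: the vendor and model strings printed by pqisrc_display_device_info() below are fixed-width INQUIRY fields (formatted as %.8s and %.16s), so a caller would typically sanitize them right after fetching the INQUIRY data. Illustrative only; it assumes device->vendor and device->model are those 8- and 16-byte fields:

/* Replace NUL terminators and non-printable bytes with spaces. */
pqisrc_sanitize_inquiry_string((unsigned char *)device->vendor, 8);
pqisrc_sanitize_inquiry_string((unsigned char *)device->model, 16);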
static char *raid_levels[] = {
"RAID 0",
"RAID 4",
"RAID 1(1+0)",
"RAID 5",
"RAID 5+1",
"RAID ADG",
"RAID 1(ADM)",
"RAID 6",
};
/* Get the RAID level from the index */
char *pqisrc_raidlevel_to_string(uint8_t raid_level)
{
DBG_FUNC("IN\n");
if (raid_level < ARRAY_SIZE(raid_levels))
return raid_levels[raid_level];
DBG_FUNC("OUT\n");
return " ";
}
/* Debug routine for displaying device info */
void pqisrc_display_device_info(pqisrc_softstate_t *softs,
char *action, pqi_scsi_dev_t *device)
{
DBG_INFO( "%s scsi BTL %d:%d:%d: %.8s %.16s %-12s SSDSmartPathCap%c En%c Exp%c qd=%d\n",
action,
device->bus,
device->target,
device->lun,
device->vendor,
device->model,
pqisrc_raidlevel_to_string(device->raid_level),
device->offload_config ? '+' : '-',
device->offload_enabled_pending ? '+' : '-',
device->expose_device ? '+' : '-',
device->queue_depth);
pqisrc_raidlevel_to_string(device->raid_level); /* dummy call; keeps the function referenced when DBG_INFO is compiled out */
}
/* validate the structure sizes */
void check_struct_sizes(void)
{
ASSERT(sizeof(SCSI3Addr_struct) == 2);
ASSERT(sizeof(PhysDevAddr_struct) == 8);
ASSERT(sizeof(LogDevAddr_struct) == 8);
ASSERT(sizeof(LUNAddr_struct) == 8);
ASSERT(sizeof(RequestBlock_struct) == 20);
ASSERT(sizeof(MoreErrInfo_struct) == 8);
ASSERT(sizeof(ErrorInfo_struct) == 48);
ASSERT(sizeof(IOCTL_Command_struct) == 86);
ASSERT(sizeof(struct bmic_host_wellness_driver_version) == 42);
ASSERT(sizeof(struct bmic_host_wellness_time) == 20);
ASSERT(sizeof(struct pqi_dev_adminq_cap) == 8);
ASSERT(sizeof(struct admin_q_param) == 4);
ASSERT(sizeof(struct pqi_registers) == 256);
ASSERT(sizeof(struct ioa_registers) == 4128);
ASSERT(sizeof(struct pqi_pref_settings) == 4);
ASSERT(sizeof(struct pqi_cap) == 20);
ASSERT(sizeof(iu_header_t) == 4);
ASSERT(sizeof(gen_adm_req_iu_t) == 64);
ASSERT(sizeof(gen_adm_resp_iu_t) == 64);
ASSERT(sizeof(op_q_params) == 9);
ASSERT(sizeof(raid_path_error_info_elem_t) == 276);
ASSERT(sizeof(aio_path_error_info_elem_t) == 276);
ASSERT(sizeof(struct init_base_struct) == 24);
ASSERT(sizeof(pqi_iu_layer_desc_t) == 16);
ASSERT(sizeof(pqi_dev_cap_t) == 576);
ASSERT(sizeof(pqi_aio_req_t) == 128);
ASSERT(sizeof(pqisrc_raid_req_t) == 128);
ASSERT(sizeof(pqi_tmf_req_t) == 32);
ASSERT(sizeof(struct pqi_io_response) == 16);
ASSERT(sizeof(struct sense_header_scsi) == 8);
ASSERT(sizeof(reportlun_header_t) == 8);
ASSERT(sizeof(reportlun_ext_entry_t) == 24);
ASSERT(sizeof(reportlun_data_ext_t) == 32);
ASSERT(sizeof(raidmap_data_t) == 8);
ASSERT(sizeof(pqisrc_raid_map_t) == 8256);
ASSERT(sizeof(bmic_ident_ctrl_t) == 325);
ASSERT(sizeof(bmic_ident_physdev_t) == 2048);
}
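Since every size checked here is a compile-time constant, the same validation could also be done at build time; a sketch using FreeBSD's CTASSERT() macro (available in the kernel via sys/systm.h, which smartpqi_includes.h already pulls in), shown for a few entries only:

/* Compile-time equivalents of a few of the run-time checks above;
 * a mismatch fails the build instead of firing at run time. */
CTASSERT(sizeof(iu_header_t) == 4);
CTASSERT(sizeof(gen_adm_req_iu_t) == 64);
CTASSERT(sizeof(pqisrc_raid_map_t) == 8256);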


@@ -0,0 +1,90 @@
/*-
* Copyright (c) 2018 Microsemi Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* $FreeBSD$ */
#ifndef _PQI_INCLUDES_H
#define _PQI_INCLUDES_H
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/disk.h>
#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/rman.h>
#include <sys/endian.h>
#include <sys/condvar.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/sema.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/smp.h>
#include <machine/cpufunc.h>
#include <sys/cpu.h>
#include <sys/pcpu.h>
#include <sys/time.h>
#include <sys/clock.h>
#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_debug.h>
#include <cam/cam_periph.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/cam_queue.h>
#include <cam/cam_xpt_periph.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/md_var.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include "smartpqi_defines.h"
#include "smartpqi_structures.h"
#include "smartpqi_prototypes.h"
#include "smartpqi_ioctl.h"
#endif /* _PQI_INCLUDES_H */


@@ -0,0 +1,913 @@
/*-
* Copyright (c) 2018 Microsemi Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* $FreeBSD$ */
#include "smartpqi_includes.h"
/*
* Request the adapter to get PQI capabilities supported.
*/
static int pqisrc_report_pqi_capability(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
gen_adm_req_iu_t admin_req;
gen_adm_resp_iu_t admin_resp;
dma_mem_t pqi_cap_dma_buf;
pqi_dev_cap_t *capability = NULL;
pqi_iu_layer_desc_t *iu_layer_desc = NULL;
DBG_FUNC("IN\n");
/* Allocate Non DMA memory */
capability = os_mem_alloc(softs, sizeof(*capability));
if (!capability) {
DBG_ERR("Failed to allocate memory for capability\n");
ret = PQI_STATUS_FAILURE;
goto err_out;
}
memset(&admin_req, 0, sizeof(admin_req));
memset(&admin_resp, 0, sizeof(admin_resp));
memset(&pqi_cap_dma_buf, 0, sizeof(struct dma_mem));
pqi_cap_dma_buf.tag = "pqi_cap_buf";
pqi_cap_dma_buf.size = REPORT_PQI_DEV_CAP_DATA_BUF_SIZE;
pqi_cap_dma_buf.align = PQISRC_DEFAULT_DMA_ALIGN;
ret = os_dma_mem_alloc(softs, &pqi_cap_dma_buf);
if (ret) {
DBG_ERR("Failed to allocate capability DMA buffer : %d\n", ret);
goto err_dma_alloc;
}
admin_req.fn_code = PQI_FUNCTION_REPORT_DEV_CAP;
admin_req.req_type.general_func.buf_size = pqi_cap_dma_buf.size;
admin_req.req_type.general_func.sg_desc.length = pqi_cap_dma_buf.size;
admin_req.req_type.general_func.sg_desc.addr = pqi_cap_dma_buf.dma_addr;
admin_req.req_type.general_func.sg_desc.type = SGL_DESCRIPTOR_CODE_DATA_BLOCK;
ret = pqisrc_submit_admin_req(softs, &admin_req, &admin_resp);
if( PQI_STATUS_SUCCESS == ret) {
memcpy(capability,
pqi_cap_dma_buf.virt_addr,
MIN(pqi_cap_dma_buf.size, sizeof(*capability)));
} else {
DBG_ERR("Failed to send admin req report pqi device capability\n");
goto err_admin_req;
}
softs->pqi_dev_cap.max_iqs = capability->max_iqs;
softs->pqi_dev_cap.max_iq_elements = capability->max_iq_elements;
softs->pqi_dev_cap.max_iq_elem_len = capability->max_iq_elem_len;
softs->pqi_dev_cap.min_iq_elem_len = capability->min_iq_elem_len;
softs->pqi_dev_cap.max_oqs = capability->max_oqs;
softs->pqi_dev_cap.max_oq_elements = capability->max_oq_elements;
softs->pqi_dev_cap.max_oq_elem_len = capability->max_oq_elem_len;
softs->pqi_dev_cap.intr_coales_time_granularity = capability->intr_coales_time_granularity;
iu_layer_desc = &capability->iu_layer_desc[PQI_PROTOCOL_SOP];
softs->max_ib_iu_length_per_fw = iu_layer_desc->max_ib_iu_len;
softs->ib_spanning_supported = iu_layer_desc->ib_spanning_supported;
softs->ob_spanning_supported = iu_layer_desc->ob_spanning_supported;
DBG_INFO("softs->pqi_dev_cap.max_iqs: %d\n", softs->pqi_dev_cap.max_iqs);
DBG_INFO("softs->pqi_dev_cap.max_iq_elements: %d\n", softs->pqi_dev_cap.max_iq_elements);
DBG_INFO("softs->pqi_dev_cap.max_iq_elem_len: %d\n", softs->pqi_dev_cap.max_iq_elem_len);
DBG_INFO("softs->pqi_dev_cap.min_iq_elem_len: %d\n", softs->pqi_dev_cap.min_iq_elem_len);
DBG_INFO("softs->pqi_dev_cap.max_oqs: %d\n", softs->pqi_dev_cap.max_oqs);
DBG_INFO("softs->pqi_dev_cap.max_oq_elements: %d\n", softs->pqi_dev_cap.max_oq_elements);
DBG_INFO("softs->pqi_dev_cap.max_oq_elem_len: %d\n", softs->pqi_dev_cap.max_oq_elem_len);
DBG_INFO("softs->pqi_dev_cap.intr_coales_time_granularity: %d\n", softs->pqi_dev_cap.intr_coales_time_granularity);
DBG_INFO("softs->max_ib_iu_length_per_fw: %d\n", softs->max_ib_iu_length_per_fw);
DBG_INFO("softs->ib_spanning_supported: %d\n", softs->ib_spanning_supported);
DBG_INFO("softs->ob_spanning_supported: %d\n", softs->ob_spanning_supported);
os_mem_free(softs, (void *)capability, sizeof(*capability));
os_dma_mem_free(softs, &pqi_cap_dma_buf);
DBG_FUNC("OUT\n");
return ret;
err_admin_req:
os_dma_mem_free(softs, &pqi_cap_dma_buf);
err_dma_alloc:
if (capability)
os_mem_free(softs, (void *)capability, sizeof(*capability));
err_out:
DBG_FUNC("failed OUT\n");
return PQI_STATUS_FAILURE;
}
/*
* Function used to deallocate the used rcb.
*/
void pqisrc_free_rcb(pqisrc_softstate_t *softs, int req_count)
{
uint32_t num_req;
size_t size;
int i;
DBG_FUNC("IN\n");
num_req = softs->max_outstanding_io + 1;
size = num_req * sizeof(rcb_t);
for (i = 1; i < req_count; i++)
os_dma_mem_free(softs, &softs->sg_dma_desc[i]);
os_mem_free(softs, (void *)softs->rcb, size);
softs->rcb = NULL;
DBG_FUNC("OUT\n");
}
/*
* Allocate memory for rcb and SG descriptors.
*/
static int pqisrc_allocate_rcb(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
int i = 0;
uint32_t num_req = 0;
uint32_t sg_buf_size = 0;
uint64_t alloc_size = 0;
rcb_t *rcb = NULL;
rcb_t *prcb = NULL;
DBG_FUNC("IN\n");
/* Set maximum outstanding requests */
/* The valid tag values are from 1, 2, ..., softs->max_outstanding_io
* The rcb will be accessed by using the tag as index
* As 0 tag index is not used, we need to allocate one extra.
*/
softs->max_outstanding_io = softs->pqi_cap.max_outstanding_io;
num_req = softs->max_outstanding_io + 1;
DBG_INFO("Max Outstanding IO reset to %d\n", num_req);
alloc_size = num_req * sizeof(rcb_t);
/* Allocate Non DMA memory */
rcb = os_mem_alloc(softs, alloc_size);
if (!rcb) {
DBG_ERR("Failed to allocate memory for rcb\n");
ret = PQI_STATUS_FAILURE;
goto err_out;
}
softs->rcb = rcb;
/* Allocate sg dma memory for sg chain */
sg_buf_size = softs->pqi_cap.max_sg_elem *
sizeof(sgt_t);
prcb = &softs->rcb[1];
/* Initialize rcb */
for (i = 1; i < num_req; i++) {
char tag[15];
sprintf(tag, "sg_dma_buf%d", i);
/*
 * Note: tag is a stack-local buffer, so this assignment is safe
 * only if os_dma_mem_alloc() copies the string rather than
 * storing the pointer.
 */
softs->sg_dma_desc[i].tag = tag;
softs->sg_dma_desc[i].size = sg_buf_size;
softs->sg_dma_desc[i].align = PQISRC_DEFAULT_DMA_ALIGN;
ret = os_dma_mem_alloc(softs, &softs->sg_dma_desc[i]);
if (ret) {
DBG_ERR("Failed to Allocate sg desc %d\n", ret);
ret = PQI_STATUS_FAILURE;
goto error;
}
prcb->sg_chain_virt = (sgt_t *)(softs->sg_dma_desc[i].virt_addr);
prcb->sg_chain_dma = (dma_addr_t)(softs->sg_dma_desc[i].dma_addr);
prcb++;
}
DBG_FUNC("OUT\n");
return ret;
error:
pqisrc_free_rcb(softs, i);
err_out:
DBG_FUNC("failed OUT\n");
return ret;
}
/*
* Function used to decide the operational queue configuration params
* - no of ibq/obq, shared/non-shared interrupt resource, IU spanning support
*/
void pqisrc_decide_opq_config(pqisrc_softstate_t *softs)
{
uint16_t total_iq_elements;
DBG_FUNC("IN\n");
DBG_INFO("softs->intr_count : %d softs->num_cpus_online : %d",
softs->intr_count, softs->num_cpus_online);
if (softs->intr_count == 1 || softs->num_cpus_online == 1) {
/* Share the event and Operational queue. */
softs->num_op_obq = 1;
softs->share_opq_and_eventq = true;
}
else {
/* Note: one OBQ (OBQ0) is reserved for the event queue. */
/*
 * intr_count has already been clamped to num_cpus_online in
 * os_get_intr_config(), so no separate MIN() is needed here.
 */
softs->num_op_obq = softs->intr_count - 1;
softs->share_opq_and_eventq = false;
}
#ifdef MULTIPLE_MSIX
/*
* softs->num_cpus_online is set as number of physical CPUs,
* So we can have more queues/interrupts .
*/
if (softs->intr_count > 1)
softs->share_opq_and_eventq = false;
#endif
DBG_INFO("softs->num_op_obq : %d\n",softs->num_op_obq);
softs->num_op_raid_ibq = softs->num_op_obq;
softs->num_op_aio_ibq = softs->num_op_raid_ibq;
softs->ibq_elem_size = softs->pqi_dev_cap.max_iq_elem_len * 16;
softs->obq_elem_size = softs->pqi_dev_cap.max_oq_elem_len * 16;
if (softs->max_ib_iu_length_per_fw == 256 &&
softs->ob_spanning_supported) {
/* older f/w that doesn't actually support spanning. */
softs->max_ib_iu_length = softs->ibq_elem_size;
} else {
/* max. inbound IU length is a multiple of our inbound element size. */
softs->max_ib_iu_length =
(softs->max_ib_iu_length_per_fw / softs->ibq_elem_size) *
softs->ibq_elem_size;
}
/*
 * If the max. outstanding I/O count comes with a max. spanning element
 * count, the total elements needed are the product of the max.
 * outstanding I/O count and the max. spanning element count per I/O.
 */
total_iq_elements = (softs->max_outstanding_io *
(softs->max_ib_iu_length / softs->ibq_elem_size));
softs->num_elem_per_op_ibq = total_iq_elements / softs->num_op_raid_ibq;
softs->num_elem_per_op_ibq = MIN(softs->num_elem_per_op_ibq,
softs->pqi_dev_cap.max_iq_elements);
softs->num_elem_per_op_obq = softs->max_outstanding_io / softs->num_op_obq;
softs->num_elem_per_op_obq = MIN(softs->num_elem_per_op_obq,
softs->pqi_dev_cap.max_oq_elements);
softs->max_sg_per_iu = ((softs->max_ib_iu_length -
softs->ibq_elem_size) /
sizeof(sgt_t)) +
MAX_EMBEDDED_SG_IN_FIRST_IU;
DBG_INFO("softs->max_ib_iu_length: %d\n", softs->max_ib_iu_length);
DBG_INFO("softs->num_elem_per_op_ibq: %d\n", softs->num_elem_per_op_ibq);
DBG_INFO("softs->num_elem_per_op_obq: %d\n", softs->num_elem_per_op_obq);
DBG_INFO("softs->max_sg_per_iu: %d\n", softs->max_sg_per_iu);
DBG_FUNC("OUT\n");
}
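As a worked example with illustrative values (not taken from any particular firmware): with intr_count = 8 there are 7 operational OBQs; max_iq_elem_len = 8 gives ibq_elem_size = 8 * 16 = 128 bytes; with max_ib_iu_length_per_fw = 1024 and spanning supported, max_ib_iu_length = (1024 / 128) * 128 = 1024. If max_outstanding_io = 1008, then total_iq_elements = 1008 * (1024 / 128) = 8064 and num_elem_per_op_ibq = 8064 / 7 = 1152 before the clamp against the device's max_iq_elements. Assuming a 16-byte sgt_t, max_sg_per_iu = ((1024 - 128) / 16) + MAX_EMBEDDED_SG_IN_FIRST_IU = 56 plus the embedded descriptor count.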
/*
* Configure the operational queue parameters.
*/
int pqisrc_configure_op_queues(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
/* Get the PQI capability,
REPORT PQI DEVICE CAPABILITY request */
ret = pqisrc_report_pqi_capability(softs);
if (ret) {
DBG_ERR("Failed to send report pqi dev capability request : %d\n",
ret);
goto err_out;
}
/* Reserve required no of slots for internal requests */
softs->max_io_for_scsi_ml = softs->max_outstanding_io - PQI_RESERVED_IO_SLOTS_CNT;
/* Decide the Op queue configuration */
pqisrc_decide_opq_config(softs);
DBG_FUNC("OUT\n");
return ret;
err_out:
DBG_FUNC("OUT failed\n");
return ret;
}
/*
* Validate the PQI mode of adapter.
*/
int pqisrc_check_pqimode(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_FAILURE;
int tmo = 0;
uint64_t signature = 0;
DBG_FUNC("IN\n");
/* Check the PQI device signature */
tmo = PQISRC_PQIMODE_READY_TIMEOUT;
do {
signature = LE_64(PCI_MEM_GET64(softs, &softs->pqi_reg->signature, PQI_SIGNATURE));
if (memcmp(&signature, PQISRC_PQI_DEVICE_SIGNATURE,
sizeof(uint64_t)) == 0) {
ret = PQI_STATUS_SUCCESS;
break;
}
OS_SLEEP(PQISRC_MODE_READY_POLL_INTERVAL);
} while (tmo--);
PRINT_PQI_SIGNATURE(signature);
if (tmo <= 0) {
DBG_ERR("PQI Signature is invalid\n");
ret = PQI_STATUS_TIMEOUT;
goto err_out;
}
tmo = PQISRC_PQIMODE_READY_TIMEOUT;
/* Check function and status code for the device */
COND_WAIT((PCI_MEM_GET64(softs, &softs->pqi_reg->admin_q_config,
PQI_ADMINQ_CONFIG) == PQI_ADMIN_QUEUE_CONF_FUNC_STATUS_IDLE), tmo);
if (!tmo) {
DBG_ERR("PQI device is not in IDLE state\n");
ret = PQI_STATUS_TIMEOUT;
goto err_out;
}
tmo = PQISRC_PQIMODE_READY_TIMEOUT;
/* Check the PQI device status register */
COND_WAIT(LE_32(PCI_MEM_GET32(softs, &softs->pqi_reg->pqi_dev_status, PQI_DEV_STATUS)) &
PQI_DEV_STATE_AT_INIT, tmo);
if (!tmo) {
DBG_ERR("PQI Registers are not ready\n");
ret = PQI_STATUS_TIMEOUT;
goto err_out;
}
DBG_FUNC("OUT\n");
return ret;
err_out:
DBG_FUNC("OUT failed\n");
return ret;
}
/*
* Get the PQI configuration table parameters.
* Currently using for heart-beat counter scratch-pad register.
*/
int pqisrc_process_config_table(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_FAILURE;
uint32_t config_table_size;
uint32_t section_off;
uint8_t *config_table_abs_addr;
struct pqi_conf_table *conf_table;
struct pqi_conf_table_section_header *section_hdr;
config_table_size = softs->pqi_cap.conf_tab_sz;
if (config_table_size < sizeof(*conf_table) ||
config_table_size > PQI_CONF_TABLE_MAX_LEN) {
DBG_ERR("Invalid PQI conf table length of %u\n",
config_table_size);
return ret;
}
conf_table = os_mem_alloc(softs, config_table_size);
if (!conf_table) {
DBG_ERR("Failed to allocate memory for PQI conf table\n");
return ret;
}
config_table_abs_addr = (uint8_t *)(softs->pci_mem_base_vaddr +
softs->pqi_cap.conf_tab_off);
PCI_MEM_GET_BUF(softs, config_table_abs_addr,
softs->pqi_cap.conf_tab_off,
(uint8_t*)conf_table, config_table_size);
if (memcmp(conf_table->sign, PQI_CONF_TABLE_SIGNATURE,
sizeof(conf_table->sign)) != 0) {
DBG_ERR("Invalid PQI config signature\n");
goto out;
}
section_off = LE_32(conf_table->first_section_off);
while (section_off) {
if (section_off + sizeof(*section_hdr) >= config_table_size) {
DBG_ERR("PQI config table section offset (%u) beyond "
"end of config table (config table length: %u)\n",
section_off, config_table_size);
break;
}
section_hdr = (struct pqi_conf_table_section_header *)((uint8_t *)conf_table + section_off);
switch (LE_16(section_hdr->section_id)) {
case PQI_CONF_TABLE_SECTION_GENERAL_INFO:
case PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES:
case PQI_CONF_TABLE_SECTION_FIRMWARE_ERRATA:
case PQI_CONF_TABLE_SECTION_DEBUG:
break;
case PQI_CONF_TABLE_SECTION_HEARTBEAT:
softs->heartbeat_counter_off = softs->pqi_cap.conf_tab_off +
section_off +
offsetof(struct pqi_conf_table_heartbeat,
heartbeat_counter);
softs->heartbeat_counter_abs_addr = (uint64_t *)(softs->pci_mem_base_vaddr +
softs->heartbeat_counter_off);
ret = PQI_STATUS_SUCCESS;
break;
default:
DBG_ERR("unrecognized PQI config table section ID: 0x%x\n",
LE_16(section_hdr->section_id));
break;
}
section_off = LE_16(section_hdr->next_section_off);
}
out:
os_mem_free(softs, (void *)conf_table, config_table_size);
return ret;
}
/* Wait for PQI reset completion for the adapter*/
int pqisrc_wait_for_pqi_reset_completion(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
pqi_reset_reg_t reset_reg;
int pqi_reset_timeout = 0;
uint64_t val = 0;
uint32_t max_timeout = 0;
val = PCI_MEM_GET64(softs, &softs->pqi_reg->pqi_dev_adminq_cap, PQI_ADMINQ_CAP);
max_timeout = (val & 0xFFFF00000000) >> 32;
DBG_INFO("max_timeout for PQI reset completion in 100 msec units = %u\n", max_timeout);
while(1) {
if (pqi_reset_timeout++ == max_timeout) {
return PQI_STATUS_TIMEOUT;
}
OS_SLEEP(PQI_RESET_POLL_INTERVAL); /* 100 msec */
reset_reg.all_bits = PCI_MEM_GET32(softs,
&softs->pqi_reg->dev_reset, PQI_DEV_RESET);
if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
break;
}
return ret;
}
/*
* Function used to perform PQI hard reset.
*/
int pqi_reset(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
uint32_t val = 0;
pqi_reset_reg_t pqi_reset_reg;
DBG_FUNC("IN\n");
if (true == softs->ctrl_in_pqi_mode) {
if (softs->pqi_reset_quiesce_allowed) {
val = PCI_MEM_GET32(softs, &softs->ioa_reg->host_to_ioa_db,
LEGACY_SIS_IDBR);
val |= SIS_PQI_RESET_QUIESCE;
PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db,
LEGACY_SIS_IDBR, LE_32(val));
ret = pqisrc_sis_wait_for_db_bit_to_clear(softs, SIS_PQI_RESET_QUIESCE);
if (ret) {
DBG_ERR("failed with error %d during quiesce\n", ret);
return ret;
}
}
pqi_reset_reg.all_bits = 0;
pqi_reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
pqi_reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;
PCI_MEM_PUT32(softs, &softs->pqi_reg->dev_reset, PQI_DEV_RESET,
LE_32(pqi_reset_reg.all_bits));
ret = pqisrc_wait_for_pqi_reset_completion(softs);
if (ret) {
DBG_ERR("PQI reset timed out: ret = %d!\n", ret);
return ret;
}
}
softs->ctrl_in_pqi_mode = false;
DBG_FUNC("OUT\n");
return ret;
}
/*
* Initialize the adapter with supported PQI configuration.
*/
int pqisrc_pqi_init(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
DBG_FUNC("IN\n");
/* Check the PQI signature */
ret = pqisrc_check_pqimode(softs);
if(ret) {
DBG_ERR("failed to switch to pqi\n");
goto err_out;
}
PQI_SAVE_CTRL_MODE(softs, CTRL_PQI_MODE);
softs->ctrl_in_pqi_mode = true;
/* Get the no. of online CPUs and the NUMA/processor config from the OS */
ret = os_get_processor_config(softs);
if (ret) {
DBG_ERR("Failed to get processor config from OS %d\n",
ret);
goto err_out;
}
/* Get the interrupt count, type, priority available from OS */
ret = os_get_intr_config(softs);
if (ret) {
DBG_ERR("Failed to get interrupt config from OS %d\n",
ret);
goto err_out;
}
/* Create Admin Queue pair*/
ret = pqisrc_create_admin_queue(softs);
if(ret) {
DBG_ERR("Failed to configure admin queue\n");
goto err_admin_queue;
}
/*
 * Creating the event and I/O operational queues requires submitting
 * admin IU requests, so allocate the resources for submitting IUs first.
 */
/* Allocate the request container block (rcb) */
ret = pqisrc_allocate_rcb(softs);
if (ret == PQI_STATUS_FAILURE) {
DBG_ERR("Failed to allocate rcb \n");
goto err_rcb;
}
/* Allocate & initialize request id queue */
ret = pqisrc_init_taglist(softs,&softs->taglist,
softs->max_outstanding_io);
if (ret) {
DBG_ERR("Failed to allocate memory for request id q : %d\n",
ret);
goto err_taglist;
}
ret = pqisrc_configure_op_queues(softs);
if (ret) {
DBG_ERR("Failed to configure op queue\n");
goto err_config_opq;
}
/* Create Operational queues */
ret = pqisrc_create_op_queues(softs);
if(ret) {
DBG_ERR("Failed to create op queue\n");
ret = PQI_STATUS_FAILURE;
goto err_create_opq;
}
softs->ctrl_online = true;
DBG_FUNC("OUT\n");
return ret;
err_create_opq:
err_config_opq:
pqisrc_destroy_taglist(softs,&softs->taglist);
err_taglist:
pqisrc_free_rcb(softs, softs->max_outstanding_io + 1);
err_rcb:
pqisrc_destroy_admin_queue(softs);
err_admin_queue:
os_free_intr_config(softs);
err_out:
DBG_FUNC("OUT failed\n");
return PQI_STATUS_FAILURE;
}
/* Force the controller back into SIS mode. */
int pqisrc_force_sis(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
if (SIS_IS_KERNEL_PANIC(softs)) {
DBG_INFO("Controller FW is not runnning");
return PQI_STATUS_FAILURE;
}
if (PQI_GET_CTRL_MODE(softs) == CTRL_SIS_MODE) {
return ret;
}
if (SIS_IS_KERNEL_UP(softs)) {
PQI_SAVE_CTRL_MODE(softs, CTRL_SIS_MODE);
return ret;
}
/* Disable MSI-X interrupts */
sis_disable_msix(softs);
/* reset pqi, this will delete queues */
ret = pqi_reset(softs);
if (ret) {
return ret;
}
/* Re enable SIS */
ret = pqisrc_reenable_sis(softs);
if (ret) {
return ret;
}
PQI_SAVE_CTRL_MODE(softs, CTRL_SIS_MODE);
return ret;
}
/*
* Uninitialize the resources used during PQI initialization.
*/
void pqisrc_pqi_uninit(pqisrc_softstate_t *softs)
{
int i;
DBG_FUNC("IN\n");
if (softs->devlist_lockcreated == true) {
os_uninit_spinlock(&softs->devlist_lock);
softs->devlist_lockcreated = false;
}
for (i = 0; i < softs->num_op_raid_ibq; i++) {
/* OP RAID IB Q */
if (softs->op_raid_ib_q[i].lockcreated == true) {
OS_UNINIT_PQILOCK(&softs->op_raid_ib_q[i].lock);
softs->op_raid_ib_q[i].lockcreated = false;
}
/* OP AIO IB Q */
if (softs->op_aio_ib_q[i].lockcreated == true) {
OS_UNINIT_PQILOCK(&softs->op_aio_ib_q[i].lock);
softs->op_aio_ib_q[i].lockcreated = false;
}
}
/* Free Op queues */
os_dma_mem_free(softs, &softs->op_ibq_dma_mem);
os_dma_mem_free(softs, &softs->op_obq_dma_mem);
os_dma_mem_free(softs, &softs->event_q_dma_mem);
/* Complete all pending commands. */
os_complete_outstanding_cmds_nodevice(softs);
/* Free rcb */
pqisrc_free_rcb(softs, softs->max_outstanding_io + 1);
/* Free request id lists */
pqisrc_destroy_taglist(softs,&softs->taglist);
if (softs->admin_ib_queue.lockcreated == true) {
OS_UNINIT_PQILOCK(&softs->admin_ib_queue.lock);
softs->admin_ib_queue.lockcreated = false;
}
/* Free Admin Queue */
os_dma_mem_free(softs, &softs->admin_queue_dma_mem);
/* Switch back to SIS mode */
if (pqisrc_force_sis(softs)) {
DBG_ERR("Failed to switch back the adapter to SIS mode!\n");
}
DBG_FUNC("OUT\n");
}
/*
* Function to initialize the adapter settings.
*/
int pqisrc_init(pqisrc_softstate_t *softs)
{
int ret = 0;
int i = 0, j = 0;
DBG_FUNC("IN\n");
check_struct_sizes();
/* Init the Sync interface */
ret = pqisrc_sis_init(softs);
if (ret) {
DBG_ERR("SIS Init failed with error %d\n", ret);
goto err_out;
}
/* Init the PQI interface */
ret = pqisrc_pqi_init(softs);
if (ret) {
DBG_ERR("PQI Init failed with error %d\n", ret);
goto err_pqi;
}
/* Setup interrupt */
ret = os_setup_intr(softs);
if (ret) {
DBG_ERR("Interrupt setup failed with error %d\n", ret);
goto err_intr;
}
/* Report event configuration */
ret = pqisrc_report_event_config(softs);
if(ret){
DBG_ERR(" Failed to configure Report events\n");
goto err_event;
}
/* Set event configuration*/
ret = pqisrc_set_event_config(softs);
if(ret){
DBG_ERR(" Failed to configure Set events\n");
goto err_event;
}
/* Check for PQI spanning */
ret = pqisrc_get_ctrl_fw_version(softs);
if(ret){
DBG_ERR(" Failed to get ctrl fw version\n");
goto err_fw_version;
}
/* update driver version into FW */
ret = pqisrc_write_driver_version_to_host_wellness(softs);
if (ret) {
DBG_ERR(" Failed to update driver version in to FW");
goto err_host_wellness;
}
os_strlcpy(softs->devlist_lock_name, "devlist_lock", LOCKNAME_SIZE);
ret = os_init_spinlock(softs, &softs->devlist_lock, softs->devlist_lock_name);
if(ret){
DBG_ERR(" Failed to initialize devlist_lock\n");
softs->devlist_lockcreated=false;
goto err_lock;
}
softs->devlist_lockcreated = true;
ret = os_create_semaphore("scan_lock", 1, &softs->scan_lock);
if(ret != PQI_STATUS_SUCCESS){
DBG_ERR(" Failed to initialize scan lock\n");
goto err_scan_lock;
}
OS_ATOMIC64_SET(softs, num_intrs, 0);
softs->prev_num_intrs = softs->num_intrs;
/* Get the PQI configuration table to read heart-beat counter*/
if (PQI_NEW_HEARTBEAT_MECHANISM(softs)) {
ret = pqisrc_process_config_table(softs);
if (ret) {
DBG_ERR("Failed to process PQI configuration table %d\n", ret);
goto err_config_tab;
}
}
if (PQI_NEW_HEARTBEAT_MECHANISM(softs))
softs->prev_heartbeat_count = CTRLR_HEARTBEAT_CNT(softs) - OS_FW_HEARTBEAT_TIMER_INTERVAL;
/* Init device list */
for(i = 0; i < PQI_MAX_DEVICES; i++)
for(j = 0; j < PQI_MAX_MULTILUN; j++)
softs->device_list[i][j] = NULL;
DBG_FUNC("OUT\n");
return ret;
err_config_tab:
os_destroy_semaphore(&softs->scan_lock);
err_scan_lock:
if (softs->devlist_lockcreated == true) {
os_uninit_spinlock(&softs->devlist_lock);
softs->devlist_lockcreated = false;
}
err_lock:
err_fw_version:
err_event:
err_host_wellness:
os_destroy_intr(softs);
err_intr:
pqisrc_pqi_uninit(softs);
err_pqi:
pqisrc_sis_uninit(softs);
err_out:
DBG_FUNC("OUT failed\n");
return ret;
}
/*
* Write all data in the adapter's battery-backed cache to
* storage.
*/
int pqisrc_flush_cache(pqisrc_softstate_t *softs,
enum pqisrc_flush_cache_event_type event_type)
{
int rval = PQI_STATUS_SUCCESS;
pqisrc_raid_req_t request;
pqisrc_bmic_flush_cache_t *flush_buff = NULL;
DBG_FUNC("IN\n");
if (pqisrc_ctrl_offline(softs))
return PQI_STATUS_FAILURE;
flush_buff = os_mem_alloc(softs, sizeof(pqisrc_bmic_flush_cache_t));
if (!flush_buff) {
DBG_ERR("Failed to allocate memory for flush cache params\n");
rval = PQI_STATUS_FAILURE;
return rval;
}
flush_buff->halt_event = event_type;
memset(&request, 0, sizeof(request));
rval = pqisrc_build_send_raid_request(softs, &request, flush_buff,
sizeof(*flush_buff), SA_CACHE_FLUSH, 0,
(uint8_t *)RAID_CTLR_LUNID, NULL);
if (rval) {
DBG_ERR("error in build send raid req ret=%d\n", rval);
}
if (flush_buff)
os_mem_free(softs, (void *)flush_buff,
sizeof(pqisrc_bmic_flush_cache_t));
DBG_FUNC("OUT\n");
return rval;
}
/*
* Uninitialize the adapter.
*/
void pqisrc_uninit(pqisrc_softstate_t *softs)
{
DBG_FUNC("IN\n");
os_destroy_intr(softs);
os_destroy_semaphore(&softs->scan_lock);
pqisrc_pqi_uninit(softs);
pqisrc_sis_uninit(softs);
pqisrc_cleanup_devices(softs);
DBG_FUNC("OUT\n");
}
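For orientation, a hypothetical sketch of how an OS attach path would pair pqisrc_init() and pqisrc_uninit() (the real glue lives in smartpqi_main.c, which is not shown in this excerpt; the function name and error handling below are illustrative):

static int
example_attach(pqisrc_softstate_t *softs)
{
	int ret;

	ret = pqisrc_init(softs);	/* SIS init, PQI init, interrupts, events */
	if (ret != PQI_STATUS_SUCCESS)
		return ret;

	ret = pqisrc_rescan_devices(softs);	/* initial device discovery */
	if (ret != PQI_STATUS_SUCCESS) {
		pqisrc_uninit(softs);	/* unwinds everything in reverse order */
		return ret;
	}
	return PQI_STATUS_SUCCESS;
}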


@@ -0,0 +1,437 @@
/*-
* Copyright (c) 2018 Microsemi Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* $FreeBSD$ */
#include "smartpqi_includes.h"
/*
* Function to get processor count
*/
int os_get_processor_config(pqisrc_softstate_t *softs)
{
DBG_FUNC("IN\n");
softs->num_cpus_online = mp_ncpus;
DBG_FUNC("OUT\n");
return PQI_STATUS_SUCCESS;
}
/*
* Function to get interrupt count and type supported
*/
int os_get_intr_config(pqisrc_softstate_t *softs)
{
device_t dev;
int msi_count = 0;
int error = 0;
int ret = PQI_STATUS_SUCCESS;
dev = softs->os_specific.pqi_dev;
DBG_FUNC("IN\n");
msi_count = pci_msix_count(dev);
if (msi_count > softs->num_cpus_online)
msi_count = softs->num_cpus_online;
if (msi_count > PQI_MAX_MSIX)
msi_count = PQI_MAX_MSIX;
if (msi_count == 0 || (error = pci_alloc_msix(dev, &msi_count)) != 0) {
device_printf(dev, "alloc msix failed - msi_count=%d, err=%d; "
"will try MSI\n", msi_count, error);
pci_release_msi(dev);
} else {
softs->intr_count = msi_count;
softs->intr_type = INTR_TYPE_MSIX;
softs->os_specific.msi_enabled = TRUE;
device_printf(dev, "using MSI-X interrupts (%d vectors)\n",
msi_count);
}
if (!softs->intr_type) {
msi_count = 1;
if ((error = pci_alloc_msi(dev, &msi_count)) != 0) {
device_printf(dev, "alloc msi failed - err=%d; "
"will use INTx\n", error);
pci_release_msi(dev);
} else {
softs->os_specific.msi_enabled = TRUE;
softs->intr_count = msi_count;
softs->intr_type = INTR_TYPE_MSI;
device_printf(dev, "using MSI interrupts\n");
}
}
if (!softs->intr_type) {
device_printf(dev, "using legacy interrupts\n");
softs->intr_type = INTR_TYPE_FIXED;
softs->intr_count = 1;
}
if(!softs->intr_type) {
DBG_FUNC("OUT failed\n");
ret = PQI_STATUS_FAILURE;
return ret;
}
DBG_FUNC("OUT\n");
return ret;
}
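/*
 * Summary of the fallback ladder above: request one MSI-X vector per
 * online CPU (capped at PQI_MAX_MSIX), drop to a single MSI vector if
 * MSI-X allocation fails, and use a shared legacy INTx line as the last
 * resort.  intr_type records which level succeeded so that
 * os_setup_intr() can pick the matching registration routine.
 */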
void os_eventtaskqueue_enqueue(pqisrc_softstate_t *sc)
{
taskqueue_enqueue(taskqueue_swi, &sc->os_specific.event_task);
}
void pqisrc_event_worker(void *arg1, int arg2)
{
pqisrc_ack_all_events(arg1);
}
/*
* ithread routine to handle uniprocessor systems
*/
static void shared_ithread_routine(void *arg)
{
pqi_intr_ctx_t *intr_ctx = (pqi_intr_ctx_t *)arg;
pqisrc_softstate_t *softs = device_get_softc(intr_ctx->pqi_dev);
int oq_id = intr_ctx->oq_id;
DBG_FUNC("IN\n");
pqisrc_process_response_queue(softs, oq_id);
pqisrc_process_event_intr_src(softs, oq_id - 1);
DBG_FUNC("OUT\n");
}
/*
* ithread routine to process non event response
*/
static void common_ithread_routine(void *arg)
{
pqi_intr_ctx_t *intr_ctx = (pqi_intr_ctx_t *)arg;
pqisrc_softstate_t *softs = device_get_softc(intr_ctx->pqi_dev);
int oq_id = intr_ctx->oq_id;
DBG_FUNC("IN\n");
pqisrc_process_response_queue(softs, oq_id);
DBG_FUNC("OUT\n");
}
static void event_ithread_routine(void *arg)
{
pqi_intr_ctx_t *intr_ctx = (pqi_intr_ctx_t *)arg;
pqisrc_softstate_t *softs = device_get_softc(intr_ctx->pqi_dev);
int oq_id = intr_ctx->oq_id;
DBG_FUNC("IN\n");
pqisrc_process_event_intr_src(softs, oq_id);
DBG_FUNC("OUT\n");
}
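/*
 * Three flavours of ithread handler: shared_ithread_routine() services
 * both an I/O response queue and the event queue on a single vector,
 * common_ithread_routine() drains one I/O response queue, and
 * event_ithread_routine() handles only asynchronous events.
 * register_legacy_intr() and register_msix_intr() below choose among
 * them based on how many vectors were granted.
 */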
/*
* Registration of legacy interrupt in case MSI is unsupported
*/
int register_legacy_intr(pqisrc_softstate_t *softs)
{
int error = 0;
device_t dev;
DBG_FUNC("IN\n");
dev = softs->os_specific.pqi_dev;
softs->os_specific.pqi_irq_rid[0] = 0;
softs->os_specific.pqi_irq[0] = bus_alloc_resource_any(dev, \
SYS_RES_IRQ, &softs->os_specific.pqi_irq_rid[0],
RF_ACTIVE | RF_SHAREABLE);
if (NULL == softs->os_specific.pqi_irq[0]) {
DBG_ERR("Failed to allocate resource for interrupt\n");
return PQI_STATUS_FAILURE;
}
if ((softs->os_specific.msi_ctx = os_mem_alloc(softs,sizeof(pqi_intr_ctx_t))) == NULL) {
DBG_ERR("Failed to allocate memory for msi_ctx\n");
return PQI_STATUS_FAILURE;
}
softs->os_specific.msi_ctx[0].pqi_dev = dev;
softs->os_specific.msi_ctx[0].oq_id = 0;
error = bus_setup_intr(dev, softs->os_specific.pqi_irq[0],
INTR_TYPE_CAM | INTR_MPSAFE, \
NULL, shared_ithread_routine,
&softs->os_specific.msi_ctx[0],
&softs->os_specific.intrcookie[0]);
if (error) {
DBG_ERR("Failed to setup legacy interrupt err = %d\n", error);
return error;
}
softs->os_specific.intr_registered[0] = TRUE;
DBG_FUNC("OUT error = %d\n", error);
return error;
}
/*
* Registration of MSIx
*/
int register_msix_intr(pqisrc_softstate_t *softs)
{
int error = 0;
int i = 0;
device_t dev;
dev = softs->os_specific.pqi_dev;
int msix_count = softs->intr_count;
DBG_FUNC("IN\n");
softs->os_specific.msi_ctx = os_mem_alloc(softs, sizeof(pqi_intr_ctx_t) * msix_count);
/*Add shared handler */
if (softs->share_opq_and_eventq) {
softs->os_specific.pqi_irq_rid[i] = i+1;
softs->os_specific.pqi_irq[i] = bus_alloc_resource_any(dev, \
SYS_RES_IRQ,
&softs->os_specific.pqi_irq_rid[i],
RF_SHAREABLE | RF_ACTIVE);
if (NULL == softs->os_specific.pqi_irq[i]) {
DBG_ERR("Failed to allocate \
event interrupt resource\n");
return PQI_STATUS_FAILURE;
}
softs->os_specific.msi_ctx[i].pqi_dev = dev;
softs->os_specific.msi_ctx[i].oq_id = i;
error = bus_setup_intr(dev,softs->os_specific.pqi_irq[i],
INTR_TYPE_CAM | INTR_MPSAFE,\
NULL,
shared_ithread_routine,
&softs->os_specific.msi_ctx[i],
&softs->os_specific.intrcookie[i]);
if (error) {
DBG_ERR("Failed to setup interrupt for events r=%d\n",
error);
return error;
}
softs->os_specific.intr_registered[i] = TRUE;
}
else {
/* Add event handler */
softs->os_specific.pqi_irq_rid[i] = i+1;
softs->os_specific.pqi_irq[i] = bus_alloc_resource_any(dev, \
SYS_RES_IRQ,
&softs->os_specific.pqi_irq_rid[i],
RF_SHAREABLE | RF_ACTIVE);
if (NULL == softs->os_specific.pqi_irq[i]) {
DBG_ERR("ERR : Failed to allocate \
event interrupt resource\n");
return PQI_STATUS_FAILURE;
}
softs->os_specific.msi_ctx[i].pqi_dev = dev;
softs->os_specific.msi_ctx[i].oq_id = i;
error = bus_setup_intr(dev,softs->os_specific.pqi_irq[i],
INTR_TYPE_CAM | INTR_MPSAFE,\
NULL,
event_ithread_routine,
&softs->os_specific.msi_ctx[i],
&softs->os_specific.intrcookie[i]);
if (error) {
DBG_ERR("Failed to setup interrupt for events err=%d\n",
error);
return error;
}
softs->os_specific.intr_registered[i] = TRUE;
/* Add interrupt handlers*/
for (i = 1; i < msix_count; ++i) {
softs->os_specific.pqi_irq_rid[i] = i+1;
softs->os_specific.pqi_irq[i] = \
bus_alloc_resource_any(dev,
SYS_RES_IRQ,
&softs->os_specific.pqi_irq_rid[i],
RF_SHAREABLE | RF_ACTIVE);
if (NULL == softs->os_specific.pqi_irq[i]) {
DBG_ERR("Failed to allocate \
msi/x interrupt resource\n");
return PQI_STATUS_FAILURE;
}
softs->os_specific.msi_ctx[i].pqi_dev = dev;
softs->os_specific.msi_ctx[i].oq_id = i;
error = bus_setup_intr(dev,
softs->os_specific.pqi_irq[i],
INTR_TYPE_CAM | INTR_MPSAFE,\
NULL,
common_ithread_routine,
&softs->os_specific.msi_ctx[i],
&softs->os_specific.intrcookie[i]);
if (error) {
DBG_ERR("Failed to setup \
msi/x interrupt error = %d\n", error);
return error;
}
softs->os_specific.intr_registered[i] = TRUE;
}
}
DBG_FUNC("OUT error = %d\n", error);
return error;
}
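/*
 * MSI/MSI-X SYS_RES_IRQ resource IDs are 1-based on FreeBSD, hence the
 * rid = i + 1 mapping above: vector 0 carries events (or the shared
 * handler when share_opq_and_eventq is set) and vectors 1..n-1 each
 * drain one I/O response queue.
 */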
/*
* Setup interrupt depending on the configuration
*/
int os_setup_intr(pqisrc_softstate_t *softs)
{
int error = 0;
DBG_FUNC("IN\n");
if (softs->intr_type == INTR_TYPE_FIXED) {
error = register_legacy_intr(softs);
}
else {
error = register_msix_intr(softs);
}
if (error) {
DBG_FUNC("OUT failed error = %d\n", error);
return error;
}
DBG_FUNC("OUT error = %d\n", error);
return error;
}
/*
* Deregistration of legacy interrupt
*/
void deregister_pqi_intx(pqisrc_softstate_t *softs)
{
device_t dev;
DBG_FUNC("IN\n");
dev = softs->os_specific.pqi_dev;
if (softs->os_specific.pqi_irq[0] != NULL) {
if (softs->os_specific.intr_registered[0]) {
bus_teardown_intr(dev, softs->os_specific.pqi_irq[0],
softs->os_specific.intrcookie[0]);
softs->os_specific.intr_registered[0] = FALSE;
}
bus_release_resource(dev, SYS_RES_IRQ,
softs->os_specific.pqi_irq_rid[0],
softs->os_specific.pqi_irq[0]);
softs->os_specific.pqi_irq[0] = NULL;
os_mem_free(softs, (char*)softs->os_specific.msi_ctx, sizeof(pqi_intr_ctx_t));
}
DBG_FUNC("OUT\n");
}
/*
* Deregistration of MSIx interrupt
*/
void deregister_pqi_msix(pqisrc_softstate_t *softs)
{
device_t dev;
dev = softs->os_specific.pqi_dev;
int msix_count = softs->intr_count;
int i = 0;
DBG_FUNC("IN\n");
os_mem_free(softs, (char*)softs->os_specific.msi_ctx, sizeof(pqi_intr_ctx_t) * msix_count);
softs->os_specific.msi_ctx = NULL;
for (; i < msix_count; ++i) {
if (softs->os_specific.pqi_irq[i] != NULL) {
if (softs->os_specific.intr_registered[i]) {
bus_teardown_intr(dev,
softs->os_specific.pqi_irq[i],
softs->os_specific.intrcookie[i]);
softs->os_specific.intr_registered[i] = FALSE;
}
bus_release_resource(dev, SYS_RES_IRQ,
softs->os_specific.pqi_irq_rid[i],
softs->os_specific.pqi_irq[i]);
softs->os_specific.pqi_irq[i] = NULL;
}
}
DBG_FUNC("OUT\n");
}
/*
* Function to destroy interrupts registered
*/
int os_destroy_intr(pqisrc_softstate_t *softs)
{
device_t dev;
dev = softs->os_specific.pqi_dev;
DBG_FUNC("IN\n");
if (softs->intr_type == INTR_TYPE_FIXED) {
deregister_pqi_intx(softs);
} else if (softs->intr_type == INTR_TYPE_MSIX) {
deregister_pqi_msix(softs);
}
if (softs->os_specific.msi_enabled) {
pci_release_msi(dev);
softs->os_specific.msi_enabled = FALSE;
}
DBG_FUNC("OUT\n");
return PQI_STATUS_SUCCESS;
}
/*
* Free interrupt related resources for the adapter
*/
void os_free_intr_config(pqisrc_softstate_t *softs)
{
device_t dev;
dev = softs->os_specific.pqi_dev;
DBG_FUNC("IN\n");
if (softs->os_specific.msi_enabled) {
pci_release_msi(dev);
softs->os_specific.msi_enabled = FALSE;
}
DBG_FUNC("OUT\n");
}


@ -0,0 +1,402 @@
/*-
* Copyright (c) 2018 Microsemi Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* $FreeBSD$ */
/*
* Management interface for smartpqi driver
*/
#include "smartpqi_includes.h"
/*
 * Wrapper function to copy from kernel to user
*/
int os_copy_to_user(struct pqisrc_softstate *softs, void *dest_buf,
void *src_buf, int size, int mode)
{
return(copyout(src_buf, dest_buf, size));
}
/*
* Wrapper function to copy from user to kernel
*/
int os_copy_from_user(struct pqisrc_softstate *softs, void *dest_buf,
void *src_buf, int size, int mode)
{
return(copyin(src_buf, dest_buf, size));
}
/*
* Device open function for ioctl entry
*/
static int smartpqi_open(struct cdev *cdev, int flags, int devtype,
struct thread *td)
{
int error = PQI_STATUS_SUCCESS;
return error;
}
/*
* Device close function for ioctl entry
*/
static int smartpqi_close(struct cdev *cdev, int flags, int devtype,
struct thread *td)
{
int error = PQI_STATUS_SUCCESS;
return error;
}
/*
* ioctl for getting driver info
*/
static void smartpqi_get_driver_info_ioctl(caddr_t udata, struct cdev *cdev)
{
struct pqisrc_softstate *softs = cdev->si_drv1;
pdriver_info driver_info = (pdriver_info)udata;
DBG_FUNC("IN udata = %p cdev = %p\n", udata, cdev);
driver_info->major_version = PQISRC_DRIVER_MAJOR;
driver_info->minor_version = PQISRC_DRIVER_MINOR;
driver_info->release_version = PQISRC_DRIVER_RELEASE;
driver_info->build_revision = PQISRC_DRIVER_REVISION;
driver_info->max_targets = PQI_MAX_DEVICES - 1;
driver_info->max_io = softs->max_io_for_scsi_ml;
driver_info->max_transfer_length = softs->pqi_cap.max_transfer_size;
DBG_FUNC("OUT\n");
}
/*
* ioctl for getting controller info
*/
static void smartpqi_get_pci_info_ioctl(caddr_t udata, struct cdev *cdev)
{
struct pqisrc_softstate *softs = cdev->si_drv1;
device_t dev = softs->os_specific.pqi_dev;
pqi_pci_info_t *pci_info = (pqi_pci_info_t *)udata;
uint32_t sub_vendor = 0;
uint32_t sub_device = 0;
uint32_t vendor = 0;
uint32_t device = 0;
DBG_FUNC("IN udata = %p cdev = %p\n", udata, cdev);
pci_info->bus = pci_get_bus(dev);
pci_info->dev_fn = pci_get_function(dev);
pci_info->domain = pci_get_domain(dev);
sub_vendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
sub_device = pci_read_config(dev, PCIR_SUBDEV_0, 2);
pci_info->board_id = ((sub_device << 16) & 0xffff0000) | sub_vendor;
vendor = pci_get_vendor(dev);
device = pci_get_device(dev);
pci_info->chip_id = ((device << 16) & 0xffff0000) | vendor;
DBG_FUNC("OUT\n");
}
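/*
 * Worked example of the packing above, using IDs from the probe table
 * in the PCI attachment code: a P408i-a SR Gen10 has subvendor 0x103c
 * and subdevice 0x0602, so board_id = (0x0602 << 16) | 0x103c =
 * 0x0602103c; with vendor 0x9005 and device 0x028f, chip_id =
 * 0x028f9005.
 */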
/*
* ioctl entry point for user
*/
static int smartpqi_ioctl(struct cdev *cdev, u_long cmd, caddr_t udata,
int flags, struct thread *td)
{
int error = PQI_STATUS_SUCCESS;
struct pqisrc_softstate *softs = cdev->si_drv1;
DBG_FUNC("IN cmd = 0x%lx udata = %p cdev = %p\n", cmd, udata, cdev);
	if (!udata) {
		DBG_ERR("udata is null !!\n");
	}
	if (pqisrc_ctrl_offline(softs)) {
		DBG_ERR("Controller is offline !!\n");
		return ENOTTY;
	}
switch (cmd) {
case CCISS_GETDRIVVER:
smartpqi_get_driver_info_ioctl(udata, cdev);
break;
case CCISS_GETPCIINFO:
smartpqi_get_pci_info_ioctl(udata, cdev);
break;
	case SMARTPQI_PASS_THRU:
	case CCISS_PASSTHRU:
		/*
		 * Pass-thru failures are reported to the caller through
		 * iocommand->error_info in the copied-out structure, so
		 * the ioctl itself returns success once the command has
		 * been processed.
		 */
		(void)pqisrc_passthru_ioctl(softs, udata, 0);
		error = PQI_STATUS_SUCCESS;
		break;
case CCISS_REGNEWD:
error = pqisrc_scan_devices(softs);
break;
default:
DBG_WARN( "!IOCTL cmd 0x%lx not supported", cmd);
error = ENOTTY;
break;
}
DBG_FUNC("OUT error = %d\n", error);
return error;
}
static d_open_t smartpqi_open;
static d_ioctl_t smartpqi_ioctl;
static d_close_t smartpqi_close;
static struct cdevsw smartpqi_cdevsw =
{
.d_version = D_VERSION,
.d_open = smartpqi_open,
.d_close = smartpqi_close,
.d_ioctl = smartpqi_ioctl,
.d_name = "smartpqi",
};
/*
* Function to create device node for ioctl
*/
int create_char_dev(struct pqisrc_softstate *softs, int card_index)
{
int error = PQI_STATUS_SUCCESS;
DBG_FUNC("IN idx = %d\n", card_index);
softs->os_specific.cdev = make_dev(&smartpqi_cdevsw, card_index,
UID_ROOT, GID_OPERATOR, 0640,
"smartpqi%u", card_index);
if(softs->os_specific.cdev) {
softs->os_specific.cdev->si_drv1 = softs;
} else {
error = PQI_STATUS_FAILURE;
}
DBG_FUNC("OUT error = %d\n", error);
return error;
}
/*
* Function to destroy device node for ioctl
*/
void destroy_char_dev(struct pqisrc_softstate *softs)
{
DBG_FUNC("IN\n");
if (softs->os_specific.cdev) {
destroy_dev(softs->os_specific.cdev);
softs->os_specific.cdev = NULL;
}
DBG_FUNC("OUT\n");
}
/*
 * Function used to send passthru commands to the adapter
 * to support management tools, e.g., ssacli, sscon.
*/
int
pqisrc_passthru_ioctl(struct pqisrc_softstate *softs, void *arg, int mode)
{
int ret = PQI_STATUS_SUCCESS;
char *drv_buf = NULL;
uint32_t tag = 0;
IOCTL_Command_struct *iocommand = (IOCTL_Command_struct *)arg;
dma_mem_t ioctl_dma_buf;
pqisrc_raid_req_t request;
raid_path_error_info_elem_t error_info;
ib_queue_t *ib_q = &softs->op_raid_ib_q[PQI_DEFAULT_IB_QUEUE];
ob_queue_t *ob_q = &softs->op_ob_q[PQI_DEFAULT_IB_QUEUE];
rcb_t *rcb = NULL;
memset(&request, 0, sizeof(request));
memset(&error_info, 0, sizeof(error_info));
DBG_FUNC("IN");
if (pqisrc_ctrl_offline(softs))
return PQI_STATUS_FAILURE;
if (!arg)
return (PQI_STATUS_FAILURE);
if (iocommand->buf_size < 1 &&
iocommand->Request.Type.Direction != PQIIOCTL_NONE)
return PQI_STATUS_FAILURE;
if (iocommand->Request.CDBLen > sizeof(request.cdb))
return PQI_STATUS_FAILURE;
switch (iocommand->Request.Type.Direction) {
case PQIIOCTL_NONE:
case PQIIOCTL_WRITE:
case PQIIOCTL_READ:
case PQIIOCTL_BIDIRECTIONAL:
break;
default:
return PQI_STATUS_FAILURE;
}
if (iocommand->buf_size > 0) {
memset(&ioctl_dma_buf, 0, sizeof(struct dma_mem));
ioctl_dma_buf.tag = "Ioctl_PassthruCmd_Buffer";
ioctl_dma_buf.size = iocommand->buf_size;
ioctl_dma_buf.align = PQISRC_DEFAULT_DMA_ALIGN;
/* allocate memory */
ret = os_dma_mem_alloc(softs, &ioctl_dma_buf);
if (ret) {
DBG_ERR("Failed to Allocate dma mem for Ioctl PassthruCmd Buffer : %d\n", ret);
ret = PQI_STATUS_FAILURE;
goto out;
}
DBG_INFO("ioctl_dma_buf.dma_addr = %p\n",(void*)ioctl_dma_buf.dma_addr);
DBG_INFO("ioctl_dma_buf.virt_addr = %p\n",(void*)ioctl_dma_buf.virt_addr);
drv_buf = (char *)ioctl_dma_buf.virt_addr;
if (iocommand->Request.Type.Direction & PQIIOCTL_WRITE) {
if ((ret = os_copy_from_user(softs, (void *)drv_buf, (void *)iocommand->buf,
iocommand->buf_size, mode)) != 0) {
ret = PQI_STATUS_FAILURE;
goto free_mem;
}
}
}
request.header.iu_type = PQI_IU_TYPE_RAID_PATH_IO_REQUEST;
request.header.iu_length = offsetof(pqisrc_raid_req_t, sg_descriptors[1]) -
PQI_REQUEST_HEADER_LENGTH;
memcpy(request.lun_number, iocommand->LUN_info.LunAddrBytes,
sizeof(request.lun_number));
memcpy(request.cdb, iocommand->Request.CDB, iocommand->Request.CDBLen);
request.additional_cdb_bytes_usage = PQI_ADDITIONAL_CDB_BYTES_0;
switch (iocommand->Request.Type.Direction) {
case PQIIOCTL_NONE:
request.data_direction = SOP_DATA_DIR_NONE;
break;
case PQIIOCTL_WRITE:
request.data_direction = SOP_DATA_DIR_FROM_DEVICE;
break;
case PQIIOCTL_READ:
request.data_direction = SOP_DATA_DIR_TO_DEVICE;
break;
case PQIIOCTL_BIDIRECTIONAL:
request.data_direction = SOP_DATA_DIR_BIDIRECTIONAL;
break;
}
request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
if (iocommand->buf_size > 0) {
request.buffer_length = iocommand->buf_size;
request.sg_descriptors[0].addr = ioctl_dma_buf.dma_addr;
request.sg_descriptors[0].len = iocommand->buf_size;
request.sg_descriptors[0].flags = SG_FLAG_LAST;
}
tag = pqisrc_get_tag(&softs->taglist);
request.request_id = tag;
request.response_queue_id = ob_q->q_id;
request.error_index = request.request_id;
rcb = &softs->rcb[tag];
rcb->success_cmp_callback = pqisrc_process_internal_raid_response_success;
rcb->error_cmp_callback = pqisrc_process_internal_raid_response_error;
rcb->tag = tag;
rcb->req_pending = true;
/* Submit Command */
ret = pqisrc_submit_cmnd(softs, ib_q, &request);
if (ret != PQI_STATUS_SUCCESS) {
DBG_ERR("Unable to submit command\n");
goto err_out;
}
ret = pqisrc_wait_on_condition(softs, rcb);
if (ret != PQI_STATUS_SUCCESS) {
DBG_ERR("Passthru IOCTL cmd timed out !!\n");
goto err_out;
}
memset(&iocommand->error_info, 0, sizeof(iocommand->error_info));
if (rcb->status) {
size_t sense_data_length;
memcpy(&error_info, rcb->error_info, sizeof(error_info));
iocommand->error_info.ScsiStatus = error_info.status;
sense_data_length = error_info.sense_data_len;
if (!sense_data_length)
sense_data_length = error_info.resp_data_len;
if (sense_data_length &&
(sense_data_length > sizeof(error_info.data)))
sense_data_length = sizeof(error_info.data);
if (sense_data_length) {
if (sense_data_length >
sizeof(iocommand->error_info.SenseInfo))
sense_data_length =
sizeof(iocommand->error_info.SenseInfo);
memcpy (iocommand->error_info.SenseInfo,
error_info.data, sense_data_length);
iocommand->error_info.SenseLen = sense_data_length;
}
if (error_info.data_out_result ==
PQI_RAID_DATA_IN_OUT_UNDERFLOW){
rcb->status = REQUEST_SUCCESS;
}
}
if (rcb->status == REQUEST_SUCCESS && iocommand->buf_size > 0 &&
(iocommand->Request.Type.Direction & PQIIOCTL_READ)) {
if ((ret = os_copy_to_user(softs, (void*)iocommand->buf,
(void*)drv_buf, iocommand->buf_size, mode)) != 0) {
DBG_ERR("Failed to copy the response\n");
goto err_out;
}
}
os_reset_rcb(rcb);
pqisrc_put_tag(&softs->taglist, request.request_id);
if (iocommand->buf_size > 0)
os_dma_mem_free(softs,&ioctl_dma_buf);
DBG_FUNC("OUT\n");
return ret;
err_out:
os_reset_rcb(rcb);
pqisrc_put_tag(&softs->taglist, request.request_id);
free_mem:
if (iocommand->buf_size > 0)
os_dma_mem_free(softs, &ioctl_dma_buf);
out:
DBG_FUNC("Failed OUT\n");
return PQI_STATUS_FAILURE;
}
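/*
 * A minimal user-space sketch of driving the pass-thru path above
 * through the character device.  The device path, CDB contents, the
 * availability of the smartpqi ioctl definitions in user space, and
 * the assumption that passthru_buf_type_t is a plain user pointer are
 * illustrative only and not part of the driver source.
 */
#if 0	/* example only; not compiled into the driver */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>

static int
example_inquiry(void)
{
	IOCTL_Command_struct cmd;
	unsigned char inq[96];
	int fd, ret;

	fd = open("/dev/smartpqi0", O_RDWR);	/* node from create_char_dev() */
	if (fd < 0)
		return (-1);
	memset(&cmd, 0, sizeof(cmd));
	cmd.Request.CDBLen = 6;
	cmd.Request.CDB[0] = 0x12;		/* SCSI INQUIRY */
	cmd.Request.CDB[4] = sizeof(inq);
	cmd.Request.Type.Direction = PQIIOCTL_READ;
	cmd.buf_size = sizeof(inq);
	cmd.buf = inq;		/* assumes a user-space buffer pointer */
	ret = ioctl(fd, CCISS_PASSTHRU, &cmd);
	close(fd);
	return (ret);
}
#endif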


@ -0,0 +1,144 @@
/*-
* Copyright (c) 2018 Microsemi Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* $FreeBSD$ */
#ifndef _PQI_IOCTL_H_
#define _PQI_IOCTL_H_
/* IOCTL passthrough macros and structures */
#define SENSEINFOBYTES 32 /* note that this value may vary
between host implementations */
/* transfer direction */
#define PQIIOCTL_NONE 0x00
#define PQIIOCTL_WRITE 0x01
#define PQIIOCTL_READ 0x02
#define PQIIOCTL_BIDIRECTIONAL (PQIIOCTL_READ | PQIIOCTL_WRITE)
/* Type defs used in the following structs */
#define BYTE uint8_t
#define WORD uint16_t
#define HWORD uint16_t
#define DWORD uint32_t
/* Command List Structure */
typedef union _SCSI3Addr_struct {
struct {
BYTE Dev;
BYTE Bus:6;
BYTE Mode:2; /* b00 */
} PeripDev;
struct {
BYTE DevLSB;
BYTE DevMSB:6;
BYTE Mode:2; /* b01 */
} LogDev;
struct {
BYTE Dev:5;
BYTE Bus:3;
BYTE Targ:6;
BYTE Mode:2; /* b10 */
} LogUnit;
}OS_ATTRIBUTE_PACKED SCSI3Addr_struct;
typedef struct _PhysDevAddr_struct {
DWORD TargetId:24;
DWORD Bus:6;
DWORD Mode:2;
SCSI3Addr_struct Target[2]; /* 2 level target device addr */
}OS_ATTRIBUTE_PACKED PhysDevAddr_struct;
typedef struct _LogDevAddr_struct {
DWORD VolId:30;
DWORD Mode:2;
BYTE reserved[4];
}OS_ATTRIBUTE_PACKED LogDevAddr_struct;
typedef union _LUNAddr_struct {
BYTE LunAddrBytes[8];
SCSI3Addr_struct SCSI3Lun[4];
PhysDevAddr_struct PhysDev;
LogDevAddr_struct LogDev;
}OS_ATTRIBUTE_PACKED LUNAddr_struct;
typedef struct _RequestBlock_struct {
BYTE CDBLen;
struct {
BYTE Type:3;
BYTE Attribute:3;
BYTE Direction:2;
} Type;
HWORD Timeout;
BYTE CDB[16];
}OS_ATTRIBUTE_PACKED RequestBlock_struct;
typedef union _MoreErrInfo_struct{
struct {
BYTE Reserved[3];
BYTE Type;
DWORD ErrorInfo;
} Common_Info;
struct{
BYTE Reserved[2];
BYTE offense_size; /* size of offending entry */
BYTE offense_num; /* byte # of offense 0-base */
DWORD offense_value;
} Invalid_Cmd;
}OS_ATTRIBUTE_PACKED MoreErrInfo_struct;
typedef struct _ErrorInfo_struct {
BYTE ScsiStatus;
BYTE SenseLen;
HWORD CommandStatus;
DWORD ResidualCnt;
MoreErrInfo_struct MoreErrInfo;
BYTE SenseInfo[SENSEINFOBYTES];
}OS_ATTRIBUTE_PACKED ErrorInfo_struct;
typedef struct pqi_ioctl_passthruCmd_struct {
LUNAddr_struct LUN_info;
RequestBlock_struct Request;
ErrorInfo_struct error_info;
WORD buf_size; /* size in bytes of the buf */
passthru_buf_type_t buf;
}OS_ATTRIBUTE_PACKED IOCTL_Command_struct;
#endif /* _PQI_IOCTL_H_ */


@ -0,0 +1,500 @@
/*-
* Copyright (c) 2018 Microsemi Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* $FreeBSD$ */
/*
* Driver for the Microsemi Smart storage controllers
*/
#include "smartpqi_includes.h"
#include "smartpqi_prototypes.h"
/*
* Supported devices
*/
struct pqi_ident
{
u_int16_t vendor;
u_int16_t device;
u_int16_t subvendor;
u_int16_t subdevice;
int hwif;
char *desc;
} pqi_identifiers[] = {
/* (MSCC PM8205 8x12G based) */
{0x9005, 0x028f, 0x103c, 0x600, PQI_HWIF_SRCV, "P408i-p SR Gen10"},
{0x9005, 0x028f, 0x103c, 0x601, PQI_HWIF_SRCV, "P408e-p SR Gen10"},
{0x9005, 0x028f, 0x103c, 0x602, PQI_HWIF_SRCV, "P408i-a SR Gen10"},
{0x9005, 0x028f, 0x103c, 0x603, PQI_HWIF_SRCV, "P408i-c SR Gen10"},
{0x9005, 0x028f, 0x1028, 0x1FE0, PQI_HWIF_SRCV, "SmartRAID 3162-8i/eDell"},
{0x9005, 0x028f, 0x9005, 0x608, PQI_HWIF_SRCV, "SmartRAID 3162-8i/e"},
{0x9005, 0x028f, 0x103c, 0x609, PQI_HWIF_SRCV, "P408i-sb SR G10"},
/* (MSCC PM8225 8x12G based) */
{0x9005, 0x028f, 0x103c, 0x650, PQI_HWIF_SRCV, "E208i-p SR Gen10"},
{0x9005, 0x028f, 0x103c, 0x651, PQI_HWIF_SRCV, "E208e-p SR Gen10"},
{0x9005, 0x028f, 0x103c, 0x652, PQI_HWIF_SRCV, "E208i-c SR Gen10"},
{0x9005, 0x028f, 0x103c, 0x654, PQI_HWIF_SRCV, "E208i-a SR Gen10"},
{0x9005, 0x028f, 0x103c, 0x655, PQI_HWIF_SRCV, "P408e-m SR Gen10"},
/* (MSCC PM8221 8x12G based) */
{0x9005, 0x028f, 0x103c, 0x700, PQI_HWIF_SRCV, "P204i-c SR Gen10"},
{0x9005, 0x028f, 0x103c, 0x701, PQI_HWIF_SRCV, "P204i-b SR Gen10"},
/* (MSCC PM8204 8x12G based) */
{0x9005, 0x028f, 0x9005, 0x800, PQI_HWIF_SRCV, "SmartRAID 3154-8i"},
{0x9005, 0x028f, 0x9005, 0x801, PQI_HWIF_SRCV, "SmartRAID 3152-8i"},
{0x9005, 0x028f, 0x9005, 0x802, PQI_HWIF_SRCV, "SmartRAID 3151-4i"},
{0x9005, 0x028f, 0x9005, 0x803, PQI_HWIF_SRCV, "SmartRAID 3101-4i"},
{0x9005, 0x028f, 0x9005, 0x804, PQI_HWIF_SRCV, "SmartRAID 3154-8e"},
{0x9005, 0x028f, 0x9005, 0x805, PQI_HWIF_SRCV, "SmartRAID 3102-8i"},
{0x9005, 0x028f, 0x9005, 0x806, PQI_HWIF_SRCV, "SmartRAID 3100"},
{0x9005, 0x028f, 0x9005, 0x807, PQI_HWIF_SRCV, "SmartRAID 3162-8i"},
{0x9005, 0x028f, 0x152d, 0x8a22, PQI_HWIF_SRCV, "QS-8204-8i"},
/* (MSCC PM8222 8x12G based) */
{0x9005, 0x028f, 0x9005, 0x900, PQI_HWIF_SRCV, "SmartHBA 2100-8i"},
{0x9005, 0x028f, 0x9005, 0x901, PQI_HWIF_SRCV, "SmartHBA 2100-4i"},
{0x9005, 0x028f, 0x9005, 0x902, PQI_HWIF_SRCV, "HBA 1100-8i"},
{0x9005, 0x028f, 0x9005, 0x903, PQI_HWIF_SRCV, "HBA 1100-4i"},
{0x9005, 0x028f, 0x9005, 0x904, PQI_HWIF_SRCV, "SmartHBA 2100-8e"},
{0x9005, 0x028f, 0x9005, 0x905, PQI_HWIF_SRCV, "HBA 1100-8e"},
{0x9005, 0x028f, 0x9005, 0x906, PQI_HWIF_SRCV, "SmartHBA 2100-4i4e"},
{0x9005, 0x028f, 0x9005, 0x907, PQI_HWIF_SRCV, "HBA 1100"},
{0x9005, 0x028f, 0x9005, 0x908, PQI_HWIF_SRCV, "SmartHBA 2100"},
{0x9005, 0x028f, 0x9005, 0x90a, PQI_HWIF_SRCV, "SmartHBA 2100A-8i"},
/* (SRCx MSCC FVB 24x12G based) */
{0x9005, 0x028f, 0x103c, 0x1001, PQI_HWIF_SRCV, "MSCC FVB"},
/* (MSCC PM8241 24x12G based) */
/* (MSCC PM8242 24x12G based) */
{0x9005, 0x028f, 0x152d, 0x8a37, PQI_HWIF_SRCV, "QS-8242-24i"},
{0x9005, 0x028f, 0x9005, 0x1300, PQI_HWIF_SRCV, "HBA 1100-8i8e"},
{0x9005, 0x028f, 0x9005, 0x1301, PQI_HWIF_SRCV, "HBA 1100-24i"},
{0x9005, 0x028f, 0x9005, 0x1302, PQI_HWIF_SRCV, "SmartHBA 2100-8i8e"},
{0x9005, 0x028f, 0x9005, 0x1303, PQI_HWIF_SRCV, "SmartHBA 2100-24i"},
/* (MSCC PM8236 16x12G based) */
{0x9005, 0x028f, 0x152d, 0x8a24, PQI_HWIF_SRCV, "QS-8236-16i"},
{0x9005, 0x028f, 0x9005, 0x1380, PQI_HWIF_SRCV, "SmartRAID 3154-16i"},
/* (MSCC PM8237 24x12G based) */
{0x9005, 0x028f, 0x103c, 0x1100, PQI_HWIF_SRCV, "P816i-a SR Gen10"},
{0x9005, 0x028f, 0x103c, 0x1101, PQI_HWIF_SRCV, "P416ie-m SR G10"},
/* (MSCC PM8238 16x12G based) */
{0x9005, 0x028f, 0x152d, 0x8a23, PQI_HWIF_SRCV, "QS-8238-16i"},
{0x9005, 0x028f, 0x9005, 0x1280, PQI_HWIF_SRCV, "HBA 1100-16i"},
{0x9005, 0x028f, 0x9005, 0x1281, PQI_HWIF_SRCV, "HBA 1100-16e"},
/* (MSCC PM8240 24x12G based) */
{0x9005, 0x028f, 0x152d, 0x8a36, PQI_HWIF_SRCV, "QS-8240-24i"},
{0x9005, 0x028f, 0x9005, 0x1200, PQI_HWIF_SRCV, "SmartRAID 3154-24i"},
{0x9005, 0x028f, 0x9005, 0x1201, PQI_HWIF_SRCV, "SmartRAID 3154-8i16e"},
{0x9005, 0x028f, 0x9005, 0x1202, PQI_HWIF_SRCV, "SmartRAID 3154-8i8e"},
{0, 0, 0, 0, 0, 0}
};
struct pqi_ident
pqi_family_identifiers[] = {
{0x9005, 0x028f, 0, 0, PQI_HWIF_SRCV, "Smart Array Storage Controller"},
{0, 0, 0, 0, 0, 0}
};
/*
* Function to identify the installed adapter.
*/
static struct pqi_ident *
pqi_find_ident(device_t dev)
{
struct pqi_ident *m;
u_int16_t vendid, devid, sub_vendid, sub_devid;
vendid = pci_get_vendor(dev);
devid = pci_get_device(dev);
sub_vendid = pci_get_subvendor(dev);
sub_devid = pci_get_subdevice(dev);
for (m = pqi_identifiers; m->vendor != 0; m++) {
if ((m->vendor == vendid) && (m->device == devid) &&
(m->subvendor == sub_vendid) &&
(m->subdevice == sub_devid)) {
return (m);
}
}
for (m = pqi_family_identifiers; m->vendor != 0; m++) {
if ((m->vendor == vendid) && (m->device == devid)) {
return (m);
}
}
return (NULL);
}
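/*
 * Matching is two-pass: an exact vendor/device/subvendor/subdevice hit
 * in pqi_identifiers wins and supplies the marketing name; otherwise
 * any 0x9005/0x028f part falls back to pqi_family_identifiers and
 * attaches with the generic "Smart Array Storage Controller" label.
 */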
/*
* Determine whether this is one of our supported adapters.
*/
static int
smartpqi_probe(device_t dev)
{
struct pqi_ident *id;
if ((id = pqi_find_ident(dev)) != NULL) {
device_set_desc(dev, id->desc);
return(BUS_PROBE_VENDOR);
}
return(ENXIO);
}
/*
* Store Bus/Device/Function in softs
*/
void pqisrc_save_controller_info(struct pqisrc_softstate *softs)
{
device_t dev = softs->os_specific.pqi_dev;
softs->bus_id = (uint32_t)pci_get_bus(dev);
softs->device_id = (uint32_t)pci_get_device(dev);
softs->func_id = (uint32_t)pci_get_function(dev);
}
/*
* Allocate resources for our device, set up the bus interface.
* Initialize the PQI related functionality, scan devices, register sim to
* upper layer, create management interface device node etc.
*/
static int
smartpqi_attach(device_t dev)
{
struct pqisrc_softstate *softs = NULL;
struct pqi_ident *id = NULL;
int error = 0;
u_int32_t command = 0, i = 0;
int card_index = device_get_unit(dev);
rcb_t *rcbp = NULL;
/*
* Initialise softc.
*/
softs = device_get_softc(dev);
if (!softs) {
printf("Could not get softc\n");
error = EINVAL;
goto out;
}
memset(softs, 0, sizeof(*softs));
softs->os_specific.pqi_dev = dev;
DBG_FUNC("IN\n");
/* assume failure is 'not configured' */
error = ENXIO;
/*
* Verify that the adapter is correctly set up in PCI space.
*/
pci_enable_busmaster(softs->os_specific.pqi_dev);
command = pci_read_config(softs->os_specific.pqi_dev, PCIR_COMMAND, 2);
if ((command & PCIM_CMD_MEMEN) == 0) {
DBG_ERR("memory window not available command = %d\n", command);
error = ENXIO;
goto out;
}
/*
* Detect the hardware interface version, set up the bus interface
* indirection.
*/
id = pqi_find_ident(dev);
softs->os_specific.pqi_hwif = id->hwif;
switch(softs->os_specific.pqi_hwif) {
case PQI_HWIF_SRCV:
DBG_INFO("set hardware up for PMC SRCv for %p", softs);
break;
default:
softs->os_specific.pqi_hwif = PQI_HWIF_UNKNOWN;
DBG_ERR("unknown hardware type\n");
error = ENXIO;
goto out;
}
pqisrc_save_controller_info(softs);
/*
* Allocate the PCI register window.
*/
softs->os_specific.pqi_regs_rid0 = PCIR_BAR(0);
if ((softs->os_specific.pqi_regs_res0 =
bus_alloc_resource_any(softs->os_specific.pqi_dev, SYS_RES_MEMORY,
&softs->os_specific.pqi_regs_rid0, RF_ACTIVE)) == NULL) {
DBG_ERR("couldn't allocate register window 0\n");
/* assume failure is 'out of memory' */
error = ENOMEM;
goto out;
}
bus_get_resource_start(softs->os_specific.pqi_dev, SYS_RES_MEMORY,
softs->os_specific.pqi_regs_rid0);
softs->pci_mem_handle.pqi_btag = rman_get_bustag(softs->os_specific.pqi_regs_res0);
softs->pci_mem_handle.pqi_bhandle = rman_get_bushandle(softs->os_specific.pqi_regs_res0);
/* softs->pci_mem_base_vaddr = (uintptr_t)rman_get_virtual(softs->os_specific.pqi_regs_res0); */
softs->pci_mem_base_vaddr = (char *)rman_get_virtual(softs->os_specific.pqi_regs_res0);
/*
* Allocate the parent bus DMA tag appropriate for our PCI interface.
*
* Note that some of these controllers are 64-bit capable.
*/
if (bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
PAGE_SIZE, 0, /* algnmnt, boundary */
BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
BUS_SPACE_UNRESTRICTED, /* nsegments */
BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
0, /* flags */
NULL, NULL, /* No locking needed */
&softs->os_specific.pqi_parent_dmat)) {
DBG_ERR("can't allocate parent DMA tag\n");
/* assume failure is 'out of memory' */
error = ENOMEM;
goto dma_out;
}
softs->os_specific.sim_registered = FALSE;
softs->os_name = "FreeBSD ";
/* Initialize the PQI library */
error = pqisrc_init(softs);
if (error) {
DBG_ERR("Failed to initialize pqi lib error = %d\n", error);
error = PQI_STATUS_FAILURE;
goto out;
}
mtx_init(&softs->os_specific.cam_lock, "cam_lock", NULL, MTX_DEF);
softs->os_specific.mtx_init = TRUE;
mtx_init(&softs->os_specific.map_lock, "map_lock", NULL, MTX_DEF);
/*
* Create DMA tag for mapping buffers into controller-addressable space.
*/
if (bus_dma_tag_create(softs->os_specific.pqi_parent_dmat,/* parent */
1, 0, /* algnmnt, boundary */
BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
softs->pqi_cap.max_sg_elem*PAGE_SIZE,/*maxsize*/
softs->pqi_cap.max_sg_elem, /* nsegments */
BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
BUS_DMA_ALLOCNOW, /* flags */
busdma_lock_mutex, /* lockfunc */
&softs->os_specific.map_lock, /* lockfuncarg*/
&softs->os_specific.pqi_buffer_dmat)) {
DBG_ERR("can't allocate buffer DMA tag for pqi_buffer_dmat\n");
return (ENOMEM);
}
rcbp = &softs->rcb[1];
for( i = 1; i <= softs->pqi_cap.max_outstanding_io; i++, rcbp++ ) {
if ((error = bus_dmamap_create(softs->os_specific.pqi_buffer_dmat, 0, &rcbp->cm_datamap)) != 0) {
DBG_ERR("Cant create datamap for buf @"
"rcbp = %p maxio = %d error = %d\n",
rcbp, softs->pqi_cap.max_outstanding_io, error);
goto dma_out;
}
}
os_start_heartbeat_timer((void *)softs); /* Start the heart-beat timer */
softs->os_specific.wellness_periodic = timeout( os_wellness_periodic,
softs, 120*hz);
/* Register our shutdown handler. */
softs->os_specific.eh = EVENTHANDLER_REGISTER(shutdown_final,
smartpqi_shutdown, softs, SHUTDOWN_PRI_DEFAULT);
error = pqisrc_scan_devices(softs);
if (error) {
DBG_ERR("Failed to scan lib error = %d\n", error);
error = PQI_STATUS_FAILURE;
goto out;
}
error = register_sim(softs, card_index);
if (error) {
DBG_ERR("Failed to register sim index = %d error = %d\n",
card_index, error);
goto out;
}
smartpqi_target_rescan(softs);
TASK_INIT(&softs->os_specific.event_task, 0, pqisrc_event_worker,softs);
error = create_char_dev(softs, card_index);
if (error) {
DBG_ERR("Failed to register character device index=%d r=%d\n",
card_index, error);
goto out;
}
goto out;
dma_out:
if (softs->os_specific.pqi_regs_res0 != NULL)
bus_release_resource(softs->os_specific.pqi_dev, SYS_RES_MEMORY,
softs->os_specific.pqi_regs_rid0,
softs->os_specific.pqi_regs_res0);
out:
DBG_FUNC("OUT error = %d\n", error);
return(error);
}
/*
* Deallocate resources for our device.
*/
static int
smartpqi_detach(device_t dev)
{
struct pqisrc_softstate *softs = NULL;
softs = device_get_softc(dev);
DBG_FUNC("IN\n");
EVENTHANDLER_DEREGISTER(shutdown_final, softs->os_specific.eh);
/* kill the periodic event */
untimeout(os_wellness_periodic, softs,
softs->os_specific.wellness_periodic);
/* Kill the heart beat event */
untimeout(os_start_heartbeat_timer, softs,
softs->os_specific.heartbeat_timeout_id);
smartpqi_shutdown(softs);
destroy_char_dev(softs);
pqisrc_uninit(softs);
deregister_sim(softs);
pci_release_msi(dev);
DBG_FUNC("OUT\n");
return 0;
}
/*
* Bring the controller to a quiescent state, ready for system suspend.
*/
static int
smartpqi_suspend(device_t dev)
{
struct pqisrc_softstate *softs;
softs = device_get_softc(dev);
DBG_FUNC("IN\n");
DBG_INFO("Suspending the device %p\n", softs);
softs->os_specific.pqi_state |= SMART_STATE_SUSPEND;
DBG_FUNC("OUT\n");
return(0);
}
/*
* Bring the controller back to a state ready for operation.
*/
static int
smartpqi_resume(device_t dev)
{
struct pqisrc_softstate *softs;
softs = device_get_softc(dev);
DBG_FUNC("IN\n");
softs->os_specific.pqi_state &= ~SMART_STATE_SUSPEND;
DBG_FUNC("OUT\n");
return(0);
}
/*
* Do whatever is needed during a system shutdown.
*/
int
smartpqi_shutdown(void *arg)
{
struct pqisrc_softstate *softs = NULL;
int rval = 0;
DBG_FUNC("IN\n");
softs = (struct pqisrc_softstate *)arg;
rval = pqisrc_flush_cache(softs, PQISRC_SHUTDOWN);
if (rval != PQI_STATUS_SUCCESS) {
DBG_ERR("Unable to flush adapter cache! rval = %d", rval);
}
DBG_FUNC("OUT\n");
return rval;
}
static int smartpqi_probe(device_t dev);
static int smartpqi_attach(device_t dev);
static int smartpqi_detach(device_t dev);
static int smartpqi_suspend(device_t dev);
static int smartpqi_resume(device_t dev);
/*
* PCI bus interface.
*/
static device_method_t pqi_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, smartpqi_probe),
DEVMETHOD(device_attach, smartpqi_attach),
DEVMETHOD(device_detach, smartpqi_detach),
DEVMETHOD(device_suspend, smartpqi_suspend),
DEVMETHOD(device_resume, smartpqi_resume),
{ 0, 0 }
};
static devclass_t pqi_devclass;
static driver_t smartpqi_pci_driver = {
"smartpqi",
pqi_methods,
sizeof(struct pqisrc_softstate)
};
DRIVER_MODULE(smartpqi, pci, smartpqi_pci_driver, pqi_devclass, 0, 0);
MODULE_DEPEND(smartpqi, pci, 1, 1, 1);


@ -0,0 +1,184 @@
/*-
* Copyright (c) 2018 Microsemi Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* $FreeBSD$ */
#include "smartpqi_includes.h"
MALLOC_DEFINE(M_SMARTRAID, "smartraidbuf", "Buffers for the smartraid driver");
/*
* DMA map load callback function
*/
static void
os_dma_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
bus_addr_t *paddr = (bus_addr_t *)arg;
*paddr = segs[0].ds_addr;
}
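/*
 * The load callback above records only segs[0] and ignores nseg and
 * error; this is adequate here because the tags created in
 * os_dma_mem_alloc() below use nsegments = 1, so a successful load is
 * always a single physically contiguous segment.
 */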
int os_dma_setup(pqisrc_softstate_t *softs)
{
DBG_FUNC("IN\n");
DBG_FUNC("OUT\n");
return PQI_STATUS_SUCCESS;
}
int os_dma_destroy(pqisrc_softstate_t *softs)
{
DBG_FUNC("IN\n");
DBG_FUNC("OUT\n");
return PQI_STATUS_SUCCESS;
}
/*
* DMA mem resource allocation wrapper function
*/
int os_dma_mem_alloc(pqisrc_softstate_t *softs, struct dma_mem *dma_mem)
{
int ret = 0;
/* DBG_FUNC("IN\n"); */
/* DMA memory needed - allocate it */
if ((ret = bus_dma_tag_create(
softs->os_specific.pqi_parent_dmat, /* parent */
dma_mem->align, 0, /* algnmnt, boundary */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
dma_mem->size, /* maxsize */
1, /* nsegments */
dma_mem->size, /* maxsegsize */
0, /* flags */
NULL, NULL, /* No locking needed */
&dma_mem->dma_tag)) != 0 ) {
DBG_ERR("can't allocate DMA tag with error = 0x%x\n", ret);
goto err_out;
}
if ((ret = bus_dmamem_alloc(dma_mem->dma_tag, (void **)&dma_mem->virt_addr,
BUS_DMA_NOWAIT, &dma_mem->dma_map)) != 0) {
DBG_ERR("can't allocate DMA memory for required object \
with error = 0x%x\n", ret);
goto err_mem;
}
if((ret = bus_dmamap_load(dma_mem->dma_tag, dma_mem->dma_map,
dma_mem->virt_addr, dma_mem->size,
os_dma_map, &dma_mem->dma_addr, 0)) != 0) {
DBG_ERR("can't load DMA memory for required \
object with error = 0x%x\n", ret);
goto err_load;
}
memset(dma_mem->virt_addr, 0, dma_mem->size);
/* DBG_FUNC("OUT\n"); */
return ret;
err_load:
if(dma_mem->virt_addr)
bus_dmamem_free(dma_mem->dma_tag, dma_mem->virt_addr,
dma_mem->dma_map);
err_mem:
if(dma_mem->dma_tag)
bus_dma_tag_destroy(dma_mem->dma_tag);
err_out:
DBG_FUNC("failed OUT\n");
return ret;
}
/*
* DMA mem resource deallocation wrapper function
*/
void os_dma_mem_free(pqisrc_softstate_t *softs, struct dma_mem *dma_mem)
{
/* DBG_FUNC("IN\n"); */
if(dma_mem->dma_addr) {
bus_dmamap_unload(dma_mem->dma_tag, dma_mem->dma_map);
dma_mem->dma_addr = 0;
}
if(dma_mem->virt_addr) {
bus_dmamem_free(dma_mem->dma_tag, dma_mem->virt_addr,
dma_mem->dma_map);
dma_mem->virt_addr = NULL;
}
if(dma_mem->dma_tag) {
bus_dma_tag_destroy(dma_mem->dma_tag);
dma_mem->dma_tag = NULL;
}
/* DBG_FUNC("OUT\n"); */
}
/*
* Mem resource allocation wrapper function
*/
void *os_mem_alloc(pqisrc_softstate_t *softs, size_t size)
{
void *addr = NULL;
/* DBG_FUNC("IN\n"); */
addr = malloc((unsigned long)size, M_SMARTRAID,
M_NOWAIT | M_ZERO);
/* DBG_FUNC("OUT\n"); */
return addr;
}
/*
* Mem resource deallocation wrapper function
*/
void os_mem_free(pqisrc_softstate_t *softs,
char *addr, size_t size)
{
/* DBG_FUNC("IN\n"); */
free((void*)addr, M_SMARTRAID);
/* DBG_FUNC("OUT\n"); */
}
/*
* dma/bus resource deallocation wrapper function
*/
void os_resource_free(pqisrc_softstate_t *softs)
{
if(softs->os_specific.pqi_parent_dmat)
bus_dma_tag_destroy(softs->os_specific.pqi_parent_dmat);
if (softs->os_specific.pqi_regs_res0 != NULL)
bus_release_resource(softs->os_specific.pqi_dev,
SYS_RES_MEMORY,
softs->os_specific.pqi_regs_rid0,
softs->os_specific.pqi_regs_res0);
}


@ -0,0 +1,172 @@
/*-
* Copyright (c) 2018 Microsemi Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* $FreeBSD$ */
#include "smartpqi_includes.h"
/*
 * Populate host wellness time variables in BCD format from FreeBSD format
*/
void os_get_time(struct bmic_host_wellness_time *host_wellness_time)
{
struct timespec ts;
struct clocktime ct;
getnanotime(&ts);
clock_ts_to_ct(&ts, &ct);
/* Fill the time In BCD Format */
	host_wellness_time->hour = (uint8_t)bin2bcd(ct.hour);
	host_wellness_time->min = (uint8_t)bin2bcd(ct.min);
	host_wellness_time->sec = (uint8_t)bin2bcd(ct.sec);
host_wellness_time->reserved = 0;
host_wellness_time->month = (uint8_t)bin2bcd(ct.mon);
host_wellness_time->day = (uint8_t)bin2bcd(ct.day);
host_wellness_time->century = (uint8_t)bin2bcd(ct.year / 100);
host_wellness_time->year = (uint8_t)bin2bcd(ct.year % 100);
}
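/*
 * Worked example: for April 6, 2018 23:59:07, bin2bcd() packs each
 * field as two BCD digits, giving hour = 0x23, min = 0x59, sec = 0x07,
 * month = 0x04, day = 0x06, century = bin2bcd(2018 / 100) = 0x20 and
 * year = bin2bcd(2018 % 100) = 0x18.
 */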
/*
* Update host time to f/w every 24 hours in a periodic timer.
*/
void os_wellness_periodic(void *data)
{
struct pqisrc_softstate *softs = (struct pqisrc_softstate *)data;
int ret = 0;
/* update time to FW */
if (!pqisrc_ctrl_offline(softs)){
if( (ret = pqisrc_write_current_time_to_host_wellness(softs)) != 0 )
DBG_ERR("Failed to update time to FW in periodic ret = %d\n", ret);
}
/* reschedule ourselves */
softs->os_specific.wellness_periodic = timeout(os_wellness_periodic,
softs, OS_HOST_WELLNESS_TIMEOUT * hz);
}
/*
* Routine used to stop the heart-beat timer
*/
void os_stop_heartbeat_timer(pqisrc_softstate_t *softs)
{
DBG_FUNC("IN\n");
/* Kill the heart beat event */
untimeout(os_start_heartbeat_timer, softs,
softs->os_specific.heartbeat_timeout_id);
DBG_FUNC("OUT\n");
}
/*
* Routine used to start the heart-beat timer
*/
void os_start_heartbeat_timer(void *data)
{
struct pqisrc_softstate *softs = (struct pqisrc_softstate *)data;
DBG_FUNC("IN\n");
pqisrc_heartbeat_timer_handler(softs);
if (!pqisrc_ctrl_offline(softs)) {
softs->os_specific.heartbeat_timeout_id =
timeout(os_start_heartbeat_timer, softs,
OS_FW_HEARTBEAT_TIMER_INTERVAL * hz);
}
DBG_FUNC("OUT\n");
}
/*
 * Spinlock initialization function
*/
int os_init_spinlock(struct pqisrc_softstate *softs, struct mtx *lock,
char *lockname)
{
mtx_init(lock, lockname, NULL, MTX_SPIN);
return 0;
}
/*
 * Spinlock uninitialization function
*/
void os_uninit_spinlock(struct mtx *lock)
{
mtx_destroy(lock);
return;
}
/*
* Semaphore initialization function
*/
int os_create_semaphore(const char *name, int value, struct sema *sema)
{
sema_init(sema, value, name);
return PQI_STATUS_SUCCESS;
}
/*
* Semaphore uninitialization function
*/
int os_destroy_semaphore(struct sema *sema)
{
sema_destroy(sema);
return PQI_STATUS_SUCCESS;
}
/*
* Semaphore grab function
*/
void inline os_sema_lock(struct sema *sema)
{
sema_post(sema);
}
/*
* Semaphore release function
*/
void inline os_sema_unlock(struct sema *sema)
{
sema_wait(sema);
}
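/*
 * Note that os_sema_lock() posts and os_sema_unlock() waits, the
 * reverse of the classic P/V convention; the names describe how the
 * driver uses the semaphore (e.g. scan_lock) together with the initial
 * count passed to os_create_semaphore(), not the underlying primitive.
 */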
/*
* string copy wrapper function
*/
int os_strlcpy(char *dst, char *src, int size)
{
return strlcpy(dst, src, size);
}


@ -0,0 +1,263 @@
/*-
* Copyright (c) 2018 Microsemi Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* $FreeBSD$ */
#ifndef _PQI_PROTOTYPES_H
#define _PQI_PROTOTYPES_H
/* Function prototypes */
/*pqi_init.c */
int pqisrc_init(pqisrc_softstate_t *);
void pqisrc_uninit(pqisrc_softstate_t *);
void pqisrc_pqi_uninit(pqisrc_softstate_t *);
int pqisrc_process_config_table(pqisrc_softstate_t *);
int pqisrc_flush_cache(pqisrc_softstate_t *, enum pqisrc_flush_cache_event_type);
int pqisrc_wait_for_pqi_reset_completion(pqisrc_softstate_t *);
/* pqi_sis.c*/
int pqisrc_sis_init(pqisrc_softstate_t *);
void pqisrc_sis_uninit(pqisrc_softstate_t *);
int pqisrc_reenable_sis(pqisrc_softstate_t *);
void pqisrc_trigger_nmi_sis(pqisrc_softstate_t *);
void sis_disable_msix(pqisrc_softstate_t *);
int pqisrc_force_sis(pqisrc_softstate_t *);
int pqisrc_sis_wait_for_db_bit_to_clear(pqisrc_softstate_t *, uint32_t);
/* pqi_queue.c */
int pqisrc_submit_admin_req(pqisrc_softstate_t *,
gen_adm_req_iu_t *, gen_adm_resp_iu_t *);
int pqisrc_create_admin_queue(pqisrc_softstate_t *);
int pqisrc_destroy_admin_queue(pqisrc_softstate_t *);
int pqisrc_create_op_queues(pqisrc_softstate_t *);
/* pqi_cmd.c */
int pqisrc_submit_cmnd(pqisrc_softstate_t *,ib_queue_t *,void *);
/* pqi_tag.c */
#ifndef LOCKFREE_STACK
int pqisrc_init_taglist(pqisrc_softstate_t *,pqi_taglist_t *,uint32_t);
void pqisrc_destroy_taglist(pqisrc_softstate_t *,pqi_taglist_t *);
void pqisrc_put_tag(pqi_taglist_t *,uint32_t);
uint32_t pqisrc_get_tag(pqi_taglist_t *);
#else
int pqisrc_init_taglist(pqisrc_softstate_t *, lockless_stack_t *, uint32_t);
void pqisrc_destroy_taglist(pqisrc_softstate_t *, lockless_stack_t *);
void pqisrc_put_tag(lockless_stack_t *,uint32_t);
uint32_t pqisrc_get_tag(lockless_stack_t *);
#endif /* LOCKFREE_STACK */
/* pqi_discovery.c */
void pqisrc_remove_device(pqisrc_softstate_t *, pqi_scsi_dev_t *);
int pqisrc_get_ctrl_fw_version(pqisrc_softstate_t *);
int pqisrc_rescan_devices(pqisrc_softstate_t *);
int pqisrc_scan_devices(pqisrc_softstate_t *);
void pqisrc_process_raid_path_io_response(pqisrc_softstate_t *, uint16_t, struct pqi_io_response *);
void pqisrc_process_io_error_response(pqisrc_softstate_t *, int, uint16_t, struct pqi_io_response *);
void pqisrc_cleanup_devices(pqisrc_softstate_t *);
void pqisrc_device_mem_free(pqisrc_softstate_t *, pqi_scsi_dev_t *);
boolean_t pqisrc_is_external_raid_device(pqi_scsi_dev_t *device);
void pqisrc_free_device(pqisrc_softstate_t * softs,pqi_scsi_dev_t *device);
/* pqi_helper.c */
boolean_t pqisrc_ctrl_offline(pqisrc_softstate_t *);
void pqisrc_heartbeat_timer_handler(pqisrc_softstate_t *);
int pqisrc_wait_on_condition(pqisrc_softstate_t *, rcb_t *);
boolean_t pqisrc_device_equal(pqi_scsi_dev_t *, pqi_scsi_dev_t *);
boolean_t pqisrc_is_hba_lunid(uint8_t *);
boolean_t pqisrc_is_logical_device(pqi_scsi_dev_t *);
void pqisrc_sanitize_inquiry_string(unsigned char *, int );
void pqisrc_display_device_info(pqisrc_softstate_t *, char *, pqi_scsi_dev_t *);
boolean_t pqisrc_scsi3addr_equal(uint8_t *, uint8_t *);
void check_struct_sizes(void);
char *pqisrc_raidlevel_to_string(uint8_t);
/* pqi_response.c */
void pqisrc_signal_event(pqisrc_softstate_t *softs, rcb_t *rcb);
void pqisrc_process_internal_raid_response_success(pqisrc_softstate_t *,
rcb_t *);
void pqisrc_process_internal_raid_response_error(pqisrc_softstate_t *,
rcb_t *, uint16_t);
void pqisrc_process_io_response_success(pqisrc_softstate_t *,
rcb_t *);
void pqisrc_process_aio_response_error(pqisrc_softstate_t *,
rcb_t *, uint16_t);
void pqisrc_process_raid_response_error(pqisrc_softstate_t *,
rcb_t *, uint16_t);
void pqisrc_process_response_queue(pqisrc_softstate_t *, int);
/* pqi_request.c */
int pqisrc_build_send_io(pqisrc_softstate_t *,rcb_t *);
int pqisrc_send_scsi_cmd_raidbypass(pqisrc_softstate_t *softs,
pqi_scsi_dev_t *device, rcb_t *rcb, uint8_t*);
int pqisrc_send_tmf(pqisrc_softstate_t *, pqi_scsi_dev_t *,
rcb_t *, int, int);
int pqisrc_write_current_time_to_host_wellness(pqisrc_softstate_t *softs);
int pqisrc_write_driver_version_to_host_wellness(pqisrc_softstate_t *softs);
/* pqi_event.c*/
int pqisrc_report_event_config(pqisrc_softstate_t *);
int pqisrc_set_event_config(pqisrc_softstate_t *);
int pqisrc_process_event_intr_src(pqisrc_softstate_t *,int);
void pqisrc_ack_all_events(void *arg);
void pqisrc_event_worker(void *, int);
int pqisrc_scsi_setup(struct pqisrc_softstate *);
void pqisrc_scsi_cleanup(struct pqisrc_softstate *);
boolean_t pqisrc_update_scsi_sense(const uint8_t *, int,
struct sense_header_scsi *);
int pqisrc_build_send_raid_request(pqisrc_softstate_t *, pqisrc_raid_req_t *,
void *, size_t, uint8_t, uint16_t, uint8_t *,
raid_path_error_info_elem_t *);
int pqisrc_submit_management_req(pqisrc_softstate_t *,
pqi_event_config_request_t *);
void pqisrc_take_devices_offline(pqisrc_softstate_t *);
void pqisrc_take_ctrl_offline(pqisrc_softstate_t *);
void pqisrc_free_rcb(pqisrc_softstate_t *, int);
void pqisrc_decide_opq_config(pqisrc_softstate_t *);
int pqisrc_configure_op_queues(pqisrc_softstate_t *);
int pqisrc_pqi_init(pqisrc_softstate_t *);
int pqi_reset(pqisrc_softstate_t *);
int pqisrc_check_pqimode(pqisrc_softstate_t *);
int pqisrc_check_fw_status(pqisrc_softstate_t *);
int pqisrc_init_struct_base(pqisrc_softstate_t *);
int pqisrc_get_sis_pqi_cap(pqisrc_softstate_t *);
int pqisrc_get_preferred_settings(pqisrc_softstate_t *);
int pqisrc_get_adapter_properties(pqisrc_softstate_t *,
uint32_t *, uint32_t *);
void pqisrc_get_admin_queue_config(pqisrc_softstate_t *);
void pqisrc_decide_admin_queue_config(pqisrc_softstate_t *);
int pqisrc_allocate_and_init_adminq(pqisrc_softstate_t *);
int pqisrc_create_delete_adminq(pqisrc_softstate_t *, uint32_t);
void pqisrc_print_adminq_config(pqisrc_softstate_t *);
int pqisrc_delete_op_queue(pqisrc_softstate_t *,
uint32_t, boolean_t);
void pqisrc_destroy_event_queue(pqisrc_softstate_t *);
void pqisrc_destroy_op_ib_queues(pqisrc_softstate_t *);
void pqisrc_destroy_op_ob_queues(pqisrc_softstate_t *);
int pqisrc_change_op_ibq_queue_prop(pqisrc_softstate_t *,
ib_queue_t *, uint32_t);
int pqisrc_create_op_obq(pqisrc_softstate_t *,
ob_queue_t *);
int pqisrc_create_op_ibq(pqisrc_softstate_t *,
ib_queue_t *);
int pqisrc_create_op_aio_ibq(pqisrc_softstate_t *, ib_queue_t *);
int pqisrc_create_op_raid_ibq(pqisrc_softstate_t *, ib_queue_t *);
int pqisrc_alloc_and_create_event_queue(pqisrc_softstate_t *);
int pqisrc_alloc_and_create_ib_queues(pqisrc_softstate_t *);
int pqisrc_alloc_and_create_ob_queues(pqisrc_softstate_t *);
int pqisrc_process_task_management_response(pqisrc_softstate_t *,
pqi_tmf_resp_t *);
/* pqi_ioctl.c*/
int
pqisrc_passthru_ioctl(struct pqisrc_softstate *, void *, int);
/* Function prototypes */
/* FreeBSD_mem.c */
int os_dma_mem_alloc(pqisrc_softstate_t *,struct dma_mem *);
void os_dma_mem_free(pqisrc_softstate_t *,struct dma_mem *);
void *os_mem_alloc(pqisrc_softstate_t *,size_t);
void os_mem_free(pqisrc_softstate_t *,char *,size_t);
void os_resource_free(pqisrc_softstate_t *);
int os_dma_setup(pqisrc_softstate_t *);
int os_dma_destroy(pqisrc_softstate_t *);
/* FreeBSD intr.c */
int os_get_intr_config(pqisrc_softstate_t *);
int os_setup_intr(pqisrc_softstate_t *);
int os_destroy_intr(pqisrc_softstate_t *);
int os_get_processor_config(pqisrc_softstate_t *);
void os_free_intr_config(pqisrc_softstate_t *);
/* FreeBSD_ioctl.c */
int os_copy_to_user(struct pqisrc_softstate *, void *,
void *, int, int);
int os_copy_from_user(struct pqisrc_softstate *, void *,
void *, int, int);
int create_char_dev(struct pqisrc_softstate *, int);
void destroy_char_dev(struct pqisrc_softstate *);
/* FreeBSD_misc.c*/
int os_init_spinlock(struct pqisrc_softstate *, struct mtx *, char *);
void os_uninit_spinlock(struct mtx *);
int os_create_semaphore(const char *, int,struct sema *);
int os_destroy_semaphore(struct sema *);
void os_sema_lock(struct sema *);
void os_sema_unlock(struct sema *);
int os_strlcpy(char *dst, char *src, int len);
void os_complete_outstanding_cmds_nodevice(pqisrc_softstate_t *);
void os_stop_heartbeat_timer(pqisrc_softstate_t *);
void os_start_heartbeat_timer(void *);
/* FreeBSD_cam.c */
int pqisrc_scsi_setup(struct pqisrc_softstate *);
void pqisrc_scsi_cleanup(struct pqisrc_softstate *);
uint8_t os_get_task_attr(rcb_t *);
void os_wellness_periodic(void *);
void smartpqi_target_rescan(struct pqisrc_softstate *);
/* FreeBSD_intr.c FreeBSD_main.c */
void pqisrc_event_worker(void *, int);
void os_add_device(pqisrc_softstate_t *, pqi_scsi_dev_t *);
void os_remove_device(pqisrc_softstate_t *, pqi_scsi_dev_t *);
void os_io_response_success(rcb_t *);
void os_aio_response_error(rcb_t *, aio_path_error_info_elem_t *);
void smartpqi_adjust_queue_depth(struct cam_path *, uint32_t );
void os_raid_response_error(rcb_t *, raid_path_error_info_elem_t *);
void os_wellness_periodic(void *);
void os_reset_rcb( rcb_t *);
int register_sim(struct pqisrc_softstate *, int);
void deregister_sim(struct pqisrc_softstate *);
int check_for_scsi_opcode(uint8_t *, boolean_t *, uint64_t *,
uint32_t *);
int register_legacy_intr(pqisrc_softstate_t *);
int register_msix_intr(pqisrc_softstate_t *);
void deregister_pqi_intx(pqisrc_softstate_t *);
void deregister_pqi_msix(pqisrc_softstate_t *);
void os_get_time(struct bmic_host_wellness_time *);
void os_eventtaskqueue_enqueue(pqisrc_softstate_t *);
void pqisrc_save_controller_info(struct pqisrc_softstate *);
int smartpqi_shutdown(void *);
#endif /* _PQI_PROTOTYPES_H */


@ -0,0 +1,995 @@
/*-
* Copyright (c) 2018 Microsemi Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* $FreeBSD$ */
#include "smartpqi_includes.h"
/*
* Submit an admin IU to the adapter.
* Add interrupt support, if required
*/
int pqisrc_submit_admin_req(pqisrc_softstate_t *softs,
gen_adm_req_iu_t *req, gen_adm_resp_iu_t *resp)
{
int ret = PQI_STATUS_SUCCESS;
ob_queue_t *ob_q = &softs->admin_ob_queue;
ib_queue_t *ib_q = &softs->admin_ib_queue;
int tmo = PQISRC_ADMIN_CMD_RESP_TIMEOUT;
DBG_FUNC("IN\n");
req->header.iu_type =
PQI_IU_TYPE_GENERAL_ADMIN_REQUEST;
req->header.comp_feature = 0x00;
req->header.iu_length = PQI_STANDARD_IU_LENGTH;
req->res1 = 0;
req->work = 0;
/* Get the tag */
req->req_id = pqisrc_get_tag(&softs->taglist);
if (INVALID_ELEM == req->req_id) {
DBG_ERR("Tag not available0x%x\n",(uint16_t)req->req_id);
ret = PQI_STATUS_FAILURE;
goto err_out;
}
softs->rcb[req->req_id].tag = req->req_id;
/* Submit the command to the admin ib queue */
ret = pqisrc_submit_cmnd(softs, ib_q, req);
if (ret != PQI_STATUS_SUCCESS) {
DBG_ERR("Unable to submit command\n");
goto err_cmd;
}
/* Wait for completion */
COND_WAIT((*(ob_q->pi_virt_addr) != ob_q->ci_local), tmo);
if (tmo <= 0) {
DBG_ERR("Admin cmd timeout\n");
DBG_ERR("tmo : %d\n",tmo); \
ret = PQI_STATUS_TIMEOUT;
goto err_cmd;
}
/* Copy the response */
memcpy(resp, ob_q->array_virt_addr + (ob_q->ci_local * ob_q->elem_size),
sizeof(gen_adm_resp_iu_t));
/* Update CI */
ob_q->ci_local = (ob_q->ci_local + 1) % ob_q->num_elem;
PCI_MEM_PUT32(softs, ob_q->ci_register_abs,
ob_q->ci_register_offset, LE_32(ob_q->ci_local));
/* Validate the response data */
ASSERT(req->fn_code == resp->fn_code);
ASSERT(resp->header.iu_type == PQI_IU_TYPE_GENERAL_ADMIN_RESPONSE);
ret = resp->status;
if (ret)
goto err_cmd;
os_reset_rcb(&softs->rcb[req->req_id]);
pqisrc_put_tag(&softs->taglist,req->req_id);
DBG_FUNC("OUT\n");
return ret;
err_cmd:
os_reset_rcb(&softs->rcb[req->req_id]);
pqisrc_put_tag(&softs->taglist,req->req_id);
err_out:
DBG_FUNC("failed OUT : %d\n", ret);
return ret;
}
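/*
 * Flow sketch (added commentary, not driver code): the request above is
 * tagged, copied into the admin IB queue, and completion is detected by
 * polling the DMA'd OB producer index against the locally cached consumer
 * index; the consumer index is then advanced and written back through the
 * CI doorbell register so the firmware can reuse the element.
 */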
/*
* Get the administration queue config parameters.
*/
void pqisrc_get_admin_queue_config(pqisrc_softstate_t *softs)
{
uint64_t val = 0;
val = LE_64(PCI_MEM_GET64(softs, &softs->pqi_reg->pqi_dev_adminq_cap, PQI_ADMINQ_CAP));
/* pqi_cap = (struct pqi_dev_adminq_cap *)&val;*/
softs->admin_ib_queue.num_elem = val & 0xFF;
softs->admin_ob_queue.num_elem = (val & 0xFF00) >> 8;
/* Note: sizes are in units of 16 bytes */
softs->admin_ib_queue.elem_size = ((val & 0xFF0000) >> 16) * 16;
softs->admin_ob_queue.elem_size = ((val & 0xFF000000) >> 24) * 16;
DBG_FUNC(" softs->admin_ib_queue.num_elem : %d\n",
softs->admin_ib_queue.num_elem);
DBG_FUNC(" softs->admin_ib_queue.elem_size : %d\n",
softs->admin_ib_queue.elem_size);
}
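/*
 * Illustrative decode (not driver code): the sample register value below
 * is made up; the shifts and masks mirror pqisrc_get_admin_queue_config()
 * above.
 */
#if 0
uint64_t sample = 0x0404FF20ULL;	/* hypothetical capability value */
uint32_t ib_num = sample & 0xFF;			/* 0x20 -> 32 IB elements */
uint32_t ob_num = (sample & 0xFF00) >> 8;		/* 0xFF -> 255 OB elements */
uint32_t ib_size = ((sample & 0xFF0000) >> 16) * 16;	/* 4 * 16 -> 64 bytes */
uint32_t ob_size = ((sample & 0xFF000000) >> 24) * 16;	/* 4 * 16 -> 64 bytes */
#endif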
/*
 * Decide the number of elements in the admin ib and ob queues.
*/
void pqisrc_decide_admin_queue_config(pqisrc_softstate_t *softs)
{
/* Determine num elements in Admin IBQ */
softs->admin_ib_queue.num_elem = MIN(softs->admin_ib_queue.num_elem,
PQISRC_MAX_ADMIN_IB_QUEUE_ELEM_NUM);
/* Determine num elements in Admin OBQ */
softs->admin_ob_queue.num_elem = MIN(softs->admin_ob_queue.num_elem,
PQISRC_MAX_ADMIN_OB_QUEUE_ELEM_NUM);
}
/*
* Allocate DMA memory for admin queue and initialize.
*/
int pqisrc_allocate_and_init_adminq(pqisrc_softstate_t *softs)
{
uint32_t ib_array_size = 0;
uint32_t ob_array_size = 0;
uint32_t alloc_size = 0;
char *virt_addr = NULL;
dma_addr_t dma_addr = 0;
int ret = PQI_STATUS_SUCCESS;
ib_array_size = (softs->admin_ib_queue.num_elem *
softs->admin_ib_queue.elem_size);
ob_array_size = (softs->admin_ob_queue.num_elem *
softs->admin_ob_queue.elem_size);
alloc_size = ib_array_size + ob_array_size +
2 * sizeof(uint32_t) + PQI_ADDR_ALIGN_MASK_64 + 1; /* for IB CI and OB PI */
/* Allocate memory for Admin Q */
softs->admin_queue_dma_mem.tag = "admin_queue";
softs->admin_queue_dma_mem.size = alloc_size;
softs->admin_queue_dma_mem.align = PQI_ADMINQ_ELEM_ARRAY_ALIGN;
ret = os_dma_mem_alloc(softs, &softs->admin_queue_dma_mem);
if (ret) {
DBG_ERR("Failed to Allocate Admin Q ret : %d\n", ret);
goto err_out;
}
/* Setup the address */
virt_addr = softs->admin_queue_dma_mem.virt_addr;
dma_addr = softs->admin_queue_dma_mem.dma_addr;
/* IB */
softs->admin_ib_queue.q_id = 0;
softs->admin_ib_queue.array_virt_addr = virt_addr;
softs->admin_ib_queue.array_dma_addr = dma_addr;
softs->admin_ib_queue.pi_local = 0;
/* OB */
softs->admin_ob_queue.q_id = 0;
softs->admin_ob_queue.array_virt_addr = virt_addr + ib_array_size;
softs->admin_ob_queue.array_dma_addr = dma_addr + ib_array_size;
softs->admin_ob_queue.ci_local = 0;
/* IB CI */
softs->admin_ib_queue.ci_virt_addr =
(uint32_t*)((uint8_t*)softs->admin_ob_queue.array_virt_addr
+ ob_array_size);
softs->admin_ib_queue.ci_dma_addr =
(dma_addr_t)((uint8_t*)softs->admin_ob_queue.array_dma_addr +
ob_array_size);
/* OB PI */
softs->admin_ob_queue.pi_virt_addr =
(uint32_t*)((uint8_t*)(softs->admin_ib_queue.ci_virt_addr) +
PQI_ADDR_ALIGN_MASK_64 + 1);
softs->admin_ob_queue.pi_dma_addr =
(dma_addr_t)((uint8_t*)(softs->admin_ib_queue.ci_dma_addr) +
PQI_ADDR_ALIGN_MASK_64 + 1);
DBG_INFO("softs->admin_ib_queue.ci_dma_addr : %p,softs->admin_ob_queue.pi_dma_addr :%p\n",
(void*)softs->admin_ib_queue.ci_dma_addr, (void*)softs->admin_ob_queue.pi_dma_addr );
/* Verify alignment */
ASSERT(!(softs->admin_ib_queue.array_dma_addr &
PQI_ADDR_ALIGN_MASK_64));
ASSERT(!(softs->admin_ib_queue.ci_dma_addr &
PQI_ADDR_ALIGN_MASK_64));
ASSERT(!(softs->admin_ob_queue.array_dma_addr &
PQI_ADDR_ALIGN_MASK_64));
ASSERT(!(softs->admin_ob_queue.pi_dma_addr &
PQI_ADDR_ALIGN_MASK_64));
DBG_FUNC("OUT\n");
return ret;
err_out:
DBG_FUNC("failed OUT\n");
return PQI_STATUS_FAILURE;
}
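/*
 * Layout of the single DMA allocation carved up above (a sketch; array
 * sizes depend on the negotiated element counts):
 *
 * +------------------+------------------+-------+-- pad --+-------+
 * | IB element array | OB element array | IB CI |         | OB PI |
 * +------------------+------------------+-------+---------+-------+
 * ^ 64-byte aligned                     ^ the two 32-bit index copies
 *                                         each occupy a 64-byte-aligned
 *                                         slot, as the ASSERTs verify
 */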
/*
* Subroutine used to create (or) delete the admin queue requested.
*/
int pqisrc_create_delete_adminq(pqisrc_softstate_t *softs,
uint32_t cmd)
{
int tmo = 0;
int ret = PQI_STATUS_SUCCESS;
/* Create Admin Q pair writing to Admin Q config function reg */
PCI_MEM_PUT64(softs, &softs->pqi_reg->admin_q_config, PQI_ADMINQ_CONFIG, LE_64(cmd));
if (cmd == PQI_ADMIN_QUEUE_CONF_FUNC_CREATE_Q_PAIR)
tmo = PQISRC_ADMIN_QUEUE_CREATE_TIMEOUT;
else
tmo = PQISRC_ADMIN_QUEUE_DELETE_TIMEOUT;
/* Wait for completion */
COND_WAIT((PCI_MEM_GET64(softs, &softs->pqi_reg->admin_q_config, PQI_ADMINQ_CONFIG) ==
PQI_ADMIN_QUEUE_CONF_FUNC_STATUS_IDLE), tmo);
if (tmo <= 0) {
DBG_ERR("Unable to create/delete admin queue pair\n");
ret = PQI_STATUS_TIMEOUT;
}
return ret;
}
/*
* Debug admin queue configuration params.
*/
void pqisrc_print_adminq_config(pqisrc_softstate_t *softs)
{
DBG_INFO(" softs->admin_ib_queue.array_dma_addr : %p\n",
(void*)softs->admin_ib_queue.array_dma_addr);
DBG_INFO(" softs->admin_ib_queue.array_virt_addr : %p\n",
(void*)softs->admin_ib_queue.array_virt_addr);
DBG_INFO(" softs->admin_ib_queue.num_elem : %d\n",
softs->admin_ib_queue.num_elem);
DBG_INFO(" softs->admin_ib_queue.elem_size : %d\n",
softs->admin_ib_queue.elem_size);
DBG_INFO(" softs->admin_ob_queue.array_dma_addr : %p\n",
(void*)softs->admin_ob_queue.array_dma_addr);
DBG_INFO(" softs->admin_ob_queue.array_virt_addr : %p\n",
(void*)softs->admin_ob_queue.array_virt_addr);
DBG_INFO(" softs->admin_ob_queue.num_elem : %d\n",
softs->admin_ob_queue.num_elem);
DBG_INFO(" softs->admin_ob_queue.elem_size : %d\n",
softs->admin_ob_queue.elem_size);
DBG_INFO(" softs->admin_ib_queue.pi_register_abs : %p\n",
(void*)softs->admin_ib_queue.pi_register_abs);
DBG_INFO(" softs->admin_ob_queue.ci_register_abs : %p\n",
(void*)softs->admin_ob_queue.ci_register_abs);
}
/*
* Function used to create an admin queue.
*/
int pqisrc_create_admin_queue(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
uint32_t admin_q_param = 0;
DBG_FUNC("IN\n");
/* Get admin queue details - pqi2-r00a - table 24 */
pqisrc_get_admin_queue_config(softs);
/* Decide admin Q config */
pqisrc_decide_admin_queue_config(softs);
/* Allocate and init Admin Q pair */
ret = pqisrc_allocate_and_init_adminq(softs);
if (ret) {
DBG_ERR("Failed to Allocate Admin Q ret : %d\n", ret);
goto err_out;
}
/* Write IB Q element array address */
PCI_MEM_PUT64(softs, &softs->pqi_reg->admin_ibq_elem_array_addr,
PQI_ADMIN_IBQ_ELEM_ARRAY_ADDR, LE_64(softs->admin_ib_queue.array_dma_addr));
/* Write OB Q element array address */
PCI_MEM_PUT64(softs, &softs->pqi_reg->admin_obq_elem_array_addr,
PQI_ADMIN_OBQ_ELEM_ARRAY_ADDR, LE_64(softs->admin_ob_queue.array_dma_addr));
/* Write IB Q CI address */
PCI_MEM_PUT64(softs, &softs->pqi_reg->admin_ibq_ci_addr,
PQI_ADMIN_IBQ_CI_ADDR, LE_64(softs->admin_ib_queue.ci_dma_addr));
/* Write OB Q PI address */
PCI_MEM_PUT64(softs, &softs->pqi_reg->admin_obq_pi_addr,
PQI_ADMIN_OBQ_PI_ADDR, LE_64(softs->admin_ob_queue.pi_dma_addr));
/* Write Admin Q params pqi-r200a table 36 */
admin_q_param = softs->admin_ib_queue.num_elem |
(softs->admin_ob_queue.num_elem << 8)|
PQI_ADMIN_QUEUE_MSIX_DISABLE;
PCI_MEM_PUT32(softs, &softs->pqi_reg->admin_q_param,
PQI_ADMINQ_PARAM, LE_32(admin_q_param));
/* Submit cmd to create Admin Q pair */
ret = pqisrc_create_delete_adminq(softs,
PQI_ADMIN_QUEUE_CONF_FUNC_CREATE_Q_PAIR);
if (ret) {
DBG_ERR("Failed to Allocate Admin Q ret : %d\n", ret);
goto err_q_create;
}
/* Admin queue created, get ci,pi offset */
softs->admin_ib_queue.pi_register_offset = (PQISRC_PQI_REG_OFFSET +
PCI_MEM_GET64(softs, &softs->pqi_reg->admin_ibq_pi_offset, PQI_ADMIN_IBQ_PI_OFFSET));
softs->admin_ib_queue.pi_register_abs = (uint32_t *)(softs->pci_mem_base_vaddr +
softs->admin_ib_queue.pi_register_offset);
softs->admin_ob_queue.ci_register_offset = (PQISRC_PQI_REG_OFFSET +
PCI_MEM_GET64(softs, &softs->pqi_reg->admin_obq_ci_offset, PQI_ADMIN_OBQ_CI_OFFSET));
softs->admin_ob_queue.ci_register_abs = (uint32_t *)(softs->pci_mem_base_vaddr +
softs->admin_ob_queue.ci_register_offset);
os_strlcpy(softs->admin_ib_queue.lockname, "admin_ibqlock", LOCKNAME_SIZE);
ret = OS_INIT_PQILOCK(softs, &softs->admin_ib_queue.lock,
softs->admin_ib_queue.lockname);
if(ret){
DBG_ERR("Admin spinlock initialization failed\n");
softs->admin_ib_queue.lockcreated = false;
goto err_out;
}
softs->admin_ib_queue.lockcreated = true;
/* Print admin q config details */
pqisrc_print_adminq_config(softs);
DBG_FUNC("OUT\n");
return ret;
err_q_create:
os_dma_mem_free(softs, &softs->admin_queue_dma_mem);
err_out:
DBG_FUNC("failed OUT\n");
return ret;
}
/*
* Subroutine used to delete an operational queue.
*/
int pqisrc_delete_op_queue(pqisrc_softstate_t *softs,
uint32_t q_id, boolean_t ibq)
{
int ret = PQI_STATUS_SUCCESS;
/* Firmware doesn't support this now */
#if 0
gen_adm_req_iu_t admin_req;
gen_adm_resp_iu_t admin_resp;
memset(&admin_req, 0, sizeof(admin_req));
memset(&admin_resp, 0, sizeof(admin_resp));
DBG_FUNC("IN\n");
admin_req.req_type.create_op_iq.qid = q_id;
if (ibq)
admin_req.fn_code = PQI_FUNCTION_DELETE_OPERATIONAL_IQ;
else
admin_req.fn_code = PQI_FUNCTION_DELETE_OPERATIONAL_OQ;
ret = pqisrc_submit_admin_req(softs, &admin_req, &admin_resp);
DBG_FUNC("OUT\n");
#endif
return ret;
}
/*
* Function used to destroy the event queue.
*/
void pqisrc_destroy_event_queue(pqisrc_softstate_t *softs)
{
DBG_FUNC("IN\n");
if (softs->event_q.created == true) {
int ret = PQI_STATUS_SUCCESS;
ret = pqisrc_delete_op_queue(softs, softs->event_q.q_id, false);
if (ret) {
DBG_ERR("Failed to Delete Event Q %d\n", softs->event_q.q_id);
}
softs->event_q.created = false;
}
/* Free the memory */
os_dma_mem_free(softs, &softs->event_q_dma_mem);
DBG_FUNC("OUT\n");
}
/*
* Function used to destroy operational ib queues.
*/
void pqisrc_destroy_op_ib_queues(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
ib_queue_t *op_ib_q = NULL;
int i;
DBG_FUNC("IN\n");
for (i = 0; i < softs->num_op_raid_ibq; i++) {
/* OP RAID IB Q */
op_ib_q = &softs->op_raid_ib_q[i];
if (op_ib_q->created == true) {
ret = pqisrc_delete_op_queue(softs, op_ib_q->q_id, true);
if (ret) {
DBG_ERR("Failed to Delete Raid IB Q %d\n",op_ib_q->q_id);
}
op_ib_q->created = false;
}
if(op_ib_q->lockcreated==true){
OS_UNINIT_PQILOCK(&op_ib_q->lock);
op_ib_q->lockcreated = false;
}
/* OP AIO IB Q */
op_ib_q = &softs->op_aio_ib_q[i];
if (op_ib_q->created == true) {
ret = pqisrc_delete_op_queue(softs, op_ib_q->q_id, true);
if (ret) {
DBG_ERR("Failed to Delete AIO IB Q %d\n",op_ib_q->q_id);
}
op_ib_q->created = false;
}
if(op_ib_q->lockcreated==true){
OS_UNINIT_PQILOCK(&op_ib_q->lock);
op_ib_q->lockcreated = false;
}
}
/* Free the memory */
os_dma_mem_free(softs, &softs->op_ibq_dma_mem);
DBG_FUNC("OUT\n");
}
/*
* Function used to destroy operational ob queues.
*/
void pqisrc_destroy_op_ob_queues(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
int i;
DBG_FUNC("IN\n");
for (i = 0; i < softs->num_op_obq; i++) {
ob_queue_t *op_ob_q = NULL;
op_ob_q = &softs->op_ob_q[i];
if (op_ob_q->created == true) {
ret = pqisrc_delete_op_queue(softs, op_ob_q->q_id, false);
if (ret) {
DBG_ERR("Failed to Delete OB Q %d\n",op_ob_q->q_id);
}
op_ob_q->created = false;
}
}
/* Free the memory */
os_dma_mem_free(softs, &softs->op_obq_dma_mem);
DBG_FUNC("OUT\n");
}
/*
* Function used to destroy an admin queue.
*/
int pqisrc_destroy_admin_queue(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
DBG_FUNC("IN\n");
#if 0
ret = pqisrc_create_delete_adminq(softs,
PQI_ADMIN_QUEUE_CONF_FUNC_DEL_Q_PAIR);
#endif
os_dma_mem_free(softs, &softs->admin_queue_dma_mem);
DBG_FUNC("OUT\n");
return ret;
}
/*
* Function used to change operational ib queue properties.
*/
int pqisrc_change_op_ibq_queue_prop(pqisrc_softstate_t *softs,
ib_queue_t *op_ib_q, uint32_t prop)
{
int ret = PQI_STATUS_SUCCESS;
gen_adm_req_iu_t admin_req;
gen_adm_resp_iu_t admin_resp;
memset(&admin_req, 0, sizeof(admin_req));
memset(&admin_resp, 0, sizeof(admin_resp));
DBG_FUNC("IN\n");
admin_req.fn_code = PQI_FUNCTION_CHANGE_OPERATIONAL_IQ_PROP;
admin_req.req_type.change_op_iq_prop.qid = op_ib_q->q_id;
admin_req.req_type.change_op_iq_prop.vend_specific = prop;
ret = pqisrc_submit_admin_req(softs, &admin_req, &admin_resp);
DBG_FUNC("OUT\n");
return ret;
}
/*
* Function used to create an operational ob queue.
*/
int pqisrc_create_op_obq(pqisrc_softstate_t *softs,
ob_queue_t *op_ob_q)
{
int ret = PQI_STATUS_SUCCESS;
gen_adm_req_iu_t admin_req;
gen_adm_resp_iu_t admin_resp;
DBG_FUNC("IN\n");
memset(&admin_req, 0, sizeof(admin_req));
memset(&admin_resp, 0, sizeof(admin_resp));
admin_req.fn_code = PQI_FUNCTION_CREATE_OPERATIONAL_OQ;
admin_req.req_type.create_op_oq.qid = op_ob_q->q_id;
admin_req.req_type.create_op_oq.intr_msg_num = op_ob_q->intr_msg_num;
admin_req.req_type.create_op_oq.elem_arr_addr = op_ob_q->array_dma_addr;
admin_req.req_type.create_op_oq.ob_pi_addr = op_ob_q->pi_dma_addr;
admin_req.req_type.create_op_oq.num_elem = op_ob_q->num_elem;
admin_req.req_type.create_op_oq.elem_len = op_ob_q->elem_size / 16;
DBG_INFO("admin_req.req_type.create_op_oq.qid : %x\n",admin_req.req_type.create_op_oq.qid);
DBG_INFO("admin_req.req_type.create_op_oq.intr_msg_num : %x\n", admin_req.req_type.create_op_oq.intr_msg_num );
ret = pqisrc_submit_admin_req(softs, &admin_req, &admin_resp);
if( PQI_STATUS_SUCCESS == ret) {
op_ob_q->ci_register_offset = (PQISRC_PQI_REG_OFFSET +
admin_resp.resp_type.create_op_oq.ci_offset);
op_ob_q->ci_register_abs = (uint32_t *)(softs->pci_mem_base_vaddr +
op_ob_q->ci_register_offset);
} else {
int i = 0;
DBG_WARN("Error Status Descriptors\n");
for(i = 0; i < 4;i++)
DBG_WARN(" %x ",admin_resp.resp_type.create_op_oq.status_desc[i]);
}
DBG_FUNC("OUT ret : %d\n", ret);
return ret;
}
/*
* Function used to create an operational ib queue.
*/
int pqisrc_create_op_ibq(pqisrc_softstate_t *softs,
ib_queue_t *op_ib_q)
{
int ret = PQI_STATUS_SUCCESS;
gen_adm_req_iu_t admin_req;
gen_adm_resp_iu_t admin_resp;
DBG_FUNC("IN\n");
memset(&admin_req, 0, sizeof(admin_req));
memset(&admin_resp, 0, sizeof(admin_resp));
admin_req.fn_code = PQI_FUNCTION_CREATE_OPERATIONAL_IQ;
admin_req.req_type.create_op_iq.qid = op_ib_q->q_id;
admin_req.req_type.create_op_iq.elem_arr_addr = op_ib_q->array_dma_addr;
admin_req.req_type.create_op_iq.iq_ci_addr = op_ib_q->ci_dma_addr;
admin_req.req_type.create_op_iq.num_elem = op_ib_q->num_elem;
admin_req.req_type.create_op_iq.elem_len = op_ib_q->elem_size / 16;
ret = pqisrc_submit_admin_req(softs, &admin_req, &admin_resp);
if( PQI_STATUS_SUCCESS == ret) {
op_ib_q->pi_register_offset = (PQISRC_PQI_REG_OFFSET +
admin_resp.resp_type.create_op_iq.pi_offset);
op_ib_q->pi_register_abs = (uint32_t *)(softs->pci_mem_base_vaddr +
op_ib_q->pi_register_offset);
} else {
int i = 0;
DBG_WARN("Error Status Decsriptors\n");
for(i = 0; i < 4;i++)
DBG_WARN(" %x ",admin_resp.resp_type.create_op_iq.status_desc[i]);
}
DBG_FUNC("OUT ret : %d\n", ret);
return ret;
}
/*
 * Subroutine used to create an operational ib queue for AIO.
*/
int pqisrc_create_op_aio_ibq(pqisrc_softstate_t *softs,
ib_queue_t *op_aio_ib_q)
{
int ret = PQI_STATUS_SUCCESS;
DBG_FUNC("IN\n");
ret = pqisrc_create_op_ibq(softs,op_aio_ib_q);
if ( PQI_STATUS_SUCCESS == ret)
ret = pqisrc_change_op_ibq_queue_prop(softs,
op_aio_ib_q, PQI_CHANGE_OP_IQ_PROP_ASSIGN_AIO);
DBG_FUNC("OUT ret : %d\n", ret);
return ret;
}
/*
 * Subroutine used to create an operational ib queue for RAID.
*/
int pqisrc_create_op_raid_ibq(pqisrc_softstate_t *softs,
ib_queue_t *op_raid_ib_q)
{
int ret = PQI_STATUS_SUCCESS;
DBG_FUNC("IN\n");
ret = pqisrc_create_op_ibq(softs,op_raid_ib_q);
DBG_FUNC("OUT\n");
return ret;
}
/*
* Allocate and create an event queue to process supported events.
*/
int pqisrc_alloc_and_create_event_queue(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
uint32_t alloc_size = 0;
uint32_t num_elem;
char *virt_addr = NULL;
dma_addr_t dma_addr = 0;
uint32_t event_q_pi_dma_start_offset = 0;
uint32_t event_q_pi_virt_start_offset = 0;
char *event_q_pi_virt_start_addr = NULL;
ob_queue_t *event_q = NULL;
DBG_FUNC("IN\n");
/*
* Calculate memory requirements.
 * If the event queue is shared with IO responses, the number of
 * elements in the event queue also depends on the number of elements
 * in the OP OB queue. Since the event queue element size (32) is
 * larger than the IO response size, the event queue element size need
 * not be considered separately in the size calculation.
*/
#ifdef SHARE_EVENT_QUEUE_FOR_IO
num_elem = MIN(softs->num_elem_per_op_obq, PQISRC_NUM_EVENT_Q_ELEM);
#else
num_elem = PQISRC_NUM_EVENT_Q_ELEM;
#endif
alloc_size = num_elem * PQISRC_EVENT_Q_ELEM_SIZE;
event_q_pi_dma_start_offset = alloc_size;
event_q_pi_virt_start_offset = alloc_size;
alloc_size += sizeof(uint32_t); /*For IBQ CI*/
/* Allocate memory for event queues */
softs->event_q_dma_mem.tag = "event_queue";
softs->event_q_dma_mem.size = alloc_size;
softs->event_q_dma_mem.align = PQI_OPQ_ELEM_ARRAY_ALIGN;
ret = os_dma_mem_alloc(softs, &softs->event_q_dma_mem);
if (ret) {
DBG_ERR("Failed to Allocate Event Q ret : %d\n"
, ret);
goto err_out;
}
/* Set up the address */
virt_addr = softs->event_q_dma_mem.virt_addr;
dma_addr = softs->event_q_dma_mem.dma_addr;
event_q_pi_dma_start_offset += dma_addr;
event_q_pi_virt_start_addr = virt_addr + event_q_pi_virt_start_offset;
event_q = &softs->event_q;
ASSERT(!(dma_addr & PQI_ADDR_ALIGN_MASK_64));
FILL_QUEUE_ARRAY_ADDR(event_q,virt_addr,dma_addr);
event_q->q_id = PQI_OP_EVENT_QUEUE_ID;
event_q->num_elem = num_elem;
event_q->elem_size = PQISRC_EVENT_Q_ELEM_SIZE;
event_q->pi_dma_addr = event_q_pi_dma_start_offset;
event_q->pi_virt_addr = (uint32_t *)event_q_pi_virt_start_addr;
event_q->intr_msg_num = 0; /* vector zero for event */
ASSERT(!(event_q->pi_dma_addr & PQI_ADDR_ALIGN_MASK_4));
ret = pqisrc_create_op_obq(softs,event_q);
if (ret) {
DBG_ERR("Failed to Create EventQ %d\n",event_q->q_id);
goto err_out_create;
}
event_q->created = true;
DBG_FUNC("OUT\n");
return ret;
err_out_create:
pqisrc_destroy_event_queue(softs);
err_out:
DBG_FUNC("OUT failed %d\n", ret);
return PQI_STATUS_FAILURE;
}
/*
* Allocate DMA memory and create operational ib queues.
*/
int pqisrc_alloc_and_create_ib_queues(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
uint32_t alloc_size = 0;
char *virt_addr = NULL;
dma_addr_t dma_addr = 0;
uint32_t ibq_size = 0;
uint32_t ib_ci_dma_start_offset = 0;
char *ib_ci_virt_start_addr = NULL;
uint32_t ib_ci_virt_start_offset = 0;
uint32_t ibq_id = PQI_MIN_OP_IB_QUEUE_ID;
ib_queue_t *op_ib_q = NULL;
uint32_t num_op_ibq = softs->num_op_raid_ibq +
softs->num_op_aio_ibq;
int i = 0;
DBG_FUNC("IN\n");
/* Calculate memory requirements */
ibq_size = softs->num_elem_per_op_ibq * softs->ibq_elem_size;
alloc_size = num_op_ibq * ibq_size;
/* CI indexes starts after Queue element array */
ib_ci_dma_start_offset = alloc_size;
ib_ci_virt_start_offset = alloc_size;
alloc_size += num_op_ibq * sizeof(uint32_t); /*For IBQ CI*/
/* Allocate memory for IB queues */
softs->op_ibq_dma_mem.tag = "op_ib_queue";
softs->op_ibq_dma_mem.size = alloc_size;
softs->op_ibq_dma_mem.align = PQI_OPQ_ELEM_ARRAY_ALIGN;
ret = os_dma_mem_alloc(softs, &softs->op_ibq_dma_mem);
if (ret) {
DBG_ERR("Failed to Allocate Operational IBQ memory ret : %d\n",
ret);
goto err_out;
}
/* Set up the address */
virt_addr = softs->op_ibq_dma_mem.virt_addr;
dma_addr = softs->op_ibq_dma_mem.dma_addr;
ib_ci_dma_start_offset += dma_addr;
ib_ci_virt_start_addr = virt_addr + ib_ci_virt_start_offset;
ASSERT(softs->num_op_raid_ibq == softs->num_op_aio_ibq);
for (i = 0; i < softs->num_op_raid_ibq; i++) {
/* OP RAID IB Q */
op_ib_q = &softs->op_raid_ib_q[i];
ASSERT(!(dma_addr & PQI_ADDR_ALIGN_MASK_64));
FILL_QUEUE_ARRAY_ADDR(op_ib_q,virt_addr,dma_addr);
op_ib_q->q_id = ibq_id++;
snprintf(op_ib_q->lockname, LOCKNAME_SIZE, "raid_ibqlock%d", i);
ret = OS_INIT_PQILOCK(softs, &op_ib_q->lock, op_ib_q->lockname);
if(ret){
DBG_ERR("raid_ibqlock %d init failed\n", i);
op_ib_q->lockcreated = false;
goto err_lock;
}
op_ib_q->lockcreated = true;
op_ib_q->num_elem = softs->num_elem_per_op_ibq;
op_ib_q->elem_size = softs->ibq_elem_size;
op_ib_q->ci_dma_addr = ib_ci_dma_start_offset +
(2 * i * sizeof(uint32_t));
op_ib_q->ci_virt_addr = (uint32_t*)(ib_ci_virt_start_addr +
(2 * i * sizeof(uint32_t)));
ASSERT(!(op_ib_q->ci_dma_addr & PQI_ADDR_ALIGN_MASK_4));
ret = pqisrc_create_op_raid_ibq(softs, op_ib_q);
if (ret) {
DBG_ERR("[ %s ] Failed to Create OP Raid IBQ %d\n",
__func__, op_ib_q->q_id);
goto err_out_create;
}
op_ib_q->created = true;
/* OP AIO IB Q */
virt_addr += ibq_size;
dma_addr += ibq_size;
op_ib_q = &softs->op_aio_ib_q[i];
ASSERT(!(dma_addr & PQI_ADDR_ALIGN_MASK_64));
FILL_QUEUE_ARRAY_ADDR(op_ib_q,virt_addr,dma_addr);
op_ib_q->q_id = ibq_id++;
snprintf(op_ib_q->lockname, LOCKNAME_SIZE, "aio_ibqlock%d", i);
ret = OS_INIT_PQILOCK(softs, &op_ib_q->lock, op_ib_q->lockname);
if(ret){
DBG_ERR("aio_ibqlock %d init failed\n", i);
op_ib_q->lockcreated = false;
goto err_lock;
}
op_ib_q->lockcreated = true;
op_ib_q->num_elem = softs->num_elem_per_op_ibq;
op_ib_q->elem_size = softs->ibq_elem_size;
op_ib_q->ci_dma_addr = ib_ci_dma_start_offset +
(((2 * i) + 1) * sizeof(uint32_t));
op_ib_q->ci_virt_addr = (uint32_t*)(ib_ci_virt_start_addr +
(((2 * i) + 1) * sizeof(uint32_t)));
ASSERT(!(op_ib_q->ci_dma_addr & PQI_ADDR_ALIGN_MASK_4));
ret = pqisrc_create_op_aio_ibq(softs, op_ib_q);
if (ret) {
DBG_ERR("Failed to Create OP AIO IBQ %d\n",op_ib_q->q_id);
goto err_out_create;
}
op_ib_q->created = true;
virt_addr += ibq_size;
dma_addr += ibq_size;
}
DBG_FUNC("OUT\n");
return ret;
err_lock:
err_out_create:
pqisrc_destroy_op_ib_queues(softs);
err_out:
DBG_FUNC("OUT failed %d\n", ret);
return PQI_STATUS_FAILURE;
}
/*
* Allocate DMA memory and create operational ob queues.
*/
int pqisrc_alloc_and_create_ob_queues(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
uint32_t alloc_size = 0;
char *virt_addr = NULL;
dma_addr_t dma_addr = 0;
uint32_t obq_size = 0;
uint32_t ob_pi_dma_start_offset = 0;
uint32_t ob_pi_virt_start_offset = 0;
char *ob_pi_virt_start_addr = NULL;
uint32_t obq_id = PQI_MIN_OP_OB_QUEUE_ID;
ob_queue_t *op_ob_q = NULL;
uint32_t num_op_obq = softs->num_op_obq;
int i = 0;
DBG_FUNC("IN\n");
/*
 * The OB Q element array should be 64-byte aligned, so the number of
 * elements in each OB Q is rounded to a multiple of 4; the element
 * size (16) times the element count is then a multiple of 64.
*/
ALIGN_BOUNDARY(softs->num_elem_per_op_obq, 4);
obq_size = softs->num_elem_per_op_obq * softs->obq_elem_size;
alloc_size += num_op_obq * obq_size;
/* PI indexes starts after Queue element array */
ob_pi_dma_start_offset = alloc_size;
ob_pi_virt_start_offset = alloc_size;
alloc_size += num_op_obq * sizeof(uint32_t); /*For OBQ PI*/
/* Allocate memory for OB queues */
softs->op_obq_dma_mem.tag = "op_ob_queue";
softs->op_obq_dma_mem.size = alloc_size;
softs->op_obq_dma_mem.align = PQI_OPQ_ELEM_ARRAY_ALIGN;
ret = os_dma_mem_alloc(softs, &softs->op_obq_dma_mem);
if (ret) {
DBG_ERR("Failed to Allocate Operational OBQ memory ret : %d\n",
ret);
goto err_out;
}
/* Set up the address */
virt_addr = softs->op_obq_dma_mem.virt_addr;
dma_addr = softs->op_obq_dma_mem.dma_addr;
ob_pi_dma_start_offset += dma_addr;
ob_pi_virt_start_addr = virt_addr + ob_pi_virt_start_offset;
DBG_INFO("softs->num_op_obq %d\n",softs->num_op_obq);
for (i = 0; i < softs->num_op_obq; i++) {
op_ob_q = &softs->op_ob_q[i];
ASSERT(!(dma_addr & PQI_ADDR_ALIGN_MASK_64));
FILL_QUEUE_ARRAY_ADDR(op_ob_q,virt_addr,dma_addr);
op_ob_q->q_id = obq_id++;
if(softs->share_opq_and_eventq == true)
op_ob_q->intr_msg_num = i;
else
op_ob_q->intr_msg_num = i + 1; /* msg num zero for event */
op_ob_q->num_elem = softs->num_elem_per_op_obq;
op_ob_q->elem_size = softs->obq_elem_size;
op_ob_q->pi_dma_addr = ob_pi_dma_start_offset +
(i * sizeof(uint32_t));
op_ob_q->pi_virt_addr = (uint32_t*)(ob_pi_virt_start_addr +
(i * sizeof(uint32_t)));
ASSERT(!(op_ob_q->pi_dma_addr & PQI_ADDR_ALIGN_MASK_4));
ret = pqisrc_create_op_obq(softs,op_ob_q);
if (ret) {
DBG_ERR("Failed to Create OP OBQ %d\n",op_ob_q->q_id);
goto err_out_create;
}
op_ob_q->created = true;
virt_addr += obq_size;
dma_addr += obq_size;
}
DBG_FUNC("OUT\n");
return ret;
err_out_create:
pqisrc_destroy_op_ob_queues(softs);
err_out:
DBG_FUNC("OUT failed %d\n", ret);
return PQI_STATUS_FAILURE;
}
/*
* Function used to create operational queues for the adapter.
*/
int pqisrc_create_op_queues(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
DBG_FUNC("IN\n");
/* Create Operational IB queues */
ret = pqisrc_alloc_and_create_ib_queues(softs);
if (ret)
goto err_out;
/* Create Operational OB queues */
ret = pqisrc_alloc_and_create_ob_queues(softs);
if (ret)
goto err_out_obq;
/* Create Event queue */
ret = pqisrc_alloc_and_create_event_queue(softs);
if (ret)
goto err_out_eventq;
DBG_FUNC("OUT\n");
return ret;
err_out_eventq:
pqisrc_destroy_op_ob_queues(softs);
err_out_obq:
pqisrc_destroy_op_ib_queues(softs);
err_out:
DBG_FUNC("OUT failed %d\n", ret);
return PQI_STATUS_FAILURE;
}

@@ -0,0 +1,791 @@
/*-
* Copyright (c) 2018 Microsemi Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* $FreeBSD$ */
#include "smartpqi_includes.h"
#define SG_FLAG_LAST 0x40000000
#define SG_FLAG_CHAIN 0x80000000
/* Subroutine to find out embedded sgl count in IU */
static inline
uint32_t pqisrc_embedded_sgl_count(uint32_t elem_alloted)
{
uint32_t embedded_sgl_count = MAX_EMBEDDED_SG_IN_FIRST_IU;
DBG_FUNC(" IN ");
/**
calculate embedded sgl count using num_elem_alloted for IO
**/
if(elem_alloted - 1)
embedded_sgl_count += ((elem_alloted - 1) * MAX_EMBEDDED_SG_IN_IU);
DBG_IO("embedded_sgl_count :%d\n",embedded_sgl_count);
DBG_FUNC(" OUT ");
return embedded_sgl_count;
}
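/*
 * Worked example (illustrative only; assumes the tree's values
 * MAX_EMBEDDED_SG_IN_FIRST_IU == 4 and MAX_EMBEDDED_SG_IN_IU == 8,
 * consistent with the "4 or 68" note further below):
 */
#if 0
ASSERT(pqisrc_embedded_sgl_count(1) == 4);	/* first IU only */
ASSERT(pqisrc_embedded_sgl_count(3) == 20);	/* 4 + 2 * 8 */
#endif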
/* Subroutine to find out contiguous free elem in IU */
static inline
uint32_t pqisrc_contiguous_free_elem(uint32_t pi, uint32_t ci, uint32_t elem_in_q)
{
uint32_t contiguous_free_elem = 0;
DBG_FUNC(" IN ");
if(pi >= ci) {
contiguous_free_elem = (elem_in_q - pi);
if(ci == 0)
contiguous_free_elem -= 1;
} else {
contiguous_free_elem = (ci - pi - 1);
}
DBG_FUNC(" OUT ");
return contiguous_free_elem;
}
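/*
 * Ring arithmetic sketch (illustrative): one slot is always kept empty so
 * that a full ring is distinguishable from an empty one.
 */
#if 0
ASSERT(pqisrc_contiguous_free_elem(6, 2, 8) == 2);	/* slots 6,7 up to wrap */
ASSERT(pqisrc_contiguous_free_elem(2, 6, 8) == 3);	/* ci - pi - 1 */
ASSERT(pqisrc_contiguous_free_elem(0, 0, 8) == 7);	/* empty ring */
#endif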
/* Subroutine to find out num of elements need for the request */
static uint32_t
pqisrc_num_elem_needed(pqisrc_softstate_t *softs, uint32_t SG_Count)
{
uint32_t num_sg;
uint32_t num_elem_required = 1;
DBG_FUNC(" IN ");
DBG_IO("SGL_Count :%d",SG_Count);
/********
If SG_Count is greater than the max SGs per IU, i.e. 4 or 68
(4 without spanning, 68 with spanning), chaining is required.
Alternatively, if SG_Count <= MAX_EMBEDDED_SG_IN_FIRST_IU,
the SGL fits in the first IU.  In both cases one element is enough.
********/
if(SG_Count > softs->max_sg_per_iu || SG_Count <= MAX_EMBEDDED_SG_IN_FIRST_IU)
return num_elem_required;
/*
SGL Count Other Than First IU
*/
num_sg = SG_Count - MAX_EMBEDDED_SG_IN_FIRST_IU;
num_elem_required += PQISRC_DIV_ROUND_UP(num_sg, MAX_EMBEDDED_SG_IN_IU);
DBG_FUNC(" OUT ");
return num_elem_required;
}
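/*
 * Worked example (illustrative, same assumed constants as above): a
 * 21-entry SGL needs 1 + roundup((21 - 4) / 8) = 1 + 3 = 4 elements,
 * provided 21 does not exceed softs->max_sg_per_iu.
 */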
/* Subroutine to build SG list for the IU submission*/
static
boolean_t pqisrc_build_sgl(sgt_t *sg_array, rcb_t *rcb, iu_header_t *iu_hdr,
uint32_t num_elem_alloted)
{
uint32_t i;
uint32_t num_sg = OS_GET_IO_SG_COUNT(rcb);
sgt_t *sgt = sg_array;
sgt_t *sg_chain = NULL;
boolean_t partial = false;
DBG_FUNC(" IN ");
DBG_IO("SGL_Count :%d",num_sg);
if (0 == num_sg) {
goto out;
}
if (num_sg <= pqisrc_embedded_sgl_count(num_elem_alloted)) {
for (i = 0; i < num_sg; i++, sgt++) {
sgt->addr= OS_GET_IO_SG_ADDR(rcb,i);
sgt->len= OS_GET_IO_SG_LEN(rcb,i);
sgt->flags= 0;
}
sg_array[num_sg - 1].flags = SG_FLAG_LAST;
} else {
/**
SGL Chaining
**/
sg_chain = rcb->sg_chain_virt;
sgt->addr = rcb->sg_chain_dma;
sgt->len = num_sg * sizeof(sgt_t);
sgt->flags = SG_FLAG_CHAIN;
sgt = sg_chain;
for (i = 0; i < num_sg; i++, sgt++) {
sgt->addr = OS_GET_IO_SG_ADDR(rcb,i);
sgt->len = OS_GET_IO_SG_LEN(rcb,i);
sgt->flags = 0;
}
sg_chain[num_sg - 1].flags = SG_FLAG_LAST;
num_sg = 1;
partial = true;
}
out:
iu_hdr->iu_length = num_sg * sizeof(sgt_t);
DBG_FUNC(" OUT ");
return partial;
}
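/*
 * Chaining sketch (added commentary): when the OS SGL does not fit in the
 * embedded descriptors, the single embedded entry written above points at
 * the pre-allocated chain buffer (rcb->sg_chain_dma) with SG_FLAG_CHAIN
 * set, and the full list lives in the chain buffer with SG_FLAG_LAST on
 * its final entry; the IU then carries exactly one sgt_t.
 */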
/*Subroutine used to Build the RAID request */
static void
pqisrc_build_raid_io(pqisrc_softstate_t *softs, rcb_t *rcb,
pqisrc_raid_req_t *raid_req, uint32_t num_elem_alloted)
{
DBG_FUNC(" IN ");
raid_req->header.iu_type = PQI_IU_TYPE_RAID_PATH_IO_REQUEST;
raid_req->header.comp_feature = 0;
raid_req->response_queue_id = OS_GET_IO_RESP_QID(softs, rcb);
raid_req->work_area[0] = 0;
raid_req->work_area[1] = 0;
raid_req->request_id = rcb->tag;
raid_req->nexus_id = 0;
raid_req->buffer_length = GET_SCSI_BUFFLEN(rcb);
memcpy(raid_req->lun_number, rcb->dvp->scsi3addr,
sizeof(raid_req->lun_number));
raid_req->protocol_spec = 0;
raid_req->data_direction = rcb->data_dir;
raid_req->reserved1 = 0;
raid_req->fence = 0;
raid_req->error_index = raid_req->request_id;
raid_req->reserved2 = 0;
raid_req->task_attribute = OS_GET_TASK_ATTR(rcb);
raid_req->command_priority = 0;
raid_req->reserved3 = 0;
raid_req->reserved4 = 0;
raid_req->reserved5 = 0;
/* As cdb and additional_cdb_bytes are contiguous,
update them in a single statement */
memcpy(raid_req->cdb, rcb->cdbp, rcb->cmdlen);
#if 0
DBG_IO("CDB :");
for(i = 0; i < rcb->cmdlen ; i++)
DBG_IO(" 0x%x \n ",raid_req->cdb[i]);
#endif
switch (rcb->cmdlen) {
case 6:
case 10:
case 12:
case 16:
raid_req->additional_cdb_bytes_usage =
PQI_ADDITIONAL_CDB_BYTES_0;
break;
case 20:
raid_req->additional_cdb_bytes_usage =
PQI_ADDITIONAL_CDB_BYTES_4;
break;
case 24:
raid_req->additional_cdb_bytes_usage =
PQI_ADDITIONAL_CDB_BYTES_8;
break;
case 28:
raid_req->additional_cdb_bytes_usage =
PQI_ADDITIONAL_CDB_BYTES_12;
break;
case 32:
default: /* todo:review again */
raid_req->additional_cdb_bytes_usage =
PQI_ADDITIONAL_CDB_BYTES_16;
break;
}
/* Frame SGL Descriptor */
raid_req->partial = pqisrc_build_sgl(&raid_req->sg_descriptors[0], rcb,
&raid_req->header, num_elem_alloted);
raid_req->header.iu_length +=
offsetof(pqisrc_raid_req_t, sg_descriptors) - sizeof(iu_header_t);
#if 0
DBG_IO("raid_req->header.iu_type : 0x%x", raid_req->header.iu_type);
DBG_IO("raid_req->response_queue_id :%d\n"raid_req->response_queue_id);
DBG_IO("raid_req->request_id : 0x%x", raid_req->request_id);
DBG_IO("raid_req->buffer_length : 0x%x", raid_req->buffer_length);
DBG_IO("raid_req->task_attribute : 0x%x", raid_req->task_attribute);
DBG_IO("raid_req->lun_number : 0x%x", raid_req->lun_number);
DBG_IO("raid_req->error_index : 0x%x", raid_req->error_index);
DBG_IO("raid_req->sg_descriptors[0].addr : %p", (void*)raid_req->sg_descriptors[0].addr);
DBG_IO("raid_req->sg_descriptors[0].len : 0x%x", raid_req->sg_descriptors[0].len);
DBG_IO("raid_req->sg_descriptors[0].flags : 0%x", raid_req->sg_descriptors[0].flags);
#endif
rcb->success_cmp_callback = pqisrc_process_io_response_success;
rcb->error_cmp_callback = pqisrc_process_raid_response_error;
rcb->resp_qid = raid_req->response_queue_id;
DBG_FUNC(" OUT ");
}
/*Subroutine used to Build the AIO request */
static void
pqisrc_build_aio_io(pqisrc_softstate_t *softs, rcb_t *rcb,
pqi_aio_req_t *aio_req, uint32_t num_elem_alloted)
{
DBG_FUNC(" IN ");
aio_req->header.iu_type = PQI_IU_TYPE_AIO_PATH_IO_REQUEST;
aio_req->header.comp_feature = 0;
aio_req->response_queue_id = OS_GET_IO_RESP_QID(softs, rcb);
aio_req->work_area[0] = 0;
aio_req->work_area[1] = 0;
aio_req->req_id = rcb->tag;
aio_req->res1[0] = 0;
aio_req->res1[1] = 0;
aio_req->nexus = rcb->ioaccel_handle;
aio_req->buf_len = GET_SCSI_BUFFLEN(rcb);
aio_req->data_dir = rcb->data_dir;
aio_req->mem_type = 0;
aio_req->fence = 0;
aio_req->res2 = 0;
aio_req->task_attr = OS_GET_TASK_ATTR(rcb);
aio_req->cmd_prio = 0;
aio_req->res3 = 0;
aio_req->err_idx = aio_req->req_id;
aio_req->cdb_len = rcb->cmdlen;
memcpy(aio_req->cdb, rcb->cdbp, rcb->cmdlen);
#if 0
DBG_IO("CDB : \n");
for(int i = 0; i < rcb->cmdlen ; i++)
DBG_IO(" 0x%x \n",aio_req->cdb[i]);
#endif
memset(aio_req->lun,0,sizeof(aio_req->lun));
memset(aio_req->res4,0,sizeof(aio_req->res4));
if(rcb->encrypt_enable == true) {
aio_req->encrypt_enable = true;
aio_req->encrypt_key_index = LE_16(rcb->enc_info.data_enc_key_index);
aio_req->encrypt_twk_low = LE_32(rcb->enc_info.encrypt_tweak_lower);
aio_req->encrypt_twk_high = LE_32(rcb->enc_info.encrypt_tweak_upper);
} else {
aio_req->encrypt_enable = 0;
aio_req->encrypt_key_index = 0;
aio_req->encrypt_twk_high = 0;
aio_req->encrypt_twk_low = 0;
}
/* Frame SGL Descriptor */
aio_req->partial = pqisrc_build_sgl(&aio_req->sg_desc[0], rcb,
&aio_req->header, num_elem_alloted);
aio_req->num_sg = aio_req->header.iu_length / sizeof(sgt_t);
DBG_INFO("aio_req->num_sg :%d",aio_req->num_sg);
aio_req->header.iu_length += offsetof(pqi_aio_req_t, sg_desc) -
sizeof(iu_header_t);
#if 0
DBG_IO("aio_req->header.iu_type : 0x%x \n",aio_req->header.iu_type);
DBG_IO("aio_req->resp_qid :0x%x",aio_req->resp_qid);
DBG_IO("aio_req->req_id : 0x%x \n",aio_req->req_id);
DBG_IO("aio_req->nexus : 0x%x \n",aio_req->nexus);
DBG_IO("aio_req->buf_len : 0x%x \n",aio_req->buf_len);
DBG_IO("aio_req->data_dir : 0x%x \n",aio_req->data_dir);
DBG_IO("aio_req->task_attr : 0x%x \n",aio_req->task_attr);
DBG_IO("aio_req->err_idx : 0x%x \n",aio_req->err_idx);
DBG_IO("aio_req->num_sg :%d",aio_req->num_sg);
DBG_IO("aio_req->sg_desc[0].addr : %p \n", (void*)aio_req->sg_desc[0].addr);
DBG_IO("aio_req->sg_desc[0].len : 0%x \n", aio_req->sg_desc[0].len);
DBG_IO("aio_req->sg_desc[0].flags : 0%x \n", aio_req->sg_desc[0].flags);
#endif
rcb->success_cmp_callback = pqisrc_process_io_response_success;
rcb->error_cmp_callback = pqisrc_process_aio_response_error;
rcb->resp_qid = aio_req->response_queue_id;
DBG_FUNC(" OUT ");
}
/*Function used to build and send RAID/AIO */
int pqisrc_build_send_io(pqisrc_softstate_t *softs,rcb_t *rcb)
{
ib_queue_t *ib_q_array = softs->op_aio_ib_q;
ib_queue_t *ib_q = NULL;
char *ib_iu = NULL;
IO_PATH_T io_path = AIO_PATH;
uint32_t TraverseCount = 0;
int first_qindex = OS_GET_IO_REQ_QINDEX(softs, rcb);
int qindex = first_qindex;
uint32_t num_op_ib_q = softs->num_op_aio_ibq;
uint32_t num_elem_needed;
uint32_t num_elem_alloted = 0;
pqi_scsi_dev_t *devp = rcb->dvp;
uint8_t raidbypass_cdb[16];
DBG_FUNC(" IN ");
rcb->cdbp = OS_GET_CDBP(rcb);
if(IS_AIO_PATH(devp)) {
/** IO for Physical Drive **/
/** Send in AIO PATH**/
rcb->ioaccel_handle = devp->ioaccel_handle;
} else {
int ret = PQI_STATUS_FAILURE;
/** IO for RAID Volume **/
if (devp->offload_enabled) {
/** ByPass IO ,Send in AIO PATH **/
ret = pqisrc_send_scsi_cmd_raidbypass(softs,
devp, rcb, raidbypass_cdb);
}
if (PQI_STATUS_FAILURE == ret) {
/** Send in RAID PATH **/
io_path = RAID_PATH;
num_op_ib_q = softs->num_op_raid_ibq;
ib_q_array = softs->op_raid_ib_q;
} else {
rcb->cdbp = raidbypass_cdb;
}
}
num_elem_needed = pqisrc_num_elem_needed(softs, OS_GET_IO_SG_COUNT(rcb));
DBG_IO("num_elem_needed :%d",num_elem_needed);
do {
uint32_t num_elem_available;
ib_q = (ib_q_array + qindex);
PQI_LOCK(&ib_q->lock);
num_elem_available = pqisrc_contiguous_free_elem(ib_q->pi_local,
*(ib_q->ci_virt_addr), ib_q->num_elem);
DBG_IO("num_elem_avialable :%d\n",num_elem_available);
if(num_elem_available >= num_elem_needed) {
num_elem_alloted = num_elem_needed;
break;
}
DBG_IO("Current queue is busy! Hop to next queue\n");
PQI_UNLOCK(&ib_q->lock);
qindex = (qindex + 1) % num_op_ib_q;
if(qindex == first_qindex) {
if (num_elem_needed == 1)
break;
TraverseCount += 1;
num_elem_needed = 1;
}
} while (TraverseCount < 2);
DBG_IO("num_elem_alloted :%d",num_elem_alloted);
if (num_elem_alloted == 0) {
DBG_WARN("OUT: IB Queues were full\n");
return PQI_STATUS_QFULL;
}
/* Get IB Queue Slot address to build IU */
ib_iu = ib_q->array_virt_addr + (ib_q->pi_local * ib_q->elem_size);
if(io_path == AIO_PATH) {
/** Build AIO structure **/
pqisrc_build_aio_io(softs, rcb, (pqi_aio_req_t*)ib_iu,
num_elem_alloted);
} else {
/** Build RAID structure **/
pqisrc_build_raid_io(softs, rcb, (pqisrc_raid_req_t*)ib_iu,
num_elem_alloted);
}
rcb->req_pending = true;
/* Update the local PI */
ib_q->pi_local = (ib_q->pi_local + num_elem_alloted) % ib_q->num_elem;
DBG_INFO("ib_q->pi_local : %x\n", ib_q->pi_local);
DBG_INFO("*ib_q->ci_virt_addr: %x\n",*(ib_q->ci_virt_addr));
/* Inform the fw about the new IU */
PCI_MEM_PUT32(softs, ib_q->pi_register_abs, ib_q->pi_register_offset, ib_q->pi_local);
PQI_UNLOCK(&ib_q->lock);
DBG_FUNC(" OUT ");
return PQI_STATUS_SUCCESS;
}
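/*
 * Queue-hop sketch (added commentary): the do/while above makes at most
 * two passes over the IB queues starting at the hashed first_qindex.  The
 * first pass demands num_elem_needed contiguous slots; after one full wrap
 * the demand is relaxed to a single element before giving up with
 * PQI_STATUS_QFULL.
 */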
/* Subroutine used to set encryption info as part of RAID bypass IO*/
static inline void pqisrc_set_enc_info(
struct pqi_enc_info *enc_info, struct raid_map *raid_map,
uint64_t first_block)
{
uint32_t volume_blk_size;
/*
* Set the encryption tweak values based on logical block address.
* If the block size is 512, the tweak value is equal to the LBA.
* For other block sizes, tweak value is (LBA * block size) / 512.
*/
volume_blk_size = GET_LE32((uint8_t *)&raid_map->volume_blk_size);
if (volume_blk_size != 512)
first_block = (first_block * volume_blk_size) / 512;
enc_info->data_enc_key_index =
GET_LE16((uint8_t *)&raid_map->data_encryption_key_index);
enc_info->encrypt_tweak_upper = ((uint32_t)(((first_block) >> 16) >> 16));
enc_info->encrypt_tweak_lower = ((uint32_t)(first_block));
}
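/*
 * Tweak arithmetic example (illustrative numbers): on a 4096-byte-block
 * volume, LBA 10 yields first_block = (10 * 4096) / 512 = 80, split into
 * encrypt_tweak_lower = 80 and encrypt_tweak_upper = 0.
 */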
/*
* Attempt to perform offload RAID mapping for a logical volume I/O.
*/
#define HPSA_RAID_0 0
#define HPSA_RAID_4 1
#define HPSA_RAID_1 2 /* also used for RAID 10 */
#define HPSA_RAID_5 3 /* also used for RAID 50 */
#define HPSA_RAID_51 4
#define HPSA_RAID_6 5 /* also used for RAID 60 */
#define HPSA_RAID_ADM 6 /* also used for RAID 1+0 ADM */
#define HPSA_RAID_MAX HPSA_RAID_ADM
#define HPSA_RAID_UNKNOWN 0xff
/* Subroutine used to parse the scsi opcode and build the CDB for RAID bypass*/
int check_for_scsi_opcode(uint8_t *cdb, boolean_t *is_write, uint64_t *fst_blk,
uint32_t *blk_cnt)
{
switch (cdb[0]) {
case SCMD_WRITE_6:
*is_write = true;
/* FALLTHROUGH */
case SCMD_READ_6:
*fst_blk = (uint64_t)(((cdb[1] & 0x1F) << 16) |
(cdb[2] << 8) | cdb[3]);
*blk_cnt = (uint32_t)cdb[4];
if (*blk_cnt == 0)
*blk_cnt = 256;
break;
case SCMD_WRITE_10:
*is_write = true;
/* FALLTHROUGH */
case SCMD_READ_10:
*fst_blk = (uint64_t)GET_BE32(&cdb[2]);
*blk_cnt = (uint32_t)GET_BE16(&cdb[7]);
break;
case SCMD_WRITE_12:
*is_write = true;
/* FALLTHROUGH */
case SCMD_READ_12:
*fst_blk = (uint64_t)GET_BE32(&cdb[2]);
*blk_cnt = GET_BE32(&cdb[6]);
break;
case SCMD_WRITE_16:
*is_write = true;
/* FALLTHROUGH */
case SCMD_READ_16:
*fst_blk = GET_BE64(&cdb[2]);
*blk_cnt = GET_BE32(&cdb[10]);
break;
default:
/* Process via normal I/O path. */
return PQI_STATUS_FAILURE;
}
return PQI_STATUS_SUCCESS;
}
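/*
 * Decode sketch (illustrative; 0x28 is the standard READ(10) opcode):
 */
#if 0
uint8_t cdb10[10] = { 0x28, 0, 0, 0, 0x10, 0, 0, 0, 0x08, 0 };
boolean_t is_write = false;
uint64_t fst_blk;
uint32_t blk_cnt;
(void)check_for_scsi_opcode(cdb10, &is_write, &fst_blk, &blk_cnt);
/* yields is_write == false, fst_blk == 0x10, blk_cnt == 8 */
#endif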
/*
* Function used to build and send RAID bypass request to the adapter
*/
int pqisrc_send_scsi_cmd_raidbypass(pqisrc_softstate_t *softs,
pqi_scsi_dev_t *device, rcb_t *rcb, uint8_t *cdb)
{
struct raid_map *raid_map;
boolean_t is_write = false;
uint32_t map_idx;
uint64_t fst_blk, lst_blk;
uint32_t blk_cnt, blks_per_row;
uint64_t fst_row, lst_row;
uint32_t fst_row_offset, lst_row_offset;
uint32_t fst_col, lst_col;
uint32_t r5or6_blks_per_row;
uint64_t r5or6_fst_row, r5or6_lst_row;
uint32_t r5or6_fst_row_offset, r5or6_lst_row_offset;
uint32_t r5or6_fst_col, r5or6_lst_col;
uint16_t data_disks_per_row, total_disks_per_row;
uint16_t layout_map_count;
uint32_t stripesz;
uint16_t strip_sz;
uint32_t fst_grp, lst_grp, cur_grp;
uint32_t map_row;
uint64_t disk_block;
uint32_t disk_blk_cnt;
uint8_t cdb_length;
int offload_to_mirror;
int i;
DBG_FUNC(" IN \n");
DBG_IO("!!!!!\n");
/* Check for eligible opcode, get LBA and block count. */
memcpy(cdb, OS_GET_CDBP(rcb), rcb->cmdlen);
for(i = 0; i < rcb->cmdlen ; i++)
DBG_IO(" CDB [ %d ] : %x\n",i,cdb[i]);
if(check_for_scsi_opcode(cdb, &is_write,
&fst_blk, &blk_cnt) == PQI_STATUS_FAILURE)
return PQI_STATUS_FAILURE;
/* Check for write to non-RAID-0. */
if (is_write && device->raid_level != SA_RAID_0)
return PQI_STATUS_FAILURE;
if(blk_cnt == 0)
return PQI_STATUS_FAILURE;
lst_blk = fst_blk + blk_cnt - 1;
raid_map = device->raid_map;
/* Check for invalid block or wraparound. */
if (lst_blk >= GET_LE64((uint8_t *)&raid_map->volume_blk_cnt) ||
lst_blk < fst_blk)
return PQI_STATUS_FAILURE;
data_disks_per_row = GET_LE16((uint8_t *)&raid_map->data_disks_per_row);
strip_sz = GET_LE16((uint8_t *)(&raid_map->strip_size));
layout_map_count = GET_LE16((uint8_t *)(&raid_map->layout_map_count));
/* Calculate stripe information for the request. */
blks_per_row = data_disks_per_row * strip_sz;
/* use __udivdi3 ? */
fst_row = fst_blk / blks_per_row;
lst_row = lst_blk / blks_per_row;
fst_row_offset = (uint32_t)(fst_blk - (fst_row * blks_per_row));
lst_row_offset = (uint32_t)(lst_blk - (lst_row * blks_per_row));
fst_col = fst_row_offset / strip_sz;
lst_col = lst_row_offset / strip_sz;
/* If this isn't a single row/column then give to the controller. */
if (fst_row != lst_row || fst_col != lst_col)
return PQI_STATUS_FAILURE;
/* Proceeding with driver mapping. */
total_disks_per_row = data_disks_per_row +
GET_LE16((uint8_t *)(&raid_map->metadata_disks_per_row));
map_row = ((uint32_t)(fst_row >> raid_map->parity_rotation_shift)) %
GET_LE16((uint8_t *)(&raid_map->row_cnt));
map_idx = (map_row * total_disks_per_row) + fst_col;
/* RAID 1 */
if (device->raid_level == SA_RAID_1) {
if (device->offload_to_mirror)
map_idx += data_disks_per_row;
device->offload_to_mirror = !device->offload_to_mirror;
} else if (device->raid_level == SA_RAID_ADM) {
/* RAID ADM */
/*
* Handles N-way mirrors (R1-ADM) and R10 with # of drives
* divisible by 3.
*/
offload_to_mirror = device->offload_to_mirror;
if (offload_to_mirror == 0) {
/* use physical disk in the first mirrored group. */
map_idx %= data_disks_per_row;
} else {
do {
/*
* Determine mirror group that map_idx
* indicates.
*/
cur_grp = map_idx / data_disks_per_row;
if (offload_to_mirror != cur_grp) {
if (cur_grp <
layout_map_count - 1) {
/*
* Select raid index from
* next group.
*/
map_idx += data_disks_per_row;
cur_grp++;
} else {
/*
* Select raid index from first
* group.
*/
map_idx %= data_disks_per_row;
cur_grp = 0;
}
}
} while (offload_to_mirror != cur_grp);
}
/* Set mirror group to use next time. */
offload_to_mirror =
(offload_to_mirror >= layout_map_count - 1) ?
0 : offload_to_mirror + 1;
if(offload_to_mirror >= layout_map_count)
return PQI_STATUS_FAILURE;
device->offload_to_mirror = offload_to_mirror;
/*
* Avoid direct use of device->offload_to_mirror within this
* function since multiple threads might simultaneously
* increment it beyond the range of device->layout_map_count -1.
*/
} else if ((device->raid_level == SA_RAID_5 ||
device->raid_level == SA_RAID_6) && layout_map_count > 1) {
/* RAID 50/60 */
/* Verify first and last block are in same RAID group */
r5or6_blks_per_row = strip_sz * data_disks_per_row;
stripesz = r5or6_blks_per_row * layout_map_count;
fst_grp = (fst_blk % stripesz) / r5or6_blks_per_row;
lst_grp = (lst_blk % stripesz) / r5or6_blks_per_row;
if (fst_grp != lst_grp)
return PQI_STATUS_FAILURE;
/* Verify request is in a single row of RAID 5/6 */
fst_row = r5or6_fst_row =
fst_blk / stripesz;
r5or6_lst_row = lst_blk / stripesz;
if (r5or6_fst_row != r5or6_lst_row)
return PQI_STATUS_FAILURE;
/* Verify request is in a single column */
fst_row_offset = r5or6_fst_row_offset =
(uint32_t)((fst_blk % stripesz) %
r5or6_blks_per_row);
r5or6_lst_row_offset =
(uint32_t)((lst_blk % stripesz) %
r5or6_blks_per_row);
fst_col = r5or6_fst_row_offset / strip_sz;
r5or6_fst_col = fst_col;
r5or6_lst_col = r5or6_lst_row_offset / strip_sz;
if (r5or6_fst_col != r5or6_lst_col)
return PQI_STATUS_FAILURE;
/* Request is eligible */
map_row =
((uint32_t)(fst_row >> raid_map->parity_rotation_shift)) %
GET_LE16((uint8_t *)(&raid_map->row_cnt));
map_idx = (fst_grp *
(GET_LE16((uint8_t *)(&raid_map->row_cnt)) *
total_disks_per_row)) +
(map_row * total_disks_per_row) + fst_col;
}
if (map_idx >= RAID_MAP_MAX_ENTRIES)
return PQI_STATUS_FAILURE;
rcb->ioaccel_handle = raid_map->dev_data[map_idx].ioaccel_handle;
disk_block = GET_LE64((uint8_t *)(&raid_map->disk_starting_blk)) +
fst_row * strip_sz +
(fst_row_offset - fst_col * strip_sz);
disk_blk_cnt = blk_cnt;
/* Handle differing logical/physical block sizes. */
if (raid_map->phys_blk_shift) {
disk_block <<= raid_map->phys_blk_shift;
disk_blk_cnt <<= raid_map->phys_blk_shift;
}
if (disk_blk_cnt > 0xffff)
return PQI_STATUS_FAILURE;
/* Build the new CDB for the physical disk I/O. */
if (disk_block > 0xffffffff) {
cdb[0] = is_write ? SCMD_WRITE_16 : SCMD_READ_16;
cdb[1] = 0;
PUT_BE64(disk_block, &cdb[2]);
PUT_BE32(disk_blk_cnt, &cdb[10]);
cdb[14] = 0;
cdb[15] = 0;
cdb_length = 16;
} else {
cdb[0] = is_write ? SCMD_WRITE_10 : SCMD_READ_10;
cdb[1] = 0;
PUT_BE32(disk_block, &cdb[2]);
cdb[6] = 0;
PUT_BE16(disk_blk_cnt, &cdb[7]);
cdb[9] = 0;
cdb_length = 10;
}
if (GET_LE16((uint8_t *)(&raid_map->flags)) &
RAID_MAP_ENCRYPTION_ENABLED) {
pqisrc_set_enc_info(&rcb->enc_info, raid_map,
fst_blk);
rcb->encrypt_enable = true;
} else {
rcb->encrypt_enable = false;
}
rcb->cmdlen = cdb_length;
DBG_FUNC("OUT");
return PQI_STATUS_SUCCESS;
}
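/*
 * RAID-0 mapping example (illustrative numbers): with 4 data disks and a
 * 128-block strip, blks_per_row = 512.  An 8-block read at LBA 1000 gives
 * fst_row = 1, fst_row_offset = 488, fst_col = 3, so the request maps to
 * disk index 3 at disk_starting_blk + 1 * 128 + (488 - 3 * 128) =
 * disk_starting_blk + 232.
 */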
/* Function used to submit a TMF to the adapter */
int pqisrc_send_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp,
rcb_t *rcb, int req_id, int tmf_type)
{
int rval = PQI_STATUS_SUCCESS;
pqi_tmf_req_t tmf_req;
memset(&tmf_req, 0, sizeof(pqi_tmf_req_t));
DBG_FUNC("IN");
tmf_req.header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
tmf_req.header.iu_length = sizeof(tmf_req) - sizeof(iu_header_t);
tmf_req.req_id = rcb->tag;
memcpy(tmf_req.lun, devp->scsi3addr, sizeof(tmf_req.lun));
tmf_req.tmf = tmf_type;
tmf_req.req_id_to_manage = req_id;
tmf_req.resp_qid = OS_GET_TMF_RESP_QID(softs, rcb);
tmf_req.obq_id_to_manage = rcb->resp_qid;
rcb->req_pending = true;
rval = pqisrc_submit_cmnd(softs,
&softs->op_raid_ib_q[OS_GET_TMF_REQ_QINDEX(softs, rcb)], &tmf_req);
if (rval != PQI_STATUS_SUCCESS) {
DBG_ERR("Unable to submit command rval=%d\n", rval);
return rval;
}
rval = pqisrc_wait_on_condition(softs, rcb);
if (rval != PQI_STATUS_SUCCESS){
DBG_ERR("Task Management tmf_type : %d timeout\n", tmf_type);
rcb->status = REQUEST_FAILED;
}
if (rcb->status != REQUEST_SUCCESS) {
DBG_ERR_BTL(devp, "Task Management failed tmf_type:%d "
"stat:0x%x\n", tmf_type, rcb->status);
rval = PQI_STATUS_FAILURE;
}
DBG_FUNC("OUT");
return rval;
}

@@ -0,0 +1,236 @@
/*-
* Copyright (c) 2018 Microsemi Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* $FreeBSD$ */
#include "smartpqi_includes.h"
/*
* Process internal RAID response in the case of success.
*/
void pqisrc_process_internal_raid_response_success(pqisrc_softstate_t *softs,
rcb_t *rcb)
{
DBG_FUNC("IN");
rcb->status = REQUEST_SUCCESS;
rcb->req_pending = false;
DBG_FUNC("OUT");
}
/*
* Process internal RAID response in the case of failure.
*/
void pqisrc_process_internal_raid_response_error(pqisrc_softstate_t *softs,
rcb_t *rcb, uint16_t err_idx)
{
raid_path_error_info_elem_t error_info;
DBG_FUNC("IN");
rcb->error_info = (char *) (softs->err_buf_dma_mem.virt_addr) +
(err_idx * PQI_ERROR_BUFFER_ELEMENT_LENGTH);
rcb->status = REQUEST_SUCCESS;
memcpy(&error_info, rcb->error_info, sizeof(error_info));
DBG_INFO("error_status 0x%x data_in_result 0x%x data_out_result 0x%x\n",
error_info.status, error_info.data_in_result, error_info.data_out_result);
if (error_info.status != 0)
rcb->status = REQUEST_FAILED;
if (error_info.data_in_result != PQI_RAID_DATA_IN_OUT_GOOD)
rcb->status = REQUEST_FAILED;
if (error_info.data_out_result != PQI_RAID_DATA_IN_OUT_GOOD)
rcb->status = REQUEST_FAILED;
rcb->req_pending = false;
DBG_FUNC("OUT");
}
/*
* Process the AIO/RAID IO in the case of success.
*/
void pqisrc_process_io_response_success(pqisrc_softstate_t *softs,
rcb_t *rcb)
{
DBG_FUNC("IN");
os_io_response_success(rcb);
DBG_FUNC("OUT");
}
/*
* Process the error info for AIO in the case of failure.
*/
void pqisrc_process_aio_response_error(pqisrc_softstate_t *softs,
rcb_t *rcb, uint16_t err_idx)
{
aio_path_error_info_elem_t *err_info = NULL;
DBG_FUNC("IN");
err_info = (aio_path_error_info_elem_t*)
softs->err_buf_dma_mem.virt_addr +
err_idx;
if(err_info == NULL) {
DBG_ERR("err_info structure is NULL err_idx :%x", err_idx);
return;
}
os_aio_response_error(rcb, err_info);
DBG_FUNC("OUT");
}
/*
* Process the error info for RAID IO in the case of failure.
*/
void pqisrc_process_raid_response_error(pqisrc_softstate_t *softs,
rcb_t *rcb, uint16_t err_idx)
{
raid_path_error_info_elem_t *err_info = NULL;
DBG_FUNC("IN");
err_info = (raid_path_error_info_elem_t*)
softs->err_buf_dma_mem.virt_addr +
err_idx;
if(err_info == NULL) {
DBG_ERR("err_info structure is NULL err_idx :%x", err_idx);
return;
}
os_raid_response_error(rcb, err_info);
DBG_FUNC("OUT");
}
/*
* Process the Task Management function response.
*/
int pqisrc_process_task_management_response(pqisrc_softstate_t *softs,
pqi_tmf_resp_t *tmf_resp)
{
int ret = REQUEST_SUCCESS;
uint32_t tag = (uint32_t)tmf_resp->req_id;
rcb_t *rcb = &softs->rcb[tag];
ASSERT(rcb->tag == tag);
DBG_FUNC("IN\n");
switch (tmf_resp->resp_code) {
case SOP_TASK_MANAGEMENT_FUNCTION_COMPLETE:
case SOP_TASK_MANAGEMENT_FUNCTION_SUCCEEDED:
ret = REQUEST_SUCCESS;
break;
default:
DBG_ERR("TMF Failed, Response code : 0x%x\n", tmf_resp->resp_code);
ret = REQUEST_FAILED;
break;
}
rcb->status = ret;
rcb->req_pending = false;
DBG_FUNC("OUT");
return ret;
}
/*
* Function used to process the response from the adapter
* which is invoked by IRQ handler.
*/
void
pqisrc_process_response_queue(pqisrc_softstate_t *softs, int oq_id)
{
ob_queue_t *ob_q;
struct pqi_io_response *response;
uint32_t oq_pi, oq_ci;
DBG_FUNC("IN");
OS_ATOMIC64_INC(softs, num_intrs);
ob_q = &softs->op_ob_q[oq_id - 1]; /* zero for event Q */
oq_ci = ob_q->ci_local;
oq_pi = *(ob_q->pi_virt_addr);
DBG_INFO("ci : %d pi : %d qid : %d\n", oq_ci, oq_pi, ob_q->q_id);
while (1) {
rcb_t *rcb = NULL;
uint32_t tag = 0;
uint32_t offset;
if (oq_pi == oq_ci)
break;
/* Get the response */
offset = oq_ci * ob_q->elem_size;
response = (struct pqi_io_response *)(ob_q->array_virt_addr +
offset);
tag = response->request_id;
rcb = &softs->rcb[tag];
/* Make sure we are processing a valid response. */
ASSERT(rcb->tag == tag && rcb->req_pending);
rcb->req_pending = false;
DBG_INFO("response.header.iu_type : %x \n", response->header.iu_type);
switch (response->header.iu_type) {
case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
rcb->success_cmp_callback(softs, rcb);
break;
case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
rcb->error_cmp_callback(softs, rcb, LE_16(response->error_index));
break;
case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
rcb->req_pending = false;
break;
case PQI_RESPONSE_IU_TASK_MANAGEMENT:
rcb->status = pqisrc_process_task_management_response(softs, (void *)response);
break;
default:
DBG_ERR("Invalid Response IU 0x%x\n",response->header.iu_type);
break;
}
oq_ci = (oq_ci + 1) % ob_q->num_elem;
}
ob_q->ci_local = oq_ci;
PCI_MEM_PUT32(softs, ob_q->ci_register_abs,
ob_q->ci_register_offset, ob_q->ci_local );
DBG_FUNC("OUT");
}
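/*
 * Consumption sketch (added commentary): firmware advances the DMA'd PI;
 * the loop above walks CI up to PI, dispatching each response by IU type,
 * then writes the new CI back through the absolute CI register so the
 * firmware can reuse the freed elements.
 */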

@@ -0,0 +1,451 @@
/*-
* Copyright (c) 2018 Microsemi Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* $FreeBSD$ */
#include "smartpqi_includes.h"
/* Disable MSI-X interrupt generation by clearing SIS_ENABLE_MSIX in the host-to-IOA doorbell */
void sis_disable_msix(pqisrc_softstate_t *softs)
{
uint32_t db_reg;
DBG_FUNC("IN\n");
db_reg = PCI_MEM_GET32(softs, &softs->ioa_reg->host_to_ioa_db,
LEGACY_SIS_IDBR);
db_reg &= ~SIS_ENABLE_MSIX;
PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db,
LEGACY_SIS_IDBR, db_reg);
DBG_FUNC("OUT\n");
}
/* Trigger an NMI as part of the take-controller-offline procedure */
void pqisrc_trigger_nmi_sis(pqisrc_softstate_t *softs)
{
DBG_FUNC("IN\n");
PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db,
LEGACY_SIS_IDBR, LE_32(TRIGGER_NMI_SIS));
DBG_FUNC("OUT\n");
}
/* Switch the adapter back to SIS mode during uninitialization */
int pqisrc_reenable_sis(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
uint32_t timeout = SIS_ENABLE_TIMEOUT;
DBG_FUNC("IN\n");
PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db,
LEGACY_SIS_IDBR, LE_32(REENABLE_SIS));
COND_WAIT(((PCI_MEM_GET32(softs, &softs->ioa_reg->ioa_to_host_db, LEGACY_SIS_ODBR_R) &
REENABLE_SIS) == 0), timeout);
if (!timeout) {
DBG_WARN(" [ %s ] failed to re-enable SIS\n", __func__);
ret = PQI_STATUS_TIMEOUT;
}
DBG_FUNC("OUT\n");
return ret;
}
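/*
 * Illustrative sketch (not part of the driver): the COND_WAIT() calls
 * above rely on the macro decrementing its timeout argument while the
 * condition stays false, which is why "if (!timeout)" afterwards
 * detects expiry.  A minimal expansion might look like this (the delay
 * granularity is an assumption):
 */
#if 0
#define EXAMPLE_COND_WAIT(cond, timeout)			\
	while (!(cond) && (timeout)) {				\
		OS_BUSYWAIT(1000);	/* ~1 ms per tick, assumed */	\
		(timeout)--;					\
	}
#endif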
/* Validate the FW status PQI_CTRL_KERNEL_UP_AND_RUNNING */
int pqisrc_check_fw_status(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
uint32_t timeout = SIS_STATUS_OK_TIMEOUT;
DBG_FUNC("IN\n");
OS_SLEEP(1000000);
COND_WAIT((GET_FW_STATUS(softs) &
PQI_CTRL_KERNEL_UP_AND_RUNNING), timeout);
if (!timeout) {
DBG_ERR("FW check status timedout\n");
ret = PQI_STATUS_TIMEOUT;
}
DBG_FUNC("OUT\n");
return ret;
}
/* Function used to submit a SIS command to the adapter */
static int pqisrc_send_sis_cmd(pqisrc_softstate_t *softs,
uint32_t *mb)
{
int ret = PQI_STATUS_SUCCESS;
int i = 0;
uint32_t timeout = SIS_CMD_COMPLETE_TIMEOUT;
int val;
DBG_FUNC("IN\n");
/* Copy Command to mailbox */
for (i = 0; i < 6; i++)
PCI_MEM_PUT32(softs, &softs->ioa_reg->mb[i],
LEGACY_SIS_SRCV_MAILBOX+i*4, LE_32(mb[i]));
PCI_MEM_PUT32(softs, &softs->ioa_reg->ioa_to_host_db_clr,
LEGACY_SIS_ODBR_R, LE_32(0x1000));
/* Submit the command */
PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db,
LEGACY_SIS_IDBR, LE_32(SIS_CMD_SUBMIT));
#ifdef SIS_POLL_WAIT
/* Wait ~20 ms before starting to poll */
OS_BUSYWAIT(SIS_POLL_START_WAIT_TIME);
#endif
val = PCI_MEM_GET32(softs, &softs->ioa_reg->ioa_to_host_db, LEGACY_SIS_ODBR_R);
DBG_FUNC("val : %x\n",val);
/* Spin waiting for the command to complete */
COND_WAIT((PCI_MEM_GET32(softs, &softs->ioa_reg->ioa_to_host_db, LEGACY_SIS_ODBR_R) &
SIS_CMD_COMPLETE), timeout);
if (!timeout) {
DBG_ERR("Sync command %x, timedout\n", mb[0]);
ret = PQI_STATUS_TIMEOUT;
goto err_out;
}
/* Check command status */
mb[0] = LE_32(PCI_MEM_GET32(softs, &softs->ioa_reg->mb[0], LEGACY_SIS_SRCV_MAILBOX));
if (mb[0] != SIS_CMD_STATUS_SUCCESS) {
DBG_ERR("SIS cmd failed with status = 0x%x\n",
mb[0]);
ret = PQI_STATUS_FAILURE;
goto err_out;
}
/* Copy the mailbox back */
for (i = 1; i < 6; i++)
mb[i] = LE_32(PCI_MEM_GET32(softs, &softs->ioa_reg->mb[i], LEGACY_SIS_SRCV_MAILBOX+i*4));
DBG_FUNC("OUT\n");
return ret;
err_out:
DBG_FUNC("OUT failed\n");
return ret;
}
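/*
 * Mailbox convention illustrated by pqisrc_send_sis_cmd() above: the
 * caller places the opcode in mb[0] and any parameters in mb[1..5]; on
 * return, mb[0] carried the status word (compared against
 * SIS_CMD_STATUS_SUCCESS) and mb[1..5] hold any results.
 */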
/* First SIS command for the adapter to check PQI support */
int pqisrc_get_adapter_properties(pqisrc_softstate_t *softs,
uint32_t *prop, uint32_t *ext_prop)
{
int ret = PQI_STATUS_SUCCESS;
uint32_t mb[6] = {0};
DBG_FUNC("IN\n");
mb[0] = SIS_CMD_GET_ADAPTER_PROPERTIES;
ret = pqisrc_send_sis_cmd(softs, mb);
if (!ret) {
DBG_INFO("GET_PROPERTIES prop = %x, ext_prop = %x\n",
mb[1], mb[4]);
*prop = mb[1];
*ext_prop = mb[4];
}
DBG_FUNC("OUT\n");
return ret;
}
/* Second SIS command to the adapter: GET_COMM_PREFERRED_SETTINGS */
int pqisrc_get_preferred_settings(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
uint32_t mb[6] = {0};
DBG_FUNC("IN\n");
mb[0] = SIS_CMD_GET_COMM_PREFERRED_SETTINGS;
ret = pqisrc_send_sis_cmd(softs, mb);
if (!ret) {
/* 31:16 maximum command size in KB */
softs->pref_settings.max_cmd_size = mb[1] >> 16;
/* 15:0 maximum FIB size in bytes */
softs->pref_settings.max_fib_size = mb[1] & 0x0000FFFF;
DBG_INFO("cmd size = %x, fib size = %x\n",
softs->pref_settings.max_cmd_size,
softs->pref_settings.max_fib_size);
}
DBG_FUNC("OUT\n");
return ret;
}
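/*
 * Worked example (illustrative, not from the firmware spec): if the
 * controller returned mb[1] == 0x00200200, then
 *	max_cmd_size = 0x00200200 >> 16        = 0x20  (KB)
 *	max_fib_size = 0x00200200 & 0x0000FFFF = 0x200 (bytes)
 */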
/* Get supported PQI capabilities from the adapter */
int pqisrc_get_sis_pqi_cap(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
uint32_t mb[6] = {0};
DBG_FUNC("IN\n");
mb[0] = SIS_CMD_GET_PQI_CAPABILITIES;
ret = pqisrc_send_sis_cmd(softs, mb);
if (!ret) {
softs->pqi_cap.max_sg_elem = mb[1];
softs->pqi_cap.max_transfer_size = mb[2];
softs->pqi_cap.max_outstanding_io = mb[3];
#ifdef DMA_ATTR
softs->os_specific.buf_dma_attr.dma_attr_sgllen =
softs->pqi_cap.max_sg_elem;
softs->os_specific.buf_dma_attr.dma_attr_maxxfer =
softs->pqi_cap.max_transfer_size;
softs->os_specific.buf_dma_attr.dma_attr_count_max =
softs->pqi_cap.max_transfer_size - 1;
#endif
softs->pqi_cap.conf_tab_off = mb[4];
softs->pqi_cap.conf_tab_sz = mb[5];
DBG_INFO("max_sg_elem = %x\n",
softs->pqi_cap.max_sg_elem);
DBG_INFO("max_transfer_size = %x\n",
softs->pqi_cap.max_transfer_size);
DBG_INFO("max_outstanding_io = %x\n",
softs->pqi_cap.max_outstanding_io);
}
DBG_FUNC("OUT\n");
return ret;
}
/* Send INIT STRUCT BASE ADDR, one of the SIS commands */
int pqisrc_init_struct_base(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
uint32_t elem_size = 0;
uint32_t num_elem = 0;
struct dma_mem init_struct_mem = {0};
struct init_base_struct *init_struct = NULL;
uint32_t mb[6] = {0};
DBG_FUNC("IN\n");
/* Allocate init struct */
memset(&init_struct_mem, 0, sizeof(struct dma_mem));
init_struct_mem.size = sizeof(struct init_base_struct);
init_struct_mem.align = PQISRC_INIT_STRUCT_DMA_ALIGN;
init_struct_mem.tag = "init_struct";
ret = os_dma_mem_alloc(softs, &init_struct_mem);
if (ret) {
DBG_ERR("Failed to Allocate error buffer ret : %d\n",
ret);
goto err_out;
}
/*
 * Calculate the error buffer size.
 * Valid tag values are 1, 2, ..., softs->max_outstanding_io; the rcb
 * and error buffer are indexed by tag, and since tag 0 is never used,
 * one extra element is allocated.
 */
num_elem = softs->pqi_cap.max_outstanding_io + 1;
elem_size = PQISRC_ERR_BUF_ELEM_SIZE;
softs->err_buf_dma_mem.size = num_elem * elem_size;
/* Allocate error buffer */
softs->err_buf_dma_mem.align = PQISRC_ERR_BUF_DMA_ALIGN;
softs->err_buf_dma_mem.tag = "error_buffer";
ret = os_dma_mem_alloc(softs, &softs->err_buf_dma_mem);
if (ret) {
DBG_ERR("Failed to Allocate error buffer ret : %d\n",
ret);
goto err_error_buf_alloc;
}
/* Fill init struct */
init_struct = (struct init_base_struct *)DMA_TO_VIRT(&init_struct_mem);
init_struct->revision = PQISRC_INIT_STRUCT_REVISION;
init_struct->flags = 0;
init_struct->err_buf_paddr_l = DMA_PHYS_LOW(&softs->err_buf_dma_mem);
init_struct->err_buf_paddr_h = DMA_PHYS_HIGH(&softs->err_buf_dma_mem);
init_struct->err_buf_elem_len = elem_size;
init_struct->err_buf_num_elem = num_elem;
mb[0] = SIS_CMD_INIT_BASE_STRUCT_ADDRESS;
mb[1] = DMA_PHYS_LOW(&init_struct_mem);
mb[2] = DMA_PHYS_HIGH(&init_struct_mem);
mb[3] = init_struct_mem.size;
ret = pqisrc_send_sis_cmd(softs, mb);
if (ret)
goto err_sis_cmd;
DBG_FUNC("OUT\n");
os_dma_mem_free(softs, &init_struct_mem);
return ret;
err_sis_cmd:
os_dma_mem_free(softs, &softs->err_buf_dma_mem);
err_error_buf_alloc:
os_dma_mem_free(softs, &init_struct_mem);
err_out:
DBG_FUNC("OUT failed %d\n", ret);
return PQI_STATUS_FAILURE;
}
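/*
 * Illustrative sketch (not part of the driver): per the comment above,
 * the error buffer is indexed by tag and tag 0 is never used, so the
 * element belonging to an I/O can be located with plain offset
 * arithmetic once the firmware reports an error index.
 */
#if 0
static void *
example_err_buf_for_tag(pqisrc_softstate_t *softs, uint32_t tag)
{
	char *base = DMA_TO_VIRT(&softs->err_buf_dma_mem);

	return (base + (tag * PQISRC_ERR_BUF_ELEM_SIZE));
}
#endif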
/*
* SIS initialization of the adapter in a sequence of
* - GET_ADAPTER_PROPERTIES
* - GET_COMM_PREFERRED_SETTINGS
* - GET_PQI_CAPABILITIES
* - INIT_STRUCT_BASE ADDR
*/
int pqisrc_sis_init(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
uint32_t prop = 0;
uint32_t ext_prop = 0;
DBG_FUNC("IN\n");
ret = pqisrc_force_sis(softs);
if (ret) {
DBG_ERR("Failed to switch back the adapter to SIS mode!\n");
goto err_out;
}
/* Check FW status ready */
ret = pqisrc_check_fw_status(softs);
if (ret) {
DBG_ERR("PQI Controller is not ready !!!\n");
goto err_out;
}
/* Check for PQI support (SIS command 19h) */
ret = pqisrc_get_adapter_properties(softs, &prop, &ext_prop);
if (ret) {
DBG_ERR("Failed to get adapter properties\n");
goto err_out;
}
if (!((prop & SIS_SUPPORT_EXT_OPT) &&
(ext_prop & SIS_SUPPORT_PQI))) {
DBG_ERR("PQI Mode Not Supported\n");
ret = PQI_STATUS_FAILURE;
goto err_out;
}
softs->pqi_reset_quiesce_allowed = false;
if (ext_prop & SIS_SUPPORT_PQI_RESET_QUIESCE)
softs->pqi_reset_quiesce_allowed = true;
/* Send GET_COMM_PREFERRED_SETTINGS (26h) */
ret = pqisrc_get_preferred_settings(softs);
if (ret) {
DBG_ERR("Failed to get adapter pref settings\n");
goto err_out;
}
/* Get PQI capabilities (SIS command 3000h) */
ret = pqisrc_get_sis_pqi_cap(softs);
if (ret) {
DBG_ERR("Failed to get PQI Capabilities\n");
goto err_out;
}
/*
 * DMA memory is needed from this point on; do any OS-specific DMA setup.
 */
ret = os_dma_setup(softs);
if (ret) {
DBG_ERR("Failed to Setup DMA\n");
goto err_out;
}
/* Init struct base addr */
ret = pqisrc_init_struct_base(softs);
if (ret) {
DBG_ERR("Failed to set init struct base addr\n");
goto err_dma;
}
DBG_FUNC("OUT\n");
return ret;
err_dma:
os_dma_destroy(softs);
err_out:
DBG_FUNC("OUT failed\n");
return ret;
}
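/*
 * Illustrative sketch (not part of the driver): a typical adapter
 * bring-up calls pqisrc_sis_init() first and only transitions the
 * controller to PQI mode once it succeeds.  The PQI-mode init function
 * named here is hypothetical.
 */
#if 0
static int
example_adapter_init(pqisrc_softstate_t *softs)
{
	int ret;

	ret = pqisrc_sis_init(softs);
	if (ret != PQI_STATUS_SUCCESS)
		return ret;

	return example_pqi_mode_init(softs);	/* hypothetical */
}
#endif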
/* Deallocate the resources used during SIS initialization */
void pqisrc_sis_uninit(pqisrc_softstate_t *softs)
{
DBG_FUNC("IN\n");
os_dma_mem_free(softs, &softs->err_buf_dma_mem);
os_dma_destroy(softs);
os_resource_free(softs);
pqi_reset(softs);
DBG_FUNC("OUT\n");
}
int pqisrc_sis_wait_for_db_bit_to_clear(pqisrc_softstate_t *softs, uint32_t bit)
{
int rcode = PQI_STATUS_SUCCESS;
uint32_t db_reg;
uint32_t loop_cnt = 0;
DBG_FUNC("IN\n");
while (1) {
db_reg = PCI_MEM_GET32(softs, &softs->ioa_reg->host_to_ioa_db,
LEGACY_SIS_IDBR);
if ((db_reg & bit) == 0)
break;
if (GET_FW_STATUS(softs) & PQI_CTRL_KERNEL_PANIC) {
DBG_ERR("controller kernel panic\n");
rcode = PQI_STATUS_FAILURE;
break;
}
if (loop_cnt++ == SIS_DB_BIT_CLEAR_TIMEOUT_CNT) {
DBG_ERR("door-bell reg bit 0x%x not cleared\n", bit);
rcode = PQI_STATUS_TIMEOUT;
break;
}
OS_SLEEP(500);
}
DBG_FUNC("OUT\n");
return rcode;
}

File diff suppressed because it is too large

View File

@ -0,0 +1,265 @@
/*-
* Copyright (c) 2018 Microsemi Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* $FreeBSD$ */
#include "smartpqi_includes.h"
#ifndef LOCKFREE_STACK
/*
* Function used to release the tag from taglist.
*/
void pqisrc_put_tag(pqi_taglist_t *taglist, uint32_t elem)
{
OS_ACQUIRE_SPINLOCK(&(taglist->lock));
/*DBG_FUNC("IN\n");*/
ASSERT(taglist->num_elem < taglist->max_elem);
if (taglist->num_elem < taglist->max_elem) {
taglist->elem_array[taglist->tail] = elem;
taglist->num_elem++;
taglist->tail = (taglist->tail + 1) % taglist->max_elem;
}
OS_RELEASE_SPINLOCK(&taglist->lock);
/*DBG_FUNC("OUT\n");*/
}
/*
* Function used to get an unoccupied tag from the tag list.
*/
uint32_t pqisrc_get_tag(pqi_taglist_t *taglist)
{
uint32_t elem = INVALID_ELEM;
/*DBG_FUNC("IN\n");*/
OS_ACQUIRE_SPINLOCK(&taglist->lock);
ASSERT(taglist->num_elem > 0);
if (taglist->num_elem > 0) {
elem = taglist->elem_array[taglist->head];
taglist->num_elem--;
taglist->head = (taglist->head + 1) % taglist->max_elem;
}
OS_RELEASE_SPINLOCK(&taglist->lock);
/*DBG_FUNC("OUT got %d\n", elem);*/
return elem;
}
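/*
 * Illustrative sketch (not part of the driver): typical use of the tag
 * list on the I/O submission path.  The taglist field name and the
 * build/post step are assumptions.
 */
#if 0
static int
example_submit_io(pqisrc_softstate_t *softs)
{
	uint32_t tag = pqisrc_get_tag(&softs->taglist);	/* field name assumed */

	if (tag == INVALID_ELEM)
		return PQI_STATUS_FAILURE;	/* no free tags; retry later */

	softs->rcb[tag].tag = tag;
	/* ... build and post the request identified by "tag" ... */

	/* On completion the tag is recycled: */
	pqisrc_put_tag(&softs->taglist, tag);
	return PQI_STATUS_SUCCESS;
}
#endif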
/*
* Initialize circular queue implementation of tag list.
*/
int pqisrc_init_taglist(pqisrc_softstate_t *softs, pqi_taglist_t *taglist,
uint32_t max_elem)
{
int ret = PQI_STATUS_SUCCESS;
int i = 0;
DBG_FUNC("IN\n");
taglist->max_elem = max_elem;
taglist->num_elem = 0;
taglist->head = 0;
taglist->tail = 0;
taglist->elem_array = os_mem_alloc(softs,
(max_elem * sizeof(uint32_t)));
if (!(taglist->elem_array)) {
DBG_FUNC("Unable to allocate memory for taglist\n");
ret = PQI_STATUS_FAILURE;
goto err_out;
}
os_strlcpy(taglist->lockname, "tag_lock", LOCKNAME_SIZE);
ret = os_init_spinlock(softs, &taglist->lock, taglist->lockname);
if (ret) {
DBG_ERR("tag lock initialization failed\n");
taglist->lockcreated = false;
goto err_lock;
}
taglist->lockcreated = true;
/* indices 1 to max_elem are considered valid tags */
for (i = 1; i <= max_elem; i++) {
softs->rcb[i].tag = INVALID_ELEM;
pqisrc_put_tag(taglist, i);
}
DBG_FUNC("OUT\n");
return ret;
err_lock:
os_mem_free(softs, (char *)taglist->elem_array,
(taglist->max_elem * sizeof(uint32_t)));
taglist->elem_array = NULL;
err_out:
DBG_FUNC("OUT failed\n");
return ret;
}
/*
* Destroy circular queue implementation of tag list.
*/
void pqisrc_destroy_taglist(pqisrc_softstate_t *softs, pqi_taglist_t *taglist)
{
DBG_FUNC("IN\n");
os_mem_free(softs, (char *)taglist->elem_array,
(taglist->max_elem * sizeof(uint32_t)));
taglist->elem_array = NULL;
if (taglist->lockcreated == true) {
os_uninit_spinlock(&taglist->lock);
taglist->lockcreated = false;
}
DBG_FUNC("OUT\n");
}
#else /* LOCKFREE_STACK */
/*
* Initialize lock-free stack implementation of tag list.
*/
int pqisrc_init_taglist(pqisrc_softstate_t *softs, lockless_stack_t *stack,
uint32_t max_elem)
{
int ret = PQI_STATUS_SUCCESS;
int index = 0;
DBG_FUNC("IN\n");
/* indices 1 to max_elem are considered valid tags */
stack->num_elements = max_elem + 1;
stack->head.data = 0;
DBG_INFO("Stack head address :%p\n",&stack->head);
/*Allocate memory for stack*/
stack->next_index_array = (uint32_t*)os_mem_alloc(softs,
(stack->num_elements * sizeof(uint32_t)));
if (!(stack->next_index_array)) {
DBG_ERR("Unable to allocate memory for stack\n");
ret = PQI_STATUS_FAILURE;
goto err_out;
}
/* push all the entries to the stack */
for (index = 1; index < stack->num_elements; index++) {
softs->rcb[index].tag = INVALID_ELEM;
pqisrc_put_tag(stack, index);
}
DBG_FUNC("OUT\n");
return ret;
err_out:
DBG_FUNC("Failed OUT\n");
return ret;
}
/*
* Destroy lock-free stack implementation of tag list.
*/
void pqisrc_destroy_taglist(pqisrc_softstate_t *softs, lockless_stack_t *stack)
{
DBG_FUNC("IN\n");
/* de-allocate stack memory */
if (stack->next_index_array) {
os_mem_free(softs, (char *)stack->next_index_array,
(stack->num_elements * sizeof(uint32_t)));
stack->next_index_array = NULL;
}
DBG_FUNC("OUT\n");
}
/*
* Function used to release the tag from taglist.
*/
void pqisrc_put_tag(lockless_stack_t *stack, uint32_t index)
{
union head_list cur_head, new_head;
DBG_FUNC("IN\n");
DBG_INFO("push tag :%d\n",index);
if (index >= stack->num_elements) {
ASSERT(false);
DBG_ERR("Pushed invalid index\n"); /* stack full */
return;
}
if (stack->next_index_array[index] != 0) {
ASSERT(false);
DBG_ERR("Index already present as tag in the stack\n");
return;
}
do {
cur_head = stack->head;
/* increment seq_no */
new_head.top.seq_no = cur_head.top.seq_no + 1;
/* update the index at the top of the stack with the new index */
new_head.top.index = index;
/* Create a link to the previous index */
stack->next_index_array[index] = cur_head.top.index;
} while (OS_ATOMIC64_CAS(&stack->head.data, cur_head.data, new_head.data)
!= cur_head.data);
DBG_FUNC("OUT\n");
return;
}
/*
* Function used to get an unoccupied tag from the tag list.
*/
uint32_t pqisrc_get_tag(lockless_stack_t *stack)
{
union head_list cur_head, new_head;
DBG_FUNC("IN\n");
do {
cur_head = stack->head;
if (cur_head.top.index == 0) /* stack empty */
return INVALID_ELEM;
/* increment seq_no field */
new_head.top.seq_no = cur_head.top.seq_no + 1;
/* update the index at the top of the stack with the next index */
new_head.top.index = stack->next_index_array[cur_head.top.index];
} while (OS_ATOMIC64_CAS(&stack->head.data, cur_head.data, new_head.data)
!= cur_head.data);
stack->next_index_array[cur_head.top.index] = 0;
DBG_INFO("pop tag: %d\n", cur_head.top.index);
DBG_FUNC("OUT\n");
return cur_head.top.index; /*tag*/
}
#endif /* LOCKFREE_STACK */
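/*
 * Illustrative sketch (not part of the driver): the lock-free variant
 * works because head_list packs a sequence number next to the top index
 * in a single 64-bit word, so OS_ATOMIC64_CAS() fails if another thread
 * popped and re-pushed the same index in between (the classic ABA
 * hazard).  The exact field widths below are assumptions.
 */
#if 0
union example_head_list {
	uint64_t data;			/* single CAS target */
	struct {
		uint32_t index;		/* tag at the top of the stack */
		uint32_t seq_no;	/* bumped on every push/pop */
	} top;
};
#endif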

View File

@ -355,6 +355,7 @@ SUBDIR= \
siis \
sis \
sk \
${_smartpqi} \
smbfs \
sn \
snp \
@ -729,6 +730,7 @@ _qlnx= qlnx
_sfxge= sfxge
_sgx= sgx
_sgx_linux= sgx_linux
_smartpqi= smartpqi
.if ${MK_BHYVE} != "no" || defined(ALL_MODULES)
_vmm= vmm

View File

@ -0,0 +1,12 @@
# 5/10/2017
# $FreeBSD$
KMOD = smartpqi
.PATH: ${.CURDIR}/../../dev/${KMOD}
SRCS=	smartpqi_mem.c smartpqi_intr.c smartpqi_main.c smartpqi_cam.c \
	smartpqi_ioctl.c smartpqi_misc.c smartpqi_sis.c smartpqi_init.c \
	smartpqi_queue.c smartpqi_tag.c smartpqi_cmd.c smartpqi_request.c \
	smartpqi_response.c smartpqi_event.c smartpqi_helper.c \
	smartpqi_discovery.c
SRCS+= device_if.h bus_if.h pci_if.h opt_scsi.h opt_cam.h
.include <bsd.kmod.mk>
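# Typical build/load sequence for this module (illustrative; assumes a
# checked-out FreeBSD source tree):
#   cd sys/modules/smartpqi && make
#   kldload ./smartpqi.ko        # or smartpqi_load="YES" in loader.conf(5)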