Similar to the RAID map for logical drives, a JBOD map has been introduced
for JBODs. The driver has to sync the JBOD map with the firmware and use its
sequence numbers as a reference for JBOD FastPath I/Os.

Submitted by:   Sumit Saxena <sumit.saxena@broadcom.com>
Reviewed by:    Kashyap Desai <Kashyap.Desai@broadcom.com>
MFC after:  3 days
Sponsored by:   AVAGO Technologies
This commit is contained in:
kadesai 2016-05-13 12:05:02 +00:00
parent 9084fe7aaf
commit 25bd1246be
3 changed files with 385 additions and 69 deletions

View File

@ -80,6 +80,8 @@ static int mrsas_setup_irq(struct mrsas_softc *sc);
static int mrsas_alloc_mem(struct mrsas_softc *sc);
static int mrsas_init_fw(struct mrsas_softc *sc);
static int mrsas_setup_raidmap(struct mrsas_softc *sc);
static void megasas_setup_jbod_map(struct mrsas_softc *sc);
static int megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend);
static int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
static int mrsas_clear_intr(struct mrsas_softc *sc);
static int mrsas_get_ctrl_info(struct mrsas_softc *sc);
@ -1076,7 +1078,14 @@ mrsas_free_mem(struct mrsas_softc *sc)
if (sc->ld_drv_map[i] != NULL)
free(sc->ld_drv_map[i], M_MRSAS);
}
for (i = 0; i < 2; i++) {
if (sc->jbodmap_phys_addr[i])
bus_dmamap_unload(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i]);
if (sc->jbodmap_mem[i] != NULL)
bus_dmamem_free(sc->jbodmap_tag[i], sc->jbodmap_mem[i], sc->jbodmap_dmamap[i]);
if (sc->jbodmap_tag[i] != NULL)
bus_dma_tag_destroy(sc->jbodmap_tag[i]);
}
/*
* Free version buffer memory
*/
@ -1997,6 +2006,78 @@ ABORT:
return (1);
}
/**
* megasas_setup_jbod_map - setup jbod map for FP seq_number.
* @sc: Adapter soft state
*
* Return 0 on success.
*/
void
megasas_setup_jbod_map(struct mrsas_softc *sc)
{
int i;
uint32_t pd_seq_map_sz;
pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
(sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
if (!sc->ctrl_info->adapterOperations3.useSeqNumJbodFP) {
sc->use_seqnum_jbod_fp = 0;
return;
}
if (sc->jbodmap_mem[0])
goto skip_alloc;
for (i = 0; i < 2; i++) {
if (bus_dma_tag_create(sc->mrsas_parent_tag,
4, 0,
BUS_SPACE_MAXADDR_32BIT,
BUS_SPACE_MAXADDR,
NULL, NULL,
pd_seq_map_sz,
1,
pd_seq_map_sz,
BUS_DMA_ALLOCNOW,
NULL, NULL,
&sc->jbodmap_tag[i])) {
device_printf(sc->mrsas_dev,
"Cannot allocate jbod map tag.\n");
return;
}
if (bus_dmamem_alloc(sc->jbodmap_tag[i],
(void **)&sc->jbodmap_mem[i],
BUS_DMA_NOWAIT, &sc->jbodmap_dmamap[i])) {
device_printf(sc->mrsas_dev,
"Cannot allocate jbod map memory.\n");
return;
}
bzero(sc->jbodmap_mem[i], pd_seq_map_sz);
if (bus_dmamap_load(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i],
sc->jbodmap_mem[i], pd_seq_map_sz,
mrsas_addr_cb, &sc->jbodmap_phys_addr[i],
BUS_DMA_NOWAIT)) {
device_printf(sc->mrsas_dev, "Cannot load jbod map memory.\n");
return;
}
if (!sc->jbodmap_mem[i]) {
device_printf(sc->mrsas_dev,
"Cannot allocate memory for jbod map.\n");
sc->use_seqnum_jbod_fp = 0;
return;
}
}
skip_alloc:
if (!megasas_sync_pd_seq_num(sc, false) &&
!megasas_sync_pd_seq_num(sc, true))
sc->use_seqnum_jbod_fp = 1;
else
sc->use_seqnum_jbod_fp = 0;
device_printf(sc->mrsas_dev, "Jbod map is supported\n");
}
/*
* mrsas_init_fw: Initialize Firmware
* input: Adapter soft state
@ -2096,18 +2177,28 @@ mrsas_init_fw(struct mrsas_softc *sc)
if (sc->secure_jbod_support)
device_printf(sc->mrsas_dev, "FW supports SED \n");
if (sc->use_seqnum_jbod_fp)
device_printf(sc->mrsas_dev, "FW supports JBOD Map \n");
if (mrsas_setup_raidmap(sc) != SUCCESS) {
device_printf(sc->mrsas_dev, "Set up RAID map failed.\n");
return (1);
device_printf(sc->mrsas_dev, "Error: RAID map setup FAILED !!! "
"There seems to be some problem in the controller\n"
"Please contact to the SUPPORT TEAM if the problem persists\n");
}
megasas_setup_jbod_map(sc);
/* For pass-thru, get PD/LD list and controller info */
memset(sc->pd_list, 0,
MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
mrsas_get_pd_list(sc);
if (mrsas_get_pd_list(sc) != SUCCESS) {
device_printf(sc->mrsas_dev, "Get PD list failed.\n");
return (1);
}
memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS);
mrsas_get_ld_list(sc);
if (mrsas_get_ld_list(sc) != SUCCESS) {
device_printf(sc->mrsas_dev, "Get LD lsit failed.\n");
return (1);
}
/*
* Compute the max allowed sectors per IO: The controller info has
* two limits on max sectors. Driver should use the minimum of these
@ -2855,6 +2946,8 @@ mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason)
if (!mrsas_get_map_info(sc))
mrsas_sync_map_info(sc);
megasas_setup_jbod_map(sc);
memset(sc->pd_list, 0,
MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
if (mrsas_get_pd_list(sc) != SUCCESS) {
@ -3086,6 +3179,9 @@ mrsas_get_ctrl_info(struct mrsas_softc *sc)
do_ocr = 0;
mrsas_update_ext_vd_details(sc);
sc->use_seqnum_jbod_fp =
sc->ctrl_info->adapterOperations3.useSeqNumJbodFP;
dcmd_timeout:
mrsas_free_ctlr_info_cmd(sc);
@ -3480,6 +3576,28 @@ mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd
cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
sc->mrsas_aen_triggered = 0;
}
/* FW has an updated PD sequence */
if ((cmd->frame->dcmd.opcode ==
MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
(cmd->frame->dcmd.mbox.b[0] == 1)) {
mtx_lock(&sc->raidmap_lock);
sc->jbod_seq_cmd = NULL;
mrsas_release_mfi_cmd(cmd);
if (cmd_status == MFI_STAT_OK) {
sc->pd_seq_map_id++;
/* Re-register a pd sync seq num cmd */
if (megasas_sync_pd_seq_num(sc, true))
sc->use_seqnum_jbod_fp = 0;
} else {
sc->use_seqnum_jbod_fp = 0;
device_printf(sc->mrsas_dev,
"Jbod map sync failed, status=%x\n", cmd_status);
}
mtx_unlock(&sc->raidmap_lock);
break;
}
/* See if got an event notification */
if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
mrsas_complete_aen(sc, cmd);
@ -3542,9 +3660,10 @@ mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode)
}
if (sc->aen_cmd)
mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd);
if (sc->map_update_cmd)
mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd);
if (sc->jbod_seq_cmd)
mrsas_issue_blocked_abort_cmd(sc, sc->jbod_seq_cmd);
dcmd = &cmd->frame->dcmd;
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
@ -3606,6 +3725,85 @@ mrsas_flush_cache(struct mrsas_softc *sc)
return;
}
/*
 * megasas_sync_pd_seq_num:	sync the system PD sequence-number map with FW
 * input:			Adapter soft state
 *				pend - true: register a pended DCMD that the
 *				firmware completes only when the map changes
 *				(completion handled in
 *				mrsas_complete_mptmfi_passthru);
 *				false: fetch the current map with a polled
 *				command.
 *
 * Returns 0 on success, non-zero on failure.
 * NOTE(review): failure paths mix return conventions (1, ENOMEM, -EINVAL);
 * callers only test for non-zero, so this is benign but inconsistent.
 */
int
megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend)
{
	int retcode = 0;
	u_int8_t do_ocr = 1;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	uint32_t pd_seq_map_sz;
	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
	bus_addr_t pd_seq_h;

	/*
	 * Map size: sync header plus one MR_PD_CFG_SEQ per physical device
	 * (the header already declares the first entry, hence the "- 1").
	 */
	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
	    (sizeof(struct MR_PD_CFG_SEQ) *
	    (MAX_PHYSICAL_DEVICES - 1));

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev,
		    "Cannot alloc for ld map info cmd.\n");
		return 1;
	}
	dcmd = &cmd->frame->dcmd;

	/* Ping-pong between the two preallocated jbod map buffers. */
	pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id & 1)];
	pd_seq_h = sc->jbodmap_phys_addr[(sc->pd_seq_map_id & 1)];
	if (!pd_sync) {
		device_printf(sc->mrsas_dev,
		    "Failed to alloc mem for jbod map info.\n");
		mrsas_release_mfi_cmd(cmd);
		return (ENOMEM);
	}
	memset(pd_sync, 0, pd_seq_map_sz);
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = (pd_seq_map_sz);
	dcmd->opcode = (MR_DCMD_SYSTEM_PD_MAP_GET_INFO);
	dcmd->sgl.sge32[0].phys_addr = (pd_seq_h);
	dcmd->sgl.sge32[0].length = (pd_seq_map_sz);

	if (pend) {
		/*
		 * Pended registration: mbox.b[0] = PEND_FLAG tells FW to
		 * complete this DCMD only when the PD map changes.  The cmd
		 * is parked in sc->jbod_seq_cmd until that completion (or
		 * until aborted at shutdown).
		 * NOTE(review): if mrsas_issue_dcmd() fails here, cmd is not
		 * released and jbod_seq_cmd stays set — verify against the
		 * reset/abort paths.
		 */
		dcmd->mbox.b[0] = MRSAS_DCMD_MBOX_PEND_FLAG;
		dcmd->flags = (MFI_FRAME_DIR_WRITE);
		sc->jbod_seq_cmd = cmd;
		if (mrsas_issue_dcmd(sc, cmd)) {
			device_printf(sc->mrsas_dev,
			    "Fail to send sync map info command.\n");
			return 1;
		} else
			return 0;
	} else
		dcmd->flags = MFI_FRAME_DIR_READ;

	/* Polled fetch of the current map. */
	retcode = mrsas_issue_polled(sc, cmd);
	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;	/* FW may still own the frame */

	/* Sanity-check the FW-reported entry count against our buffer. */
	if (pd_sync->count > MAX_PHYSICAL_DEVICES) {
		device_printf(sc->mrsas_dev,
		    "driver supports max %d JBOD, but FW reports %d\n",
		    MAX_PHYSICAL_DEVICES, pd_sync->count);
		retcode = -EINVAL;
	}
	if (!retcode)
		sc->pd_seq_map_id++;	/* flip to the other buffer next sync */
	do_ocr = 0;

dcmd_timeout:
	if (do_ocr)
		/*
		 * Timed out: schedule a controller reset rather than freeing
		 * a frame the firmware may still write to.
		 */
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
	else
		mrsas_release_mfi_cmd(cmd);

	return (retcode);
}
/*
* mrsas_get_map_info: Load and validate RAID map input:
* Adapter instance soft state

View File

@ -577,6 +577,7 @@ Mpi2IOCInitRequest_t, MPI2_POINTER pMpi2IOCInitRequest_t;
#define MAX_PHYSICAL_DEVICES 256
#define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES)
#define MR_DCMD_LD_MAP_GET_INFO 0x0300e101
#define MR_DCMD_SYSTEM_PD_MAP_GET_INFO 0x0200e102
#define MRSAS_MAX_PD_CHANNELS 1
@ -863,6 +864,22 @@ struct IO_REQUEST_INFO {
u_int8_t pd_after_lb;
};
/*
 * define MR_PD_CFG_SEQ structure for system PDs
 */
struct MR_PD_CFG_SEQ {
	u_int16_t seqNum;	/* PD sequence number; copied into RaidContext.configSeqNum for FastPath I/O */
	u_int16_t devHandle;	/* FW device handle used as io_request->DevHandle */
	u_int8_t reserved[4];	/* pads each entry to 8 bytes */
} __packed;
/*
 * Firmware-defined system PD sequence-number map, fetched/synced with
 * MR_DCMD_SYSTEM_PD_MAP_GET_INFO (see megasas_sync_pd_seq_num).
 */
struct MR_PD_CFG_SEQ_NUM_SYNC {
	u_int32_t size;		/* presumably total map size in bytes, FW-filled — TODO confirm (driver never reads it) */
	u_int32_t count;	/* number of valid seq[] entries; driver rejects count > MAX_PHYSICAL_DEVICES */
	struct MR_PD_CFG_SEQ seq[1];	/* variable length; buffers are sized for MAX_PHYSICAL_DEVICES entries */
} __packed;
typedef struct _MR_LD_TARGET_SYNC {
u_int8_t targetId;
u_int8_t reserved;
@ -1927,7 +1944,12 @@ struct mrsas_ctrl_info {
u_int32_t supportCacheBypassModes:1;
u_int32_t supportSecurityonJBOD:1;
u_int32_t discardCacheDuringLDDelete:1;
u_int32_t reserved:12;
u_int32_t supportTTYLogCompression:1;
u_int32_t supportCPLDUpdate:1;
u_int32_t supportDiskCacheSettingForSysPDs:1;
u_int32_t supportExtendedSSCSize:1;
u_int32_t useSeqNumJbodFP:1;
u_int32_t reserved:7;
} adapterOperations3;
u_int8_t pad[0x800 - 0x7EC]; /* 0x7EC */
@ -2697,7 +2719,9 @@ struct mrsas_softc {
u_int8_t chain_offset_mfi_pthru;
u_int32_t map_sz;
u_int64_t map_id;
u_int64_t pd_seq_map_id;
struct mrsas_mfi_cmd *map_update_cmd;
struct mrsas_mfi_cmd *jbod_seq_cmd;
struct mrsas_mfi_cmd *aen_cmd;
u_int8_t fast_path_io;
void *chan;
@ -2708,6 +2732,12 @@ struct mrsas_softc {
u_int8_t do_timedout_reset;
u_int32_t reset_in_progress;
u_int32_t reset_count;
bus_dma_tag_t jbodmap_tag[2];
bus_dmamap_t jbodmap_dmamap[2];
void *jbodmap_mem[2];
bus_addr_t jbodmap_phys_addr[2];
bus_dma_tag_t raidmap_tag[2];
bus_dmamap_t raidmap_dmamap[2];
void *raidmap_mem[2];
@ -2751,6 +2781,7 @@ struct mrsas_softc {
LD_SPAN_INFO log_to_span[MAX_LOGICAL_DRIVES_EXT];
u_int8_t secure_jbod_support;
u_int8_t use_seqnum_jbod_fp;
u_int8_t max256vdSupport;
u_int16_t fw_supported_vd_count;
u_int16_t fw_supported_pd_count;

View File

@ -65,11 +65,14 @@ int
mrsas_map_request(struct mrsas_softc *sc,
struct mrsas_mpt_cmd *cmd, union ccb *ccb);
int
mrsas_build_ldio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
mrsas_build_ldio_rw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
union ccb *ccb);
int
mrsas_build_dcdb(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
union ccb *ccb, struct cam_sim *sim);
mrsas_build_ldio_nonrw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
union ccb *ccb);
int
mrsas_build_syspdio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
union ccb *ccb, struct cam_sim *sim, u_int8_t fp_possible);
int
mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
union ccb *ccb, u_int32_t device_id,
@ -415,6 +418,7 @@ mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
struct ccb_hdr *ccb_h = &(ccb->ccb_h);
struct ccb_scsiio *csio = &(ccb->csio);
MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
u_int8_t cmd_type;
if ((csio->cdb_io.cdb_bytes[0]) == SYNCHRONIZE_CACHE) {
ccb->ccb_h.status = CAM_REQ_CMP;
@ -517,19 +521,44 @@ mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
mtx_lock(&sc->raidmap_lock);
/* Check for IO type READ-WRITE targeted for Logical Volume */
if (mrsas_find_io_type(sim, ccb) == READ_WRITE_LDIO) {
cmd_type = mrsas_find_io_type(sim, ccb);
switch (cmd_type) {
case READ_WRITE_LDIO:
/* Build READ-WRITE IO for Logical Volume */
if (mrsas_build_ldio(sc, cmd, ccb)) {
device_printf(sc->mrsas_dev, "Build LDIO failed.\n");
if (mrsas_build_ldio_rw(sc, cmd, ccb)) {
device_printf(sc->mrsas_dev, "Build RW LDIO failed.\n");
mtx_unlock(&sc->raidmap_lock);
return (1);
}
} else {
if (mrsas_build_dcdb(sc, cmd, ccb, sim)) {
device_printf(sc->mrsas_dev, "Build DCDB failed.\n");
break;
case NON_READ_WRITE_LDIO:
/* Build NON READ-WRITE IO for Logical Volume */
if (mrsas_build_ldio_nonrw(sc, cmd, ccb)) {
device_printf(sc->mrsas_dev, "Build NON-RW LDIO failed.\n");
mtx_unlock(&sc->raidmap_lock);
return (1);
}
break;
case READ_WRITE_SYSPDIO:
case NON_READ_WRITE_SYSPDIO:
if (sc->secure_jbod_support &&
(cmd_type == NON_READ_WRITE_SYSPDIO)) {
/* Build NON-RW IO for JBOD */
if (mrsas_build_syspdio(sc, cmd, ccb, sim, 0)) {
device_printf(sc->mrsas_dev,
"Build SYSPDIO failed.\n");
mtx_unlock(&sc->raidmap_lock);
return (1);
}
} else {
/* Build RW IO for JBOD */
if (mrsas_build_syspdio(sc, cmd, ccb, sim, 1)) {
device_printf(sc->mrsas_dev,
"Build SYSPDIO failed.\n");
mtx_unlock(&sc->raidmap_lock);
return (1);
}
}
}
mtx_unlock(&sc->raidmap_lock);
@ -668,7 +697,7 @@ mrsas_get_request_desc(struct mrsas_softc *sc, u_int16_t index)
}
/*
* mrsas_build_ldio: Builds an LDIO command
* mrsas_build_ldio_rw: Builds an LDIO command
* input: Adapter instance soft state
* Pointer to command packet
* Pointer to CCB
@ -677,7 +706,7 @@ mrsas_get_request_desc(struct mrsas_softc *sc, u_int16_t index)
* built successfully, otherwise it returns a 1.
*/
int
mrsas_build_ldio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
mrsas_build_ldio_rw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
union ccb *ccb)
{
struct ccb_hdr *ccb_h = &(ccb->ccb_h);
@ -879,7 +908,52 @@ mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
}
/*
* mrsas_build_dcdb: Builds an DCDB command
* mrsas_build_ldio_nonrw: Builds an LDIO command
* input: Adapter instance soft state
* Pointer to command packet
* Pointer to CCB
*
* This function builds the LDIO command packet. It returns 0 if the command is
* built successfully, otherwise it returns a 1.
*/
int
mrsas_build_ldio_nonrw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
union ccb *ccb)
{
struct ccb_hdr *ccb_h = &(ccb->ccb_h);
u_int32_t device_id;
MRSAS_RAID_SCSI_IO_REQUEST *io_request;
io_request = cmd->io_request;
device_id = ccb_h->target_id;
/* FW path for LD Non-RW (SCSI management commands) */
io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
io_request->DevHandle = device_id;
cmd->request_desc->SCSIIO.RequestFlags =
(MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
io_request->RaidContext.VirtualDiskTgtId = device_id;
io_request->LUN[1] = ccb_h->target_lun & 0xF;
io_request->DataLength = cmd->length;
if (mrsas_map_request(sc, cmd, ccb) == SUCCESS) {
if (cmd->sge_count > MRSAS_MAX_SGL) {
device_printf(sc->mrsas_dev, "Error: sge_count (0x%x) exceeds"
"max (0x%x) allowed\n", cmd->sge_count, sc->max_num_sge);
return (1);
}
io_request->RaidContext.numSGE = cmd->sge_count;
} else {
device_printf(sc->mrsas_dev, "Data map/load failed.\n");
return (1);
}
return (0);
}
/*
* mrsas_build_syspdio: Builds an DCDB command
* input: Adapter instance soft state
* Pointer to command packet
* Pointer to CCB
@ -888,74 +962,87 @@ mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
* is built successfully, otherwise it returns a 1.
*/
int
mrsas_build_dcdb(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
union ccb *ccb, struct cam_sim *sim)
mrsas_build_syspdio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
union ccb *ccb, struct cam_sim *sim, u_int8_t fp_possible)
{
struct ccb_hdr *ccb_h = &(ccb->ccb_h);
u_int32_t device_id;
MR_DRV_RAID_MAP_ALL *map_ptr;
MR_DRV_RAID_MAP_ALL *local_map_ptr;
MRSAS_RAID_SCSI_IO_REQUEST *io_request;
struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id - 1) & 1];
io_request = cmd->io_request;
device_id = ccb_h->target_id;
map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
/*
* Check if this is RW for system PD or
* it's a NON RW for sys PD and there is NO secure jbod FW support
*/
if (cam_sim_bus(sim) == 1 &&
sc->pd_list[device_id].driveState == MR_PD_STATE_SYSTEM) {
local_map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
io_request->RaidContext.RAIDFlags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
<< MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
io_request->RaidContext.regLockFlags = 0;
io_request->RaidContext.regLockRowLBA = 0;
io_request->RaidContext.regLockLength = 0;
/* If FW supports PD sequence number */
if (sc->use_seqnum_jbod_fp &&
sc->pd_list[device_id].driveType == 0x00) {
//printf("Using Drv seq num\n");
io_request->RaidContext.VirtualDiskTgtId = device_id + 255;
io_request->RaidContext.configSeqNum = pd_sync->seq[device_id].seqNum;
io_request->DevHandle = pd_sync->seq[device_id].devHandle;
io_request->RaidContext.regLockFlags |=
(MR_RL_FLAGS_SEQ_NUM_ENABLE | MR_RL_FLAGS_GRANT_DESTINATION_CUDA);
io_request->RaidContext.Type = MPI2_TYPE_CUDA;
io_request->RaidContext.nseg = 0x1;
} else if (sc->fast_path_io) {
//printf("Using LD RAID map\n");
io_request->RaidContext.VirtualDiskTgtId = device_id;
io_request->RaidContext.configSeqNum = 0;
local_map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
io_request->DevHandle =
map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
io_request->RaidContext.RAIDFlags =
MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD <<
MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
cmd->request_desc->SCSIIO.MSIxIndex =
sc->msix_vectors ? smp_processor_id() % sc->msix_vectors : 0;
if (sc->secure_jbod_support && (mrsas_find_io_type(sim, ccb) == NON_READ_WRITE_SYSPDIO)) {
/* system pd firmware path */
io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
cmd->request_desc->SCSIIO.RequestFlags =
(MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
} else {
/* system pd fast path */
io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
io_request->RaidContext.timeoutValue = map_ptr->raidMap.fpPdIoTimeoutSec;
io_request->RaidContext.regLockFlags = 0;
io_request->RaidContext.regLockRowLBA = 0;
io_request->RaidContext.regLockLength = 0;
cmd->request_desc->SCSIIO.RequestFlags =
(MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
/*
* NOTE - For system pd RW cmds only IoFlags will be FAST_PATH
* Because the NON RW cmds will now go via FW Queue
* and not the Exception queue
*/
if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
io_request->IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
}
local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
} else {
/* FW path for SysPD or LD Non-RW (SCSI management commands) */
//printf("Using FW PATH\n");
/* Want to send all IO via FW path */
io_request->RaidContext.VirtualDiskTgtId = device_id;
io_request->RaidContext.configSeqNum = 0;
io_request->DevHandle = 0xFFFF;
}
cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
cmd->request_desc->SCSIIO.MSIxIndex =
sc->msix_vectors ? smp_processor_id() % sc->msix_vectors : 0;
if (!fp_possible) {
/* system pd firmware path */
io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
io_request->DevHandle = device_id;
cmd->request_desc->SCSIIO.RequestFlags =
(MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
io_request->RaidContext.timeoutValue =
local_map_ptr->raidMap.fpPdIoTimeoutSec;
io_request->RaidContext.VirtualDiskTgtId = device_id;
} else {
/* system pd fast path */
io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
io_request->RaidContext.timeoutValue = local_map_ptr->raidMap.fpPdIoTimeoutSec;
/*
* NOTE - For system pd RW cmds only IoFlags will be FAST_PATH
* Because the NON RW cmds will now go via FW Queue
* and not the Exception queue
*/
io_request->IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
cmd->request_desc->SCSIIO.RequestFlags =
(MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
}
io_request->RaidContext.VirtualDiskTgtId = device_id;
io_request->LUN[1] = ccb_h->target_lun & 0xF;
io_request->DataLength = cmd->length;
if (mrsas_map_request(sc, cmd, ccb) == SUCCESS) {
if (cmd->sge_count > sc->max_num_sge) {
if (cmd->sge_count > MRSAS_MAX_SGL) {
device_printf(sc->mrsas_dev, "Error: sge_count (0x%x) exceeds"
"max (0x%x) allowed\n", cmd->sge_count, sc->max_num_sge);
return (1);