Fix issues reported by Coverity scan: NULL-pointer dereferences,
unchecked return values, missing break statements in switch cases,
unvalidated (negative) array indices, possible integer overflow in
size calculations, and uninitialized local variables.

Approved by: mav
MFC after: 3 weeks
Author: Ram Kishore Vegesna
Date: 2019-01-23 17:34:01 +00:00
parent f9be23fd42
commit 4915e5c719
9 changed files with 116 additions and 69 deletions


@@ -1164,15 +1164,24 @@ ocs_scsi_del_target(ocs_node_t *node, ocs_scsi_del_target_reason_e reason)
struct ocs_softc *ocs = node->ocs;
ocs_fcport *fcp = NULL;
ocs_fc_target_t *tgt = NULL;
uint32_t tgt_id;
int32_t tgt_id;
if (ocs == NULL) {
ocs_log_err(ocs,"OCS is NULL \n");
return -1;
}
fcp = node->sport->tgt_data;
if (fcp == NULL) {
ocs_log_err(ocs,"FCP is NULL \n");
return 0;
return -1;
}
tgt_id = ocs_tgt_find(fcp, node);
if (tgt_id == -1) {
ocs_log_err(ocs,"target is invalid\n");
return -1;
}
tgt = &fcp->tgt[tgt_id];
@@ -1781,13 +1790,9 @@ ocs_initiator_io(struct ocs_softc *ocs, union ccb *ccb)
ocs_io_t *io = NULL;
ocs_scsi_sgl_t sgl[OCS_FC_MAX_SGL];
int32_t sgl_count;
ocs_fcport *fcp;
ocs_fcport *fcp = NULL;
fcp = FCPORT(ocs, cam_sim_bus(xpt_path_sim((ccb)->ccb_h.path)));
if (fcp == NULL) {
device_printf(ocs->dev, "%s: fcp is NULL\n", __func__);
return -1;
}
if (fcp->tgt[ccb_h->target_id].state == OCS_TGT_STATE_LOST) {
device_printf(ocs->dev, "%s: device LOST %d\n", __func__,
@@ -2250,8 +2255,11 @@ ocs_action(struct cam_sim *sim, union ccb *ccb)
}
case XPT_RESET_BUS:
if (ocs_xport_control(ocs->xport, OCS_XPORT_PORT_OFFLINE) == 0) {
ocs_xport_control(ocs->xport, OCS_XPORT_PORT_ONLINE);
rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_ONLINE);
if (rc) {
ocs_log_debug(ocs, "Failed to bring port online"
" : %d\n", rc);
}
ocs_set_ccb_status(ccb, CAM_REQ_CMP);
} else {
ocs_set_ccb_status(ccb, CAM_REQ_CMP_ERR);


@@ -242,10 +242,7 @@ ocs_hw_get_num_chutes(ocs_hw_t *hw)
static ocs_hw_rtn_e
ocs_hw_link_event_init(ocs_hw_t *hw)
{
if (hw == NULL) {
ocs_log_err(hw->os, "bad parameter hw=%p\n", hw);
return OCS_HW_RTN_ERROR;
}
ocs_hw_assert(hw);
hw->link.status = SLI_LINK_STATUS_MAX;
hw->link.topology = SLI_LINK_TOPO_NONE;
@@ -1757,6 +1754,7 @@ ocs_hw_get(ocs_hw_t *hw, ocs_hw_property_e prop, uint32_t *value)
break;
case OCS_HW_MAX_VPORTS:
*value = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_VPI);
break;
default:
ocs_log_test(hw->os, "unsupported property %#x\n", prop);
rc = OCS_HW_RTN_ERROR;
@@ -1996,6 +1994,7 @@ ocs_hw_set(ocs_hw_t *hw, ocs_hw_property_e prop, uint32_t value)
break;
case OCS_ESOC:
hw->config.esoc = value;
break;
case OCS_HW_HIGH_LOGIN_MODE:
rc = sli_set_hlm(&hw->sli, value);
break;
@@ -4395,7 +4394,7 @@ ocs_hw_send_frame(ocs_hw_t *hw, fc_header_le_t *hdr, uint8_t sof, uint8_t eof, o
OCS_STAT(wq->use_count++);
return rc ? OCS_HW_RTN_ERROR : OCS_HW_RTN_SUCCESS;
return OCS_HW_RTN_SUCCESS;
}
ocs_hw_rtn_e
@@ -4696,7 +4695,7 @@ ocs_hw_io_overflow_sgl(ocs_hw_t *hw, ocs_hw_io_t *io)
}
/* fail if we don't have an overflow SGL registered */
if (io->ovfl_sgl == NULL) {
if (io->ovfl_io == NULL || io->ovfl_sgl == NULL) {
return OCS_HW_RTN_ERROR;
}
@@ -6321,6 +6320,11 @@ ocs_hw_config_watchdog_timer(ocs_hw_t *hw)
ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
uint8_t *buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
if (!buf) {
ocs_log_err(hw->os, "no buffer for command\n");
return OCS_HW_RTN_NO_MEMORY;
}
sli4_cmd_lowlevel_set_watchdog(&hw->sli, buf, SLI4_BMBX_SIZE, hw->watchdog_timeout);
rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_cfg_watchdog, NULL);
if (rc) {
@@ -8486,7 +8490,14 @@ ocs_hw_cq_process(ocs_hw_t *hw, hw_cq_t *cq)
break;
case SLI_QENTRY_WQ_RELEASE: {
uint32_t wq_id = rid;
uint32_t index = ocs_hw_queue_hash_find(hw->wq_hash, wq_id);
int32_t index = ocs_hw_queue_hash_find(hw->wq_hash, wq_id);
if (unlikely(index < 0)) {
ocs_log_err(hw->os, "unknown idx=%#x rid=%#x\n",
index, rid);
break;
}
hw_wq_t *wq = hw->hw_wq[index];
/* Submit any HW IOs that are on the WQ pending list */
@@ -9300,7 +9311,8 @@ ocs_hw_cb_link(void *ctx, void *e)
hw->link.status = event->status;
for (i = 0; d = hw->domains[i], i < SLI4_MAX_FCFI; i++) {
for (i = 0; i < SLI4_MAX_FCFI; i++) {
d = hw->domains[i];
if (d != NULL &&
hw->callback.domain != NULL) {
hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, d);
@@ -9322,6 +9334,9 @@ ocs_hw_cb_fip(void *ctx, void *e)
ocs_domain_t *domain = NULL;
sli4_fip_event_t *event = e;
ocs_hw_assert(event);
ocs_hw_assert(hw);
/* Find the associated domain object */
if (event->type == SLI4_FCOE_FIP_FCF_CLEAR_VLINK) {
ocs_domain_t *d = NULL;
@@ -9330,7 +9345,8 @@ ocs_hw_cb_fip(void *ctx, void *e)
/* Clear VLINK is different from the other FIP events as it passes back
* a VPI instead of a FCF index. Check all attached SLI ports for a
* matching VPI */
for (i = 0; d = hw->domains[i], i < SLI4_MAX_FCFI; i++) {
for (i = 0; i < SLI4_MAX_FCFI; i++) {
d = hw->domains[i];
if (d != NULL) {
ocs_sport_t *sport = NULL;
@@ -11202,6 +11218,7 @@ target_wqe_timer_nop_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
ocs_hw_io_t *io_next = NULL;
uint64_t ticks_current = ocs_get_os_ticks();
uint32_t sec_elapsed;
ocs_hw_rtn_e rc;
sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe;
@@ -11213,34 +11230,39 @@ target_wqe_timer_nop_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
/* loop through active WQE list and check for timeouts */
ocs_lock(&hw->io_lock);
ocs_list_foreach_safe(&hw->io_timed_wqe, io, io_next) {
sec_elapsed = ((ticks_current - io->submit_ticks) / ocs_get_os_tick_freq());
ocs_list_foreach_safe(&hw->io_timed_wqe, io, io_next) {
sec_elapsed = ((ticks_current - io->submit_ticks) / ocs_get_os_tick_freq());
/*
* If elapsed time > timeout, abort it. No need to check type since
* it wouldn't be on this list unless it was a target WQE
*/
if (sec_elapsed > io->tgt_wqe_timeout) {
ocs_log_test(hw->os, "IO timeout xri=0x%x tag=0x%x type=%d\n",
io->indicator, io->reqtag, io->type);
/*
* If elapsed time > timeout, abort it. No need to check type since
* it wouldn't be on this list unless it was a target WQE
*/
if (sec_elapsed > io->tgt_wqe_timeout) {
ocs_log_test(hw->os, "IO timeout xri=0x%x tag=0x%x type=%d\n",
io->indicator, io->reqtag, io->type);
/* remove from active_wqe list so won't try to abort again */
ocs_list_remove(&hw->io_timed_wqe, io);
/* remove from active_wqe list so won't try to abort again */
ocs_list_remove(&hw->io_timed_wqe, io);
/* save status of "timed out" for when abort completes */
io->status_saved = 1;
io->saved_status = SLI4_FC_WCQE_STATUS_TARGET_WQE_TIMEOUT;
io->saved_ext = 0;
io->saved_len = 0;
/* save status of "timed out" for when abort completes */
io->status_saved = 1;
io->saved_status = SLI4_FC_WCQE_STATUS_TARGET_WQE_TIMEOUT;
io->saved_ext = 0;
io->saved_len = 0;
/* now abort outstanding IO */
ocs_hw_io_abort(hw, io, FALSE, NULL, NULL);
/* now abort outstanding IO */
rc = ocs_hw_io_abort(hw, io, FALSE, NULL, NULL);
if (rc) {
ocs_log_test(hw->os,
"abort failed xri=%#x tag=%#x rc=%d\n",
io->indicator, io->reqtag, rc);
}
/*
* need to go through entire list since each IO could have a
* different timeout value
*/
}
/*
* need to go through entire list since each IO could have a
* different timeout value
*/
}
ocs_unlock(&hw->io_lock);
/* if we're not in the middle of shutting down, schedule next timer */


@@ -149,13 +149,16 @@ ocs_hw_init_queues(ocs_hw_t *hw, ocs_hw_qtop_t *qtop)
default_lengths[QTOP_CQ] = len;
break;
}
if (!eq || !next_qt) {
goto fail;
}
/* If this CQ is for MRQ, then delay the creation */
if (!use_mrq || next_qt->entry != QTOP_RQ) {
cq = hw_new_cq(eq, len);
if (cq == NULL) {
hw_queue_teardown(hw);
return OCS_HW_RTN_NO_MEMORY;
goto fail;
}
}
break;
@@ -173,11 +176,13 @@ ocs_hw_init_queues(ocs_hw_t *hw, ocs_hw_qtop_t *qtop)
hw_queue_teardown(hw);
return OCS_HW_RTN_NO_MEMORY;
}
if (cq == NULL)
goto fail;
wq = hw_new_wq(cq, len, qt->class, hw->ulp_start + qt->ulp);
if (wq == NULL) {
hw_queue_teardown(hw);
return OCS_HW_RTN_NO_MEMORY;
goto fail;
}
/* Place this WQ on the EQ WQ array */
@@ -249,10 +254,12 @@ ocs_hw_init_queues(ocs_hw_t *hw, ocs_hw_qtop_t *qtop)
break;
}
if (cq == NULL)
goto fail;
mq = hw_new_mq(cq, len);
if (mq == NULL) {
hw_queue_teardown(hw);
return OCS_HW_RTN_NO_MEMORY;
goto fail;
}
break;
@@ -332,6 +339,9 @@ ocs_hw_init_queues(ocs_hw_t *hw, ocs_hw_qtop_t *qtop)
}
return OCS_HW_RTN_SUCCESS;
fail:
hw_queue_teardown(hw);
return OCS_HW_RTN_NO_MEMORY;
}
@@ -737,8 +747,9 @@ hw_new_rq_set(hw_cq_t *cqs[], hw_rq_t *rqs[], uint32_t num_rq_pairs, uint32_t en
for (i = 0; i < num_rq_pairs; i++) {
if (rqs[i] != NULL) {
if (rqs[i]->rq_tracker != NULL) {
ocs_free(hw->os, rq->rq_tracker,
sizeof(ocs_hw_sequence_t*) * rq->entry_count);
ocs_free(hw->os, rqs[i]->rq_tracker,
sizeof(ocs_hw_sequence_t*) *
rqs[i]->entry_count);
}
ocs_free(hw->os, rqs[i], sizeof(*rqs[i]));
}
@@ -861,9 +872,9 @@ hw_del_wq(hw_wq_t *wq)
void
hw_del_rq(hw_rq_t *rq)
{
ocs_hw_t *hw = rq->cq->eq->hw;
if (rq != NULL) {
ocs_hw_t *hw = rq->cq->eq->hw;
/* Free RQ tracker */
if (rq->rq_tracker != NULL) {
ocs_free(hw->os, rq->rq_tracker, sizeof(ocs_hw_sequence_t*) * rq->entry_count);


@@ -243,9 +243,13 @@ ocs_process_mbx_ioctl(ocs_t *ocs, ocs_ioctl_elxu_mbox_t *mcmd)
* 6. ioctl code releases the lock
*/
mtx_lock(&ocs->dbg_lock);
ocs_hw_command(&ocs->hw, mcmd->payload, OCS_CMD_NOWAIT,
__ocs_ioctl_mbox_cb, ocs);
msleep(ocs, &ocs->dbg_lock, 0, "ocsmbx", 0);
if (ocs_hw_command(&ocs->hw, mcmd->payload, OCS_CMD_NOWAIT,
__ocs_ioctl_mbox_cb, ocs)) {
device_printf(ocs->dev, "%s: command- %x failed\n", __func__,
((sli4_mbox_command_header_t *)mcmd->payload)->command);
}
msleep(ocs, &ocs->dbg_lock, 0, "ocsmbx", 0);
mtx_unlock(&ocs->dbg_lock);
if( SLI4_MBOX_COMMAND_SLI_CONFIG == ((sli4_mbox_command_header_t *)mcmd->payload)->command


@@ -2129,7 +2129,7 @@ set_port_protocol(ocs_t *ocs, char *name, char *value)
if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) {
/* Undefined failure */
ocs_log_err(ocs, "ocs_sem_p failed\n");
rc = -ENXIO;
return -ENXIO;
}
if (result.status == 0) {
/* Success. */
@@ -2321,7 +2321,7 @@ set_active_profile(ocs_t *ocs, char *name, char *value)
if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) {
/* Undefined failure */
ocs_log_err(ocs, "ocs_sem_p failed\n");
rc = -ENXIO;
return -ENXIO;
}
if (result.status == 0) {
/* Success. */
@@ -2527,8 +2527,8 @@ set_nv_wwn(ocs_t *ocs, char *name, char *wwn_p)
char *wwpn_p = NULL;
char *wwnn_p = NULL;
int32_t rc = -1;
int wwpn;
int wwnn;
int wwpn = 0;
int wwnn = 0;
int i;
/* This is a read-modify-write operation, so first we have to read
@@ -2556,8 +2556,13 @@ set_nv_wwn(ocs_t *ocs, char *name, char *wwn_p)
wwnn_p = wwn_p;
}
wwpn = ocs_strcmp(wwpn_p, "NA");
wwnn = ocs_strcmp(wwnn_p, "NA");
if (wwpn_p != NULL) {
wwpn = ocs_strcmp(wwpn_p, "NA");
}
if (wwnn_p != NULL) {
wwnn = ocs_strcmp(wwnn_p, "NA");
}
/* Parse the new WWPN */
if ((wwpn_p != NULL) && (wwpn != 0)) {


@@ -253,7 +253,7 @@ ocs_node_create_pool(ocs_t *ocs, uint32_t node_count)
if (0 == ocs_hw_get(&ocs->hw, OCS_HW_MAX_SGE, &max_sge) &&
0 == ocs_hw_get(&ocs->hw, OCS_HW_N_SGL, &num_sgl)) {
max_xfer_size = max_sge * num_sgl;
max_xfer_size = (max_sge * (uint64_t)num_sgl);
} else {
max_xfer_size = 65536;
}


@@ -591,7 +591,7 @@ ocs_device_detach(ocs_t *ocs)
}
ocs_cam_detach(ocs);
ocs_free(ocs, ocs->fcports, sizeof(ocs->fcports));
ocs_free(ocs, ocs->fcports, sizeof(*(ocs->fcports)));
for (i = 0; (io = ocs_io_get_instance(ocs, i)); i++) {
if (bus_dmamap_destroy(ocs->buf_dmat, io->tgt_io.dmap)) {


@@ -292,10 +292,6 @@ ocs_xport_attach(ocs_xport_t *xport)
ocs_node_free_pool(ocs);
}
if (rq_threads_created) {
ocs_xport_rq_threads_teardown(xport);
}
return -1;
}


@@ -1867,10 +1867,7 @@ sli_cmd_common_create_cq(sli4_t *sli4, void *buf, size_t size,
}
}
break;
default:
ocs_log_test(sli4->os, "unsupported IF_TYPE %d\n", if_type);
return -1;
}
}
return (sli_config_off + cmd_size);
}
@@ -4637,6 +4634,8 @@ sli_cq_alloc_set(sli4_t *sli4, sli4_queue_t *qs[], uint32_t num_cqs,
return -1;
}
memset(&dma, 0, sizeof(dma));
/* Align the queue DMA memory */
for (i = 0; i < num_cqs; i++) {
if (__sli_queue_init(sli4, qs[i], SLI_QTYPE_CQ, SLI4_CQE_BYTES,
@@ -4886,7 +4885,7 @@ sli_queue_reset(sli4_t *sli4, sli4_queue_t *q)
}
if (q->dma.virt != NULL) {
ocs_memset(q->dma.virt, 0, (q->size * q->length));
ocs_memset(q->dma.virt, 0, (q->size * (uint64_t)q->length));
}
ocs_unlock(&q->lock);
@@ -8480,6 +8479,8 @@ sli_fc_rq_set_alloc(sli4_t *sli4, uint32_t num_rq_pairs,
sli4_res_common_create_queue_set_t *rsp = NULL;
sli4_req_fcoe_rq_create_v2_t *req = NULL;
ocs_memset(&dma, 0, sizeof(dma));
for (i = 0; i < (num_rq_pairs * 2); i++) {
if (__sli_queue_init(sli4, qs[i], SLI_QTYPE_RQ, SLI4_FCOE_RQE_SIZE,
n_entries, SLI_PAGE_SIZE)) {