Get most of the way back to having Integrated Mirroring work
again — the addition of target mode support broke it massively.
This commit is contained in:
Matt Jacob 2006-05-26 05:54:21 +00:00
parent 8bf91348e5
commit a3116b5a27
4 changed files with 160 additions and 122 deletions

View File

@ -491,8 +491,7 @@ struct mpt_softc {
twildcard : 1,
tenabled : 1,
role : 2, /* none, ini, target, both */
: 1,
raid_enabled : 1,
: 2,
raid_mwce_set : 1,
getreqwaiter : 1,
shutdwn_raid : 1,

View File

@ -299,7 +299,7 @@ mpt_cam_attach(struct mpt_softc *mpt)
}
/*
* Register exactly the bus.
* Register exactly this bus.
*/
if (xpt_bus_register(mpt->sim, 0) != CAM_SUCCESS) {
mpt_prt(mpt, "Bus registration Failed!\n");
@ -335,7 +335,7 @@ mpt_cam_attach(struct mpt_softc *mpt)
}
/*
* Register exactly the bus.
* Register this bus.
*/
if (xpt_bus_register(mpt->phydisk_sim, 1) != CAM_SUCCESS) {
mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
@ -687,8 +687,8 @@ mpt_set_initial_config_spi(struct mpt_softc *mpt)
i = mpt->mpt_port_page2.PortSettings &
MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) {
mpt_lprt(mpt, MPT_PRT_INFO,
"honoring BIOS transfer negotiation for all targets\n");
mpt_lprt(mpt, /* MPT_PRT_INFO */ MPT_PRT_ALWAYS,
"honoring BIOS transfer negotiations\n");
return (0);
}
for (i = 0; i < 16; i++) {
@ -828,10 +828,11 @@ mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
struct mpt_softc *mpt;
int seg, first_lim;
uint32_t flags, nxt_off;
void *sglp;
void *sglp = NULL;
MSG_REQUEST_HEADER *hdrp;
SGE_SIMPLE64 *se;
SGE_CHAIN64 *ce;
int istgt = 0;
req = (request_t *)arg;
ccb = req->ccb;
@ -842,17 +843,28 @@ mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
hdrp = req->req_vbuf;
mpt_off = req->req_vbuf;
if (hdrp->Function == MPI_FUNCTION_SCSI_IO_REQUEST) {
sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
} else /* if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) */ {
sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
}
if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
error = EFBIG;
}
if (error == 0) {
switch (hdrp->Function) {
case MPI_FUNCTION_SCSI_IO_REQUEST:
case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
istgt = 0;
sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
break;
case MPI_FUNCTION_TARGET_ASSIST:
istgt = 1;
sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
break;
default:
mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n",
hdrp->Function);
error = EINVAL;
break;
}
}
bad:
if (error != 0) {
if (error != EFBIG && error != ENOMEM) {
@ -912,7 +924,7 @@ mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
if (hdrp->Function == MPI_FUNCTION_SCSI_IO_REQUEST) {
if (istgt == 0) {
if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
}
@ -924,7 +936,7 @@ mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
bus_dmasync_op_t op;
if (hdrp->Function == MPI_FUNCTION_SCSI_IO_REQUEST) {
if (istgt == 0) {
if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
op = BUS_DMASYNC_PREREAD;
} else {
@ -1208,10 +1220,11 @@ mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
struct mpt_softc *mpt;
int seg, first_lim;
uint32_t flags, nxt_off;
void *sglp;
void *sglp = NULL;
MSG_REQUEST_HEADER *hdrp;
SGE_SIMPLE32 *se;
SGE_CHAIN32 *ce;
int istgt = 0;
req = (request_t *)arg;
ccb = req->ccb;
@ -1223,12 +1236,27 @@ mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
mpt_off = req->req_vbuf;
if (hdrp->Function == MPI_FUNCTION_SCSI_IO_REQUEST) {
sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
} else /* if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) */ {
sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
error = EFBIG;
}
if (error == 0) {
switch (hdrp->Function) {
case MPI_FUNCTION_SCSI_IO_REQUEST:
case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
break;
case MPI_FUNCTION_TARGET_ASSIST:
istgt = 1;
sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
break;
default:
mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n",
hdrp->Function);
error = EINVAL;
break;
}
}
if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
error = EFBIG;
@ -1292,7 +1320,7 @@ mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
if (hdrp->Function == MPI_FUNCTION_SCSI_IO_REQUEST) {
if (istgt) {
if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
}
@ -1304,7 +1332,7 @@ mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
bus_dmasync_op_t op;
if (hdrp->Function == MPI_FUNCTION_SCSI_IO_REQUEST) {
if (istgt) {
if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
op = BUS_DMASYNC_PREREAD;
} else {
@ -1605,14 +1633,6 @@ mpt_start(struct cam_sim *sim, union ccb *ccb)
cb = mpt_execute_req;
}
#if 0
COWWWWW
if (raid_passthru) {
status = mpt_raid_quiesce_disk(mpt, mpt->raid_disks + ccb->ccb_h.target_id,
request_t *req)
}
#endif
/*
* Link the ccb and the request structure so we can find
* the other knowing either the request or the ccb
@ -2625,9 +2645,9 @@ mpt_action(struct cam_sim *sim, union ccb *ccb)
ccb->ccb_h.func_code != XPT_RESET_BUS) {
CAMLOCK_2_MPTLOCK(mpt);
if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
MPTLOCK_2_CAMLOCK(mpt);
ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
MPTLOCK_2_CAMLOCK(mpt);
xpt_done(ccb);
return;
}
@ -2644,7 +2664,6 @@ mpt_action(struct cam_sim *sim, union ccb *ccb)
if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
xpt_done(ccb);
break;
}
}
@ -2654,18 +2673,17 @@ mpt_action(struct cam_sim *sim, union ccb *ccb)
sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
xpt_done(ccb);
return;
break;
}
ccb->csio.scsi_status = SCSI_STATUS_OK;
mpt_start(sim, ccb);
break;
return;
case XPT_RESET_BUS:
mpt_lprt(mpt, MPT_PRT_DEBUG, "XPT_RESET_BUS\n");
if (!raid_passthru) {
if (raid_passthru == 0) {
CAMLOCK_2_MPTLOCK(mpt);
(void)mpt_bus_reset(mpt, /*sleep_ok*/FALSE);
(void)mpt_bus_reset(mpt, FALSE);
MPTLOCK_2_CAMLOCK(mpt);
}
/*
@ -2675,7 +2693,6 @@ mpt_action(struct cam_sim *sim, union ccb *ccb)
*/
ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
mpt_set_ccb_status(ccb, CAM_REQ_CMP);
xpt_done(ccb);
break;
case XPT_ABORT:
@ -2699,7 +2716,6 @@ mpt_action(struct cam_sim *sim, union ccb *ccb)
break;
}
MPTLOCK_2_CAMLOCK(mpt);
xpt_done(ccb);
break;
}
@ -2737,12 +2753,10 @@ mpt_action(struct cam_sim *sim, union ccb *ccb)
if (!IS_CURRENT_SETTINGS(cts)) {
mpt_prt(mpt, "Attempt to set User settings\n");
mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
xpt_done(ccb);
break;
}
if (mpt->is_fc || mpt->is_sas) {
mpt_set_ccb_status(ccb, CAM_REQ_CMP);
xpt_done(ccb);
break;
}
@ -2750,7 +2764,6 @@ mpt_action(struct cam_sim *sim, union ccb *ccb)
if ((m & MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS) ==
MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) {
mpt_set_ccb_status(ccb, CAM_REQ_CMP);
xpt_done(ccb);
break;
}
@ -2839,7 +2852,6 @@ mpt_action(struct cam_sim *sim, union ccb *ccb)
"SET tgt %d flags %x period %x off %x\n",
tgt, dval, period, offset);
mpt_set_ccb_status(ccb, CAM_REQ_CMP);
xpt_done(ccb);
break;
}
case XPT_GET_TRAN_SETTINGS:
@ -2899,12 +2911,10 @@ mpt_action(struct cam_sim *sim, union ccb *ccb)
} else {
if (mpt_get_spi_settings(mpt, cts) != 0) {
mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
xpt_done(ccb);
return;
break;
}
}
mpt_set_ccb_status(ccb, CAM_REQ_CMP);
xpt_done(ccb);
break;
case XPT_CALC_GEOMETRY:
@ -2915,12 +2925,10 @@ mpt_action(struct cam_sim *sim, union ccb *ccb)
if (ccg->block_size == 0) {
ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
xpt_done(ccb);
break;
}
mpt_calc_geometry(ccg, /*extended*/1);
KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
xpt_done(ccb);
break;
}
case XPT_PATH_INQ: /* Path routing inquiry */
@ -2933,21 +2941,7 @@ mpt_action(struct cam_sim *sim, union ccb *ccb)
cpi->max_lun = 7;
cpi->bus_id = cam_sim_bus(sim);
/* XXX Report base speed more accurately for FC/SAS, etc.*/
if (raid_passthru) {
cpi->max_target = mpt->ioc_page2->MaxPhysDisks;
cpi->hba_misc = PIM_NOBUSRESET;
cpi->initiator_id = cpi->max_target + 1;
cpi->hba_inquiry = PI_TAG_ABLE;
if (mpt->is_fc) {
cpi->base_transfer_speed = 100000;
} else if (mpt->is_sas) {
cpi->base_transfer_speed = 300000;
} else {
cpi->base_transfer_speed = 3300;
cpi->hba_inquiry |=
PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
}
} else if (mpt->is_fc) {
if (mpt->is_fc) {
/* XXX SHOULD BE BASED UPON IOC FACTS XXX XXX */
cpi->max_target = 255;
cpi->hba_misc = PIM_NOBUSRESET;
@ -2961,11 +2955,15 @@ mpt_action(struct cam_sim *sim, union ccb *ccb)
cpi->base_transfer_speed = 300000;
cpi->hba_inquiry = PI_TAG_ABLE;
} else {
cpi->max_target = 15;
cpi->hba_misc = 0;
cpi->initiator_id = mpt->mpt_ini_id;
cpi->base_transfer_speed = 3300;
cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
cpi->hba_misc = 0;
cpi->max_target = 15;
}
if (raid_passthru) {
cpi->max_target = mpt->ioc_page2->MaxPhysDisks;
cpi->initiator_id = cpi->max_target+1;
}
if ((mpt->role & MPT_ROLE_INITIATOR) == 0) {
@ -2982,7 +2980,6 @@ mpt_action(struct cam_sim *sim, union ccb *ccb)
strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
cpi->unit_number = cam_sim_unit(sim);
cpi->ccb_h.status = CAM_REQ_CMP;
xpt_done(ccb);
break;
}
case XPT_EN_LUN: /* Enable LUN as a target */
@ -3002,7 +2999,6 @@ mpt_action(struct cam_sim *sim, union ccb *ccb)
} else {
mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
}
xpt_done(ccb);
break;
}
case XPT_NOTIFY_ACK: /* recycle notify ack */
@ -3018,13 +3014,11 @@ mpt_action(struct cam_sim *sim, union ccb *ccb)
if (lun == CAM_LUN_WILDCARD) {
if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
xpt_done(ccb);
break;
}
trtp = &mpt->trt_wildcard;
} else if (lun >= MPT_MAX_LUNS) {
mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
xpt_done(ccb);
break;
} else {
trtp = &mpt->trt[lun];
@ -3054,9 +3048,9 @@ mpt_action(struct cam_sim *sim, union ccb *ccb)
break;
default:
ccb->ccb_h.status = CAM_REQ_INVALID;
xpt_done(ccb);
break;
}
xpt_done(ccb);
}
static int
@ -3470,6 +3464,7 @@ mpt_recover_commands(struct mpt_softc *mpt)
*/
switch (hdrp->Function) {
case MPI_FUNCTION_SCSI_IO_REQUEST:
case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
break;
default:
/*

View File

@ -493,6 +493,7 @@ mpt_print_reply(void *vmsg)
mpt_print_init_reply((MSG_IOC_INIT_REPLY *)msg);
break;
case MPI_FUNCTION_SCSI_IO_REQUEST:
case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
mpt_print_scsi_io_reply((MSG_SCSI_IO_REPLY *)msg);
break;
default:
@ -613,6 +614,7 @@ mpt_print_request(void *vreq)
switch (req->Function) {
case MPI_FUNCTION_SCSI_IO_REQUEST:
case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req);
break;
case MPI_FUNCTION_SCSI_TASK_MGMT:
@ -795,6 +797,28 @@ mpt_dump_sgl(SGE_IO_UNION *su, int offset)
} while ((flags & MPI_SGE_FLAGS_END_OF_LIST) == 0 && nxtaddr < lim);
}
/*
 * Dump the raw contents of a request frame to the console for debugging:
 * one 32-bit word at a time, eight words per output line, preceded by a
 * header giving the request's index and its bus (physical) address.
 *
 * mpt: softc of the controller that owns the request.
 * req: request whose message frame (req->req_vbuf) is printed.
 */
void
mpt_dump_request(struct mpt_softc *mpt, request_t *req)
{
uint32_t *pReq = req->req_vbuf;
int offset;
#if __FreeBSD_version >= 500000
/* uintmax_t and the %jx conversion are only available on FreeBSD 5.x+ */
mpt_prt(mpt, "Send Request %d (%jx):",
req->index, (uintmax_t) req->req_pbuf);
#else
mpt_prt(mpt, "Send Request %d (%llx):",
req->index, (unsigned long long) req->req_pbuf);
#endif
/*
 * NOTE(review): the loop bound implies request_frame_size is counted
 * in 32-bit words, not bytes -- confirm against struct mpt_softc.
 */
for (offset = 0; offset < mpt->request_frame_size; offset++) {
/* start a fresh line every 8 words */
if ((offset & 0x7) == 0) {
mpt_prtc(mpt, "\n");
mpt_prt(mpt, " ");
}
mpt_prtc(mpt, " %08x", pReq[offset]);
}
mpt_prtc(mpt, "\n");
}
#if __FreeBSD_version < 500000
void
mpt_lprt(struct mpt_softc *mpt, int level, const char *fmt, ...)

View File

@ -76,6 +76,7 @@ struct mpt_raid_action_result
static mpt_probe_handler_t mpt_raid_probe;
static mpt_attach_handler_t mpt_raid_attach;
static mpt_enable_handler_t mpt_raid_enable;
static mpt_event_handler_t mpt_raid_event;
static mpt_shutdown_handler_t mpt_raid_shutdown;
static mpt_reset_handler_t mpt_raid_ioc_reset;
@ -86,6 +87,7 @@ static struct mpt_personality mpt_raid_personality =
.name = "mpt_raid",
.probe = mpt_raid_probe,
.attach = mpt_raid_attach,
.enable = mpt_raid_enable,
.event = mpt_raid_event,
.reset = mpt_raid_ioc_reset,
.shutdown = mpt_raid_shutdown,
@ -220,8 +222,9 @@ mpt_raid_async(void *callback_arg, u_int32_t code,
struct mpt_raid_volume *mpt_vol;
cgd = (struct ccb_getdev *)arg;
if (cgd == NULL)
if (cgd == NULL) {
break;
}
mpt_lprt(mpt, MPT_PRT_DEBUG, " Callback for %d\n",
cgd->ccb_h.target_id);
@ -263,8 +266,10 @@ mpt_raid_attach(struct mpt_softc *mpt)
handler.reply_handler = mpt_raid_reply_handler;
error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
&raid_handler_id);
if (error != 0)
if (error != 0) {
mpt_prt(mpt, "Unable to register RAID haandler!\n");
goto cleanup;
}
error = mpt_spawn_raid_thread(mpt);
if (error != 0) {
@ -272,7 +277,7 @@ mpt_raid_attach(struct mpt_softc *mpt)
goto cleanup;
}
xpt_setup_ccb(&csa.ccb_h, mpt->path, /*priority*/5);
xpt_setup_ccb(&csa.ccb_h, mpt->path, 5);
csa.ccb_h.func_code = XPT_SASYNC_CB;
csa.event_enable = AC_FOUND_DEVICE;
csa.callback = mpt_raid_async;
@ -292,6 +297,12 @@ mpt_raid_attach(struct mpt_softc *mpt)
return (error);
}
/*
 * RAID personality "enable" hook (mpt_enable_handler_t).  The RAID
 * personality needs no work at enable time, so this always reports
 * success.
 */
int
mpt_raid_enable(struct mpt_softc *mpt)
{
return (0);
}
void
mpt_raid_detach(struct mpt_softc *mpt)
{
@ -347,11 +358,12 @@ mpt_raid_event(struct mpt_softc *mpt, request_t *req,
int i;
int print_event;
if (msg->Event != MPI_EVENT_INTEGRATED_RAID)
return (/*handled*/0);
if (msg->Event != MPI_EVENT_INTEGRATED_RAID) {
return (0);
}
raid_event = (EVENT_DATA_RAID *)&msg->Data;
mpt_vol = NULL;
vol_pg = NULL;
if (mpt->raid_volumes != NULL && mpt->ioc_page2 != NULL) {
@ -373,12 +385,11 @@ mpt_raid_event(struct mpt_softc *mpt, request_t *req,
}
mpt_disk = NULL;
if (raid_event->PhysDiskNum != 0xFF
&& mpt->raid_disks != NULL) {
mpt_disk = mpt->raid_disks
+ raid_event->PhysDiskNum;
if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0)
if (raid_event->PhysDiskNum != 0xFF && mpt->raid_disks != NULL) {
mpt_disk = mpt->raid_disks + raid_event->PhysDiskNum;
if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0) {
mpt_disk = NULL;
}
}
print_event = 1;
@ -404,8 +415,9 @@ mpt_raid_event(struct mpt_softc *mpt, request_t *req,
case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED:
case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED:
mpt->raid_rescan++;
if (mpt_vol != NULL)
if (mpt_vol != NULL) {
mpt_vol->flags &= ~(MPT_RVF_UP2DATE|MPT_RVF_ANNOUNCED);
}
break;
case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
@ -414,8 +426,9 @@ mpt_raid_event(struct mpt_softc *mpt, request_t *req,
case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED:
case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
mpt->raid_rescan++;
if (mpt_disk != NULL)
if (mpt_disk != NULL) {
mpt_disk->flags &= ~MPT_RDF_UP2DATE;
}
break;
case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED:
mpt->raid_rescan++;
@ -462,7 +475,7 @@ mpt_raid_event(struct mpt_softc *mpt, request_t *req,
}
mpt_raid_wakeup(mpt);
return (/*handled*/1);
return (1);
}
static void
@ -487,7 +500,7 @@ mpt_raid_reply_handler(struct mpt_softc *mpt, request_t *req,
int free_req;
if (req == NULL)
return (/*free_reply*/TRUE);
return (TRUE);
free_req = TRUE;
if (reply_frame != NULL)
@ -508,7 +521,7 @@ mpt_raid_reply_handler(struct mpt_softc *mpt, request_t *req,
mpt_free_request(mpt, req);
}
return (/*free_reply*/TRUE);
return (TRUE);
}
/*
@ -517,7 +530,7 @@ mpt_raid_reply_handler(struct mpt_softc *mpt, request_t *req,
*/
static int
mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
MSG_DEFAULT_REPLY *reply_frame)
MSG_DEFAULT_REPLY *reply_frame)
{
MSG_RAID_ACTION_REPLY *reply;
struct mpt_raid_action_result *action_result;
@ -529,27 +542,19 @@ mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
switch (rap->Action) {
case MPI_RAID_ACTION_QUIESCE_PHYS_IO:
/*
* Parse result, call mpt_start with ccb,
* release device queue.
* COWWWWW
*/
mpt_prt(mpt, "QUIESCE PHYSIO DONE\n");
break;
case MPI_RAID_ACTION_ENABLE_PHYS_IO:
/*
* Need additional state for transition to enabled to
* protect against attempts to disable??
*/
mpt_prt(mpt, "ENABLY PHYSIO DONE\n");
break;
default:
action_result = REQ_TO_RAID_ACTION_RESULT(req);
memcpy(&action_result->action_data, &reply->ActionData,
sizeof(action_result->action_data));
action_result->action_status = reply->ActionStatus;
break;
}
return (/*Free Request*/TRUE);
action_result = REQ_TO_RAID_ACTION_RESULT(req);
memcpy(&action_result->action_data, &reply->ActionData,
sizeof(action_result->action_data));
action_result->action_status = reply->ActionStatus;
return (TRUE);
}
/*
@ -661,7 +666,10 @@ mpt_raid_thread(void *arg)
mpt->raid_wakeup = 0;
mpt_refresh_raid_data(mpt);
if (mpt_refresh_raid_data(mpt)) {
mpt_schedule_raid_refresh(mpt); /* XX NOT QUITE RIGHT */
continue;
}
/*
* Now that we have our first snapshot of RAID data,
@ -669,7 +677,9 @@ mpt_raid_thread(void *arg)
*/
if (firstrun) {
firstrun = 0;
xpt_release_simq(mpt->phydisk_sim, /*run_queue*/TRUE);
MPTLOCK_2_CAMLOCK(mpt);
xpt_release_simq(mpt->phydisk_sim, TRUE);
CAMLOCK_2_MPTLOCK(mpt);
}
if (mpt->raid_rescan != 0) {
@ -681,18 +691,19 @@ mpt_raid_thread(void *arg)
ccb = malloc(sizeof(*ccb), M_DEVBUF, M_WAITOK);
error = xpt_create_path(&path, xpt_periph,
cam_sim_path(mpt->phydisk_sim),
CAM_TARGET_WILDCARD,
CAM_LUN_WILDCARD);
cam_sim_path(mpt->phydisk_sim),
CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
if (error != CAM_REQ_CMP) {
free(ccb, M_DEVBUF);
mpt_prt(mpt, "Unable to rescan RAID Bus!\n");
} else {
xpt_setup_ccb(&ccb->ccb_h, path, /*priority*/5);
xpt_setup_ccb(&ccb->ccb_h, path, 5);
ccb->ccb_h.func_code = XPT_SCAN_BUS;
ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback;
ccb->crcn.flags = CAM_FLAG_NONE;
MPTLOCK_2_CAMLOCK(mpt);
xpt_action(ccb);
CAMLOCK_2_MPTLOCK(mpt);
}
}
}
@ -1217,7 +1228,7 @@ mpt_refresh_raid_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
* be updated by our event handler. Interesting changes are displayed
* to the console.
*/
void
int
mpt_refresh_raid_data(struct mpt_softc *mpt)
{
CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
@ -1231,7 +1242,7 @@ mpt_refresh_raid_data(struct mpt_softc *mpt)
u_int nonopt_volumes;
if (mpt->ioc_page2 == NULL || mpt->ioc_page3 == NULL) {
return;
return (0);
}
/*
@ -1254,9 +1265,9 @@ mpt_refresh_raid_data(struct mpt_softc *mpt)
&mpt->ioc_page3->Header, len,
/*sleep_ok*/TRUE, /*timeout_ms*/5000);
if (rv) {
mpt_prt(mpt, "mpt_refresh_raid_data: "
"Failed to read IOC Page 3\n");
return;
mpt_prt(mpt,
"mpt_refresh_raid_data: Failed to read IOC Page 3\n");
return (-1);
}
ioc_disk = mpt->ioc_page3->PhysDisk;
@ -1286,7 +1297,7 @@ mpt_refresh_raid_data(struct mpt_softc *mpt)
if (rv) {
mpt_prt(mpt, "mpt_refresh_raid_data: "
"Failed to read IOC Page 2\n");
return;
return (-1);
}
ioc_vol = mpt->ioc_page2->RaidVolume;
@ -1455,6 +1466,7 @@ mpt_refresh_raid_data(struct mpt_softc *mpt)
}
mpt->raid_nonopt_volumes = nonopt_volumes;
return (0);
}
static void
@ -1612,24 +1624,28 @@ mpt_raid_sysctl_vol_member_wce(SYSCTL_HANDLER_ARGS)
u_int i;
GIANT_REQUIRED;
mpt = (struct mpt_softc *)arg1;
str = mpt_vol_mwce_strs[mpt->raid_mwce_setting];
error = SYSCTL_OUT(req, str, strlen(str) + 1);
if (error || !req->newptr)
if (error || !req->newptr) {
return (error);
}
size = req->newlen - req->newidx;
if (size >= sizeof(inbuf))
if (size >= sizeof(inbuf)) {
return (EINVAL);
}
error = SYSCTL_IN(req, inbuf, size);
if (error)
if (error) {
return (error);
}
inbuf[size] = '\0';
for (i = 0; i < NUM_ELEMENTS(mpt_vol_mwce_strs); i++) {
if (strcmp(mpt_vol_mwce_strs[i], inbuf) == 0)
if (strcmp(mpt_vol_mwce_strs[i], inbuf) == 0) {
return (mpt_raid_set_vol_mwce(mpt, i));
}
}
return (EINVAL);
}
@ -1642,12 +1658,14 @@ mpt_raid_sysctl_vol_resync_rate(SYSCTL_HANDLER_ARGS)
int error;
GIANT_REQUIRED;
mpt = (struct mpt_softc *)arg1;
raid_resync_rate = mpt->raid_resync_rate;
error = sysctl_handle_int(oidp, &raid_resync_rate, 0, req);
if (error || !req->newptr)
if (error || !req->newptr) {
return error;
}
return (mpt_raid_set_vol_resync_rate(mpt, raid_resync_rate));
}
@ -1660,12 +1678,14 @@ mpt_raid_sysctl_vol_queue_depth(SYSCTL_HANDLER_ARGS)
int error;
GIANT_REQUIRED;
mpt = (struct mpt_softc *)arg1;
raid_queue_depth = mpt->raid_queue_depth;
error = sysctl_handle_int(oidp, &raid_queue_depth, 0, req);
if (error || !req->newptr)
if (error || !req->newptr) {
return error;
}
return (mpt_raid_set_vol_queue_depth(mpt, raid_queue_depth));
}