Work in progress toward fixing IM, checked in after having lost one set of changes to a peninsula power failure last night. After this, I can see both submembers and the RAID volumes again, but speed negotiation is still broken.

Add an mpt_raid_free_mem function to centralize the resource
reclamation and fix a small memory leak.

Remove the restriction on the number of targets for systems with IM enabled -
you can have setups that have both IM volumes as well as other devices.

Fix target id selection for the passthru and non-passthru cases.

Move complete command dumping to the MPT_PRT_DEBUG1 level so that just
setting the debug level gets mostly informative, albeit less verbose,
dumping.
This commit is contained in:
Matt Jacob 2006-05-27 17:26:57 +00:00
parent f510d240d3
commit 1d79ca0e46
6 changed files with 184 additions and 86 deletions

View File

@ -1206,7 +1206,7 @@ mpt_get_request(struct mpt_softc *mpt, int sleep_ok)
void
mpt_send_cmd(struct mpt_softc *mpt, request_t *req)
{
if (mpt->verbose > MPT_PRT_TRACE) {
if (mpt->verbose > MPT_PRT_DEBUG2) {
mpt_dump_request(mpt, req);
}
bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
@ -1685,15 +1685,16 @@ mpt_read_config_info_ioc(struct mpt_softc *mpt)
size_t len;
rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC,
/*PageNumber*/2, /*PageAddress*/0, &hdr,
/*sleep_ok*/FALSE, /*timeout_ms*/5000);
2, 0, &hdr, FALSE, 5000);
/*
* If it's an invalid page, so what? Not a supported function....
*/
if (rv == EINVAL)
if (rv == EINVAL) {
return (0);
if (rv)
}
if (rv) {
return (rv);
}
#if __FreeBSD_version >= 500000
mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 2 Header: ver %x, len %zx, "
@ -1709,22 +1710,28 @@ mpt_read_config_info_ioc(struct mpt_softc *mpt)
len = hdr.PageLength * sizeof(uint32_t);
mpt->ioc_page2 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
if (mpt->ioc_page2 == NULL)
if (mpt->ioc_page2 == NULL) {
mpt_prt(mpt, "unable to allocate memory for IOC page 2\n");
mpt_raid_free_mem(mpt);
return (ENOMEM);
}
memcpy(&mpt->ioc_page2->Header, &hdr, sizeof(hdr));
rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
&mpt->ioc_page2->Header, len,
/*sleep_ok*/FALSE, /*timeout_ms*/5000);
rv = mpt_read_cur_cfg_page(mpt, 0,
&mpt->ioc_page2->Header, len, FALSE, 5000);
if (rv) {
mpt_prt(mpt, "failed to read IOC Page 2\n");
} else if (mpt->ioc_page2->CapabilitiesFlags != 0) {
mpt_raid_free_mem(mpt);
return (EIO);
}
if (mpt->ioc_page2->CapabilitiesFlags != 0) {
uint32_t mask;
mpt_prt(mpt, "Capabilities: (");
for (mask = 1; mask != 0; mask <<= 1) {
if ((mpt->ioc_page2->CapabilitiesFlags & mask) == 0)
if ((mpt->ioc_page2->CapabilitiesFlags & mask) == 0) {
continue;
}
switch (mask) {
case MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT:
mpt_prtc(mpt, " RAID-0");
@ -1766,11 +1773,11 @@ mpt_read_config_info_ioc(struct mpt_softc *mpt)
}
len = mpt->ioc_page2->MaxVolumes * sizeof(struct mpt_raid_volume);
mpt->raid_volumes = malloc(len, M_DEVBUF, M_NOWAIT);
mpt->raid_volumes = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
if (mpt->raid_volumes == NULL) {
mpt_prt(mpt, "Could not allocate RAID volume data\n");
} else {
memset(mpt->raid_volumes, 0, len);
mpt_raid_free_mem(mpt);
return (ENOMEM);
}
/*
@ -1780,54 +1787,57 @@ mpt_read_config_info_ioc(struct mpt_softc *mpt)
*/
mpt->raid_max_volumes = mpt->ioc_page2->MaxVolumes;
len = sizeof(*mpt->raid_volumes->config_page)
+ (sizeof(RAID_VOL0_PHYS_DISK)*(mpt->ioc_page2->MaxPhysDisks - 1));
len = sizeof(*mpt->raid_volumes->config_page) +
(sizeof (RAID_VOL0_PHYS_DISK) * (mpt->ioc_page2->MaxPhysDisks - 1));
for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
mpt_raid = &mpt->raid_volumes[i];
mpt_raid->config_page = malloc(len, M_DEVBUF, M_NOWAIT);
mpt_raid->config_page =
malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
if (mpt_raid->config_page == NULL) {
mpt_prt(mpt, "Could not allocate RAID page data\n");
break;
mpt_raid_free_mem(mpt);
return (ENOMEM);
}
memset(mpt_raid->config_page, 0, len);
}
mpt->raid_page0_len = len;
len = mpt->ioc_page2->MaxPhysDisks * sizeof(struct mpt_raid_disk);
mpt->raid_disks = malloc(len, M_DEVBUF, M_NOWAIT);
mpt->raid_disks = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
if (mpt->raid_disks == NULL) {
mpt_prt(mpt, "Could not allocate RAID disk data\n");
} else {
memset(mpt->raid_disks, 0, len);
mpt_raid_free_mem(mpt);
return (ENOMEM);
}
mpt->raid_max_disks = mpt->ioc_page2->MaxPhysDisks;
/*
* Load page 3.
*/
rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC,
/*PageNumber*/3, /*PageAddress*/0, &hdr,
/*sleep_ok*/FALSE, /*timeout_ms*/5000);
if (rv)
return (EIO);
mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 3 Header: %x %x %x %x\n",
hdr.PageVersion, hdr.PageLength, hdr.PageNumber, hdr.PageType);
if (mpt->ioc_page3 != NULL)
free(mpt->ioc_page3, M_DEVBUF);
len = hdr.PageLength * sizeof(uint32_t);
mpt->ioc_page3 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
if (mpt->ioc_page3 == NULL)
return (-1);
memcpy(&mpt->ioc_page3->Header, &hdr, sizeof(hdr));
rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
&mpt->ioc_page3->Header, len,
/*sleep_ok*/FALSE, /*timeout_ms*/5000);
3, 0, &hdr, FALSE, 5000);
if (rv) {
mpt_prt(mpt, "failed to read IOC Page 3\n");
mpt_raid_free_mem(mpt);
return (EIO);
}
mpt_raid_wakeup(mpt);
mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 3 Header: %x %x %x %x\n",
hdr.PageVersion, hdr.PageLength, hdr.PageNumber, hdr.PageType);
len = hdr.PageLength * sizeof(uint32_t);
mpt->ioc_page3 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
if (mpt->ioc_page3 == NULL) {
mpt_prt(mpt, "unable to allocate memory for IOC page 3\n");
mpt_raid_free_mem(mpt);
return (ENOMEM);
}
memcpy(&mpt->ioc_page3->Header, &hdr, sizeof(hdr));
rv = mpt_read_cur_cfg_page(mpt, 0,
&mpt->ioc_page3->Header, len, FALSE, 5000);
if (rv) {
mpt_raid_free_mem(mpt);
return (EIO);
}
mpt_raid_wakeup(mpt);
return (0);
}
@ -2458,6 +2468,9 @@ mpt_configure_ioc(struct mpt_softc *mpt)
/*
* Read IOC configuration information.
*
* We need this to determine whether or not we have certain
* settings for Integrated Mirroring (e.g.).
*/
mpt_read_config_info_ioc(mpt);

View File

@ -491,7 +491,8 @@ struct mpt_softc {
twildcard : 1,
tenabled : 1,
role : 2, /* none, ini, target, both */
: 2,
: 1,
raid_enabled : 1,
raid_mwce_set : 1,
getreqwaiter : 1,
shutdwn_raid : 1,
@ -549,7 +550,7 @@ struct mpt_softc {
} fc;
} cfg;
/* Controller Info */
/* Controller Info for RAID information */
CONFIG_PAGE_IOC_2 * ioc_page2;
CONFIG_PAGE_IOC_3 * ioc_page3;
@ -871,7 +872,7 @@ mpt_pio_read(struct mpt_softc *mpt, int offset)
#define MPT_DUMP_REPLY_FRAME(mpt, reply_frame) \
do { \
if (mpt->verbose >= MPT_PRT_DEBUG) \
if (mpt->verbose > MPT_PRT_DEBUG) \
mpt_dump_reply_frame(mpt, reply_frame); \
} while(0)

View File

@ -1184,7 +1184,7 @@ mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
} else {
callout_handle_init(&ccb->ccb_h.timeout_ch);
}
if (mpt->verbose >= MPT_PRT_DEBUG) {
if (mpt->verbose > MPT_PRT_DEBUG) {
int nc = 0;
mpt_print_request(req->req_vbuf);
for (trq = req->chain; trq; trq = trq->chain) {
@ -1192,6 +1192,7 @@ mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
mpt_dump_sgl(trq->req_vbuf, 0);
}
}
if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
@ -1567,7 +1568,7 @@ mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
} else {
callout_handle_init(&ccb->ccb_h.timeout_ch);
}
if (mpt->verbose >= MPT_PRT_DEBUG) {
if (mpt->verbose > MPT_PRT_DEBUG) {
int nc = 0;
mpt_print_request(req->req_vbuf);
for (trq = req->chain; trq; trq = trq->chain) {
@ -1603,6 +1604,7 @@ mpt_start(struct cam_sim *sim, union ccb *ccb)
struct ccb_scsiio *csio = &ccb->csio;
struct ccb_hdr *ccbh = &ccb->ccb_h;
bus_dmamap_callback_t *cb;
target_id_t tgt;
int raid_passthru;
/* Get the pointer for the physical addapter */
@ -1647,8 +1649,21 @@ mpt_start(struct cam_sim *sim, union ccb *ccb)
mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
if (raid_passthru) {
mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
CAMLOCK_2_MPTLOCK(mpt);
if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
MPTLOCK_2_CAMLOCK(mpt);
ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
xpt_done(ccb);
return;
}
MPTLOCK_2_CAMLOCK(mpt);
mpt_req->Bus = 0; /* we never set bus here */
} else {
tgt = ccb->ccb_h.target_id;
mpt_req->Bus = 0; /* XXX */
}
mpt_req->Bus = 0; /* we don't have multiport devices yet */
mpt_req->SenseBufferLength =
(csio->sense_len < MPT_SENSE_SIZE) ?
csio->sense_len : MPT_SENSE_SIZE;
@ -1660,7 +1675,7 @@ mpt_start(struct cam_sim *sim, union ccb *ccb)
mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);
/* Which physical device to do the I/O on */
mpt_req->TargetID = ccb->ccb_h.target_id;
mpt_req->TargetID = tgt;
/* We assume a single level LUN type */
if (ccb->ccb_h.target_lun >= 256) {
@ -1721,10 +1736,26 @@ mpt_start(struct cam_sim *sim, union ccb *ccb)
mpt_req->DataLength = csio->dxfer_len;
mpt_req->SenseBufferLowAddr = req->sense_pbuf;
/*
* Do a *short* print here if we're set to MPT_PRT_DEBUG
*/
if (mpt->verbose == MPT_PRT_DEBUG) {
mpt_prt(mpt, "mpt_start: %s op 0x%x ",
(mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)?
"SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]);
if (mpt_req->Control != MPI_SCSIIO_CONTROL_NODATATRANSFER) {
mpt_prtc(mpt, "(%s %u byte%s ",
(mpt_req->Control == MPI_SCSIIO_CONTROL_READ)?
"read" : "write", csio->dxfer_len,
(csio->dxfer_len == 1)? ")" : "s)");
}
mpt_prtc(mpt, "tgt %u lun %u req %p:%u\n", tgt,
ccb->ccb_h.target_lun, req, req->serno);
}
/*
* If we have any data to send with this command map it into bus space.
*/
if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
/*
@ -2061,6 +2092,10 @@ mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
inq->device &= ~0x1F;
inq->device |= T_NODEVICE;
}
if (mpt->verbose == MPT_PRT_DEBUG) {
mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n",
req, req->serno);
}
ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
MPTLOCK_2_CAMLOCK(mpt);
@ -2631,7 +2666,7 @@ mpt_action(struct cam_sim *sim, union ccb *ccb)
{
struct mpt_softc *mpt;
struct ccb_trans_settings *cts;
u_int tgt;
target_id_t tgt;
int raid_passthru;
CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));
@ -2681,11 +2716,11 @@ mpt_action(struct cam_sim *sim, union ccb *ccb)
case XPT_RESET_BUS:
mpt_lprt(mpt, MPT_PRT_DEBUG, "XPT_RESET_BUS\n");
if (raid_passthru == 0) {
CAMLOCK_2_MPTLOCK(mpt);
(void)mpt_bus_reset(mpt, FALSE);
MPTLOCK_2_CAMLOCK(mpt);
}
CAMLOCK_2_MPTLOCK(mpt);
(void) mpt_bus_reset(mpt, FALSE);
MPTLOCK_2_CAMLOCK(mpt);
/*
* mpt_bus_reset is always successful in that it
* will fall back to a hard reset should a bus
@ -2760,11 +2795,20 @@ mpt_action(struct cam_sim *sim, union ccb *ccb)
break;
}
if (mpt->ioc_page2 && mpt->ioc_page2->MaxPhysDisks != 0 &&
raid_passthru == 0) {
mpt_set_ccb_status(ccb, CAM_REQ_CMP);
break;
}
m = mpt->mpt_port_page2.PortSettings;
if ((m & MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS) ==
MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) {
mpt_prt(mpt, "master settings\n");
if (raid_passthru == 0) {
mpt_set_ccb_status(ccb, CAM_REQ_CMP);
break;
}
}
dval = 0;
@ -2962,7 +3006,8 @@ mpt_action(struct cam_sim *sim, union ccb *ccb)
cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
}
if (raid_passthru) {
cpi->max_target = mpt->ioc_page2->MaxPhysDisks;
cpi->max_lun = 0;
cpi->hba_misc = PIM_NOBUSRESET;
cpi->initiator_id = cpi->max_target+1;
}
@ -3057,17 +3102,38 @@ static int
mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts)
{
#ifdef CAM_NEW_TRAN_CODE
struct ccb_trans_settings_scsi *scsi =
&cts->proto_specific.scsi;
struct ccb_trans_settings_spi *spi =
&cts->xport_specific.spi;
struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
#endif
int tgt;
target_id_t tgt;
uint8_t dval, pval, oval;
int rv;
/*
* Check to see if this is an Integrated Raid card.
*
* If it is, and we're the RAID bus side, both current
* and goal settings are synthesized as we only look at
* or change actual settings for the physical disk side.
*
* NB: In the future we can just do this on the blacked out
* NB: portion that the RAID volume covers- there may be
* NB: other entities on this bus as well.
*/
tgt = cts->ccb_h.target_id;
if (mpt->phydisk_sim) {
if (xpt_path_sim(cts->ccb_h.path) != mpt->phydisk_sim) {
dval = DP_WIDE|DP_DISC|DP_TQING;
oval = (mpt->mpt_port_page0.Capabilities >> 16);
pval = (mpt->mpt_port_page0.Capabilities >> 8);
tgt = cts->ccb_h.target_id;
goto skip;
}
}
if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt) != 0) {
return (-1);
}
/*
* We aren't going off of Port PAGE2 params for
@ -3115,6 +3181,7 @@ mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts)
oval = (mpt->mpt_port_page0.Capabilities >> 16);
pval = (mpt->mpt_port_page0.Capabilities >> 8);
}
skip:
#ifndef CAM_NEW_TRAN_CODE
cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
if (dval & DP_DISC_ENABLE) {

View File

@ -600,27 +600,10 @@ mpt_pci_detach(device_t dev)
mpt_reset(mpt, /*reinit*/FALSE);
mpt_dma_mem_free(mpt);
mpt_free_bus_resources(mpt);
if (mpt->raid_volumes != NULL && mpt->ioc_page2 != NULL) {
int i;
for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
struct mpt_raid_volume *mpt_vol;
mpt_vol = &mpt->raid_volumes[i];
if (mpt_vol->config_page) {
free(mpt_vol->config_page, M_DEVBUF);
}
}
}
if (mpt->ioc_page2 != NULL)
free(mpt->ioc_page2, M_DEVBUF);
if (mpt->ioc_page3 != NULL)
free(mpt->ioc_page3, M_DEVBUF);
if (mpt->raid_volumes != NULL)
free(mpt->raid_volumes, M_DEVBUF);
if (mpt->raid_disks != NULL)
free(mpt->raid_disks, M_DEVBUF);
if (mpt->eh != NULL)
mpt_raid_free_mem(mpt);
if (mpt->eh != NULL) {
EVENTHANDLER_DEREGISTER(shutdown_final, mpt->eh);
}
MPT_UNLOCK(mpt);
}
return(0);

View File

@ -1494,6 +1494,39 @@ mpt_schedule_raid_refresh(struct mpt_softc *mpt)
mpt_raid_timer, mpt);
}
void
mpt_raid_free_mem(struct mpt_softc *mpt)
{
if (mpt->raid_volumes) {
struct mpt_raid_volume *mpt_raid;
int i;
for (i = 0; i < mpt->raid_max_volumes; i++) {
mpt_raid = &mpt->raid_volumes[i];
if (mpt_raid->config_page) {
free(mpt_raid->config_page, M_DEVBUF);
mpt_raid->config_page = NULL;
}
}
free(mpt->raid_volumes, M_DEVBUF);
mpt->raid_volumes = NULL;
}
if (mpt->raid_disks) {
free(mpt->raid_disks, M_DEVBUF);
mpt->raid_disks = NULL;
}
if (mpt->ioc_page2) {
free(mpt->ioc_page2, M_DEVBUF);
mpt->ioc_page2 = NULL;
}
if (mpt->ioc_page3) {
free(mpt->ioc_page3, M_DEVBUF);
mpt->ioc_page3 = NULL;
}
mpt->raid_max_volumes = 0;
mpt->raid_max_disks = 0;
}
static int
mpt_raid_set_vol_resync_rate(struct mpt_softc *mpt, u_int rate)
{

View File

@ -59,12 +59,13 @@ mpt_issue_raid_req(struct mpt_softc *, struct mpt_raid_volume *,
bus_size_t, int, int);
cam_status
mpt_map_physdisk(struct mpt_softc *, union ccb *, u_int *);
mpt_map_physdisk(struct mpt_softc *, union ccb *, target_id_t *);
cam_status
mpt_raid_quiesce_disk(struct mpt_softc *, struct mpt_raid_disk *, request_t *);
int mpt_refresh_raid_data(struct mpt_softc *);
void mpt_schedule_raid_refresh(struct mpt_softc *);
void mpt_raid_free_mem(struct mpt_softc *);
static __inline void
mpt_raid_wakeup(struct mpt_softc *mpt)