MFC a number of changes from head for ISP (203478,203463,203444,202418,201758,
201408,201325,200089,198822,197373,197372,197214,196162). Since one of those changes was a semicolon cleanup from somebody else, this touches a lot more.
parent 1d0daf9e44
commit 7733cf8fff
@@ -1555,7 +1555,7 @@ ses_encode(char *b, int amt, uint8_t *ep, int elt, int elm, SesComStat *sp)
*/

static int safte_getconfig(ses_softc_t *);
static int safte_rdstat(ses_softc_t *, int);;
static int safte_rdstat(ses_softc_t *, int);
static int set_objstat_sel(ses_softc_t *, ses_objstat *, int);
static int wrbuf16(ses_softc_t *, uint8_t, uint8_t, uint8_t, uint8_t, int);
static void wrslot_stat(ses_softc_t *, int);
@@ -2257,7 +2257,7 @@ safte_rdstat(ses_softc_t *ssc, int slpflg)
ssc->ses_objmap[oid].encstat[0] = SES_OBJSTAT_NOTAVAIL;
ssc->ses_objmap[oid].encstat[1] = 0;
ssc->ses_objmap[oid].encstat[2] = sdata[r];
ssc->ses_objmap[oid].encstat[3] = 0;;
ssc->ses_objmap[oid].encstat[3] = 0;
ssc->ses_objmap[oid++].svalid = 1;
r++;
}
@@ -429,7 +429,7 @@ targbhdtor(struct cam_periph *periph)

switch (softc->init_level) {
case 0:
panic("targdtor - impossible init level");;
panic("targdtor - impossible init level");
case 1:
/* FALLTHROUGH */
default:
@@ -493,7 +493,7 @@ linux_sys_futex(struct thread *td, struct linux_sys_futex_args *args)
return (error);
if (f == NULL) {
td->td_retval[0] = 0;
return (error);;
return (error);
}
td->td_retval[0] = futex_wake(f, args->val);
futex_put(f, NULL);
@@ -553,7 +553,7 @@ aac_alloc(struct aac_softc *sc)
0, /* flags */
NULL, NULL, /* No locking needed */
&sc->aac_fib_dmat)) {
device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n");;
device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n");
return (ENOMEM);
}
@@ -763,7 +763,7 @@ agp_allocate_user(device_t dev, agp_allocate *alloc)
static int
agp_deallocate_user(device_t dev, int id)
{
struct agp_memory *mem = agp_find_memory(dev, id);;
struct agp_memory *mem = agp_find_memory(dev, id);

if (mem) {
AGP_FREE_MEMORY(dev, mem);
@@ -1657,7 +1657,7 @@ amdhandlemsgreject(struct amd_softc *amd)
tinfo_sync_period[pDCB->SyncPeriod - 4];
pDCB->tinfo.goal.offset = pDCB->SyncOffset;
pDCB->tinfo.current.period =
tinfo_sync_period[pDCB->SyncPeriod - 4];;
tinfo_sync_period[pDCB->SyncPeriod - 4];
pDCB->tinfo.current.offset = pDCB->SyncOffset;

/*
@@ -221,7 +221,7 @@ amr_attach(struct amr_softc *sc)
sc->amr_submit_command = amr_std_submit_command;
sc->amr_get_work = amr_std_get_work;
sc->amr_poll_command = amr_std_poll_command;
amr_std_attach_mailbox(sc);;
amr_std_attach_mailbox(sc);
}

#ifdef AMR_BOARD_INIT
@@ -1940,7 +1940,7 @@ static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
switch (pccb->csio.cdb_io.cdb_bytes[0]) {
case INQUIRY: {
unsigned char inqdata[36];
char *buffer=pccb->csio.data_ptr;;
char *buffer=pccb->csio.data_ptr;

if (pccb->ccb_h.target_lun) {
pccb->ccb_h.status |= CAM_SEL_TIMEOUT;
@@ -407,7 +407,7 @@ ata_raid_strategy(struct bio *bp)
if (rdp->status & AR_S_REBUILDING)
blk = ((lba / rdp->interleave) * rdp->width) * rdp->interleave +
(rdp->interleave * (drv % rdp->width)) +
lba % rdp->interleave;;
lba % rdp->interleave;

if (bp->bio_cmd == BIO_READ) {
int src_online =
@@ -656,9 +656,9 @@ ata_ahci_pm_write(device_t dev, int port, int reg, u_int32_t value)
ctp->cfis[3] = reg;
ctp->cfis[7] = port | ATA_D_LBA;
ctp->cfis[12] = value & 0xff;
ctp->cfis[4] = (value >> 8) & 0xff;;
ctp->cfis[5] = (value >> 16) & 0xff;;
ctp->cfis[6] = (value >> 24) & 0xff;;
ctp->cfis[4] = (value >> 8) & 0xff;
ctp->cfis[5] = (value >> 16) & 0xff;
ctp->cfis[6] = (value >> 24) & 0xff;
ctp->cfis[15] = ATA_A_4BIT;

if (ata_ahci_issue_cmd(dev, 0, 100)) {
@@ -716,9 +716,9 @@ ata_siiprb_pm_write(device_t dev, int port, int reg, u_int32_t value)
prb->fis[3] = reg;
prb->fis[7] = port;
prb->fis[12] = value & 0xff;
prb->fis[4] = (value >> 8) & 0xff;;
prb->fis[5] = (value >> 16) & 0xff;;
prb->fis[6] = (value >> 24) & 0xff;;
prb->fis[4] = (value >> 8) & 0xff;
prb->fis[5] = (value >> 16) & 0xff;
prb->fis[6] = (value >> 24) & 0xff;
if (ata_siiprb_issue_cmd(dev)) {
device_printf(dev, "error writing PM port\n");
return ATA_E_ABORT;
@@ -136,7 +136,7 @@ static void ar5211GetLowerUpperPcdacs(uint16_t pcdac,
uint16_t channel, const PCDACS_EEPROM *pSrcStruct,
uint16_t *pLowerPcdac, uint16_t *pUpperPcdac);

static void ar5211SetRfgain(struct ath_hal *, const GAIN_VALUES *);;
static void ar5211SetRfgain(struct ath_hal *, const GAIN_VALUES *);
static void ar5211RequestRfgain(struct ath_hal *);
static HAL_BOOL ar5211InvalidGainReadback(struct ath_hal *, GAIN_VALUES *);
static HAL_BOOL ar5211IsGainAdjustNeeded(struct ath_hal *, const GAIN_VALUES *);
@@ -76,7 +76,7 @@ ar5212GetPendingInterrupts(struct ath_hal *ah, HAL_INT *masked)
isr = OS_REG_READ(ah, AR_ISR_RAC);
if (isr == 0xffffffff) {
*masked = 0;
return AH_FALSE;;
return AH_FALSE;
}

*masked = isr & HAL_INT_COMMON;
@@ -104,7 +104,7 @@ ar5416GetPendingInterrupts(struct ath_hal *ah, HAL_INT *masked)
isr = OS_REG_READ(ah, AR_ISR_RAC);
if (isr == 0xffffffff) {
*masked = 0;
return AH_FALSE;;
return AH_FALSE;
}

*masked = isr & HAL_INT_COMMON;
@@ -331,7 +331,7 @@ bti2c_smb_readb(device_t dev, u_char slave, char cmd, char *byte)
/* clear status bits */
OUTL(sc,BKTR_INT_STAT, (BT848_INT_RACK | BT848_INT_I2CDONE));

OUTL(sc,BKTR_I2C_DATA_CTL, ((slave & 0xff) << 24) | (u_char)cmd);;
OUTL(sc,BKTR_I2C_DATA_CTL, ((slave & 0xff) << 24) | (u_char)cmd);

BTI2C_DEBUG(printf("r%lx/", (u_long)(((slave & 0xff) << 24) | (u_char)cmd)));
@@ -475,7 +475,7 @@ int
cs_attach(device_t dev)
{
int error, media=0;
struct cs_softc *sc = device_get_softc(dev);;
struct cs_softc *sc = device_get_softc(dev);
struct ifnet *ifp;

sc->dev = dev;
@@ -152,7 +152,7 @@ struct rx_desc {
uint32_t len_gen;
uint32_t gen2;
uint32_t addr_hi;
} __packed;;
} __packed;

struct rsp_desc { /* response queue descriptor */
struct rss_header rss_hdr;
@@ -2289,7 +2289,7 @@ tulip_identify_asante_nic(tulip_softc_t * const sc)
mi->mi_gpr_length = 0;
mi->mi_gpr_offset = 0;
mi->mi_reset_length = 0;
mi->mi_reset_offset = 0;;
mi->mi_reset_offset = 0;

mi->mi_phyaddr = TULIP_MII_NOPHY;
for (idx = 20; idx > 0 && mi->mi_phyaddr == TULIP_MII_NOPHY; idx--) {
@@ -4446,7 +4446,7 @@ em_free_receive_structures(struct adapter *adapter)
static int
em_rxeof(struct adapter *adapter, int count)
{
struct ifnet *ifp = adapter->ifp;;
struct ifnet *ifp = adapter->ifp;
struct mbuf *mp;
u8 status, accept_frame = 0, eop = 0;
u16 len, desc_len, prev_len_adj;
@@ -860,7 +860,7 @@ fatm_getprom(struct fatm_softc *sc)
NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);

q->error = 0;
q->cb = NULL;;
q->cb = NULL;
H_SETSTAT(q->q.statp, FATM_STAT_PENDING);
H_SYNCSTAT_PREWRITE(sc, q->q.statp);
@@ -1573,7 +1573,7 @@ END_DEBUG
bcopy(&sbp_cmd_status->s_keydep[0],
&sense->sense_key_spec[0], 3);

ocb->ccb->csio.scsi_status = sbp_cmd_status->status;;
ocb->ccb->csio.scsi_status = sbp_cmd_status->status;
ocb->ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR
| CAM_AUTOSNS_VALID;
/*
@@ -2148,7 +2148,7 @@ sbp_free_target(struct sbp_target *target)
}
STAILQ_INIT(&target->xferlist);
free(target->luns, M_SBP);
target->num_lun = 0;;
target->num_lun = 0;
target->luns = NULL;
target->fwdev = NULL;
}
@@ -2318,7 +2318,7 @@ sbp_timeout(void *arg)
sbp_cam_detach_target(target);
if (target->luns != NULL)
free(target->luns, M_SBP);
target->num_lun = 0;;
target->num_lun = 0;
target->luns = NULL;
target->fwdev = NULL;
#endif
@@ -836,7 +836,7 @@ hatm_init_rx_buffer_pool(struct hatm_softc *sc,
uint32_t lbuf_addr; /* address of current buffer */
u_int i;

row_size = sc->bytes_per_row;;
row_size = sc->bytes_per_row;
row_addr = start * row_size;
lbuf_size = sc->cells_per_lbuf * 48;
lbufs_per_row = sc->cells_per_row / sc->cells_per_lbuf;
@@ -889,7 +889,7 @@ hatm_init_tx_buffer_pool(struct hatm_softc *sc,
uint32_t lbuf_addr; /* address of current buffer */
u_int i;

row_size = sc->bytes_per_row;;
row_size = sc->bytes_per_row;
row_addr = start * row_size;
lbuf_size = sc->cells_per_lbuf * 48;
lbufs_per_row = sc->cells_per_row / sc->cells_per_lbuf;
@@ -1341,7 +1341,7 @@ init_adapter(IAL_ADAPTER_T *pAdapter)
#endif
&pAdapter->io_dma_parent /* tag */))
{
return ENXIO;;
return ENXIO;
}
@@ -204,7 +204,7 @@ ndisusb_detach(device_t self)
{
int i;
struct ndis_softc *sc = device_get_softc(self);
struct ndisusb_ep *ne;;
struct ndisusb_ep *ne;

sc->ndisusb_status |= NDISUSB_STATUS_DETACH;
@@ -399,7 +399,7 @@ ism_recv(isc_session_t *sp, pduq_t *pq)
if(sp->flags & ISC_STALLED) {
sdebug(4, "window opened: max=0x%x exp=0x%x opcode=0x%x cmd=0x%x cws=%d.",
sn->maxCmd, sn->expCmd, bhs->opcode, sn->cmd, sp->cws);
sp->flags &= ~ISC_STALLED;;
sp->flags &= ~ISC_STALLED;
}
}
}
@@ -63,7 +63,6 @@ __FBSDID("$FreeBSD$");
/*
* General defines
*/

#define MBOX_DELAY_COUNT 1000000 / 100
#define ISP_MARK_PORTDB(a, b, c) \
isp_prt(isp, ISP_LOGSANCFG, \
@@ -695,7 +694,7 @@ isp_reset(ispsoftc_t *isp, int do_load_defaults)
mbs.logval = MBLOGALL;
isp_mboxcmd(isp, &mbs);
if (mbs.param[0] != MBOX_COMMAND_COMPLETE) {
isp_prt(isp, ISP_LOGERR, "NOP ommand failed (%x)", mbs.param[0]);
isp_prt(isp, ISP_LOGERR, "NOP command failed (%x)", mbs.param[0]);
ISP_RESET0(isp);
return;
}
@@ -1547,24 +1546,18 @@ isp_fibre_init(ispsoftc_t *isp)
}

icbp->icb_maxfrmlen = DEFAULT_FRAMESIZE(isp);
if (icbp->icb_maxfrmlen < ICB_MIN_FRMLEN ||
icbp->icb_maxfrmlen > ICB_MAX_FRMLEN) {
isp_prt(isp, ISP_LOGERR,
"bad frame length (%d) from NVRAM- using %d",
DEFAULT_FRAMESIZE(isp), ICB_DFLT_FRMLEN);
if (icbp->icb_maxfrmlen < ICB_MIN_FRMLEN || icbp->icb_maxfrmlen > ICB_MAX_FRMLEN) {
isp_prt(isp, ISP_LOGERR, "bad frame length (%d) from NVRAM- using %d", DEFAULT_FRAMESIZE(isp), ICB_DFLT_FRMLEN);
icbp->icb_maxfrmlen = ICB_DFLT_FRMLEN;
}
icbp->icb_maxalloc = fcp->isp_maxalloc;
if (icbp->icb_maxalloc < 1) {
isp_prt(isp, ISP_LOGERR,
"bad maximum allocation (%d)- using 16", fcp->isp_maxalloc);
isp_prt(isp, ISP_LOGERR, "bad maximum allocation (%d)- using 16", fcp->isp_maxalloc);
icbp->icb_maxalloc = 16;
}
icbp->icb_execthrottle = DEFAULT_EXEC_THROTTLE(isp);
if (icbp->icb_execthrottle < 1) {
isp_prt(isp, ISP_LOGERR,
"bad execution throttle of %d- using %d",
DEFAULT_EXEC_THROTTLE(isp), ICB_DFLT_THROTTLE);
isp_prt(isp, ISP_LOGERR, "bad execution throttle of %d- using %d", DEFAULT_EXEC_THROTTLE(isp), ICB_DFLT_THROTTLE);
icbp->icb_execthrottle = ICB_DFLT_THROTTLE;
}
icbp->icb_retry_delay = fcp->isp_retry_delay;
@@ -1658,18 +1651,18 @@ isp_fibre_init(ispsoftc_t *isp)

/*
* For 22XX > 2.1.26 && 23XX, set some options.
* XXX: Probably okay for newer 2100 f/w too.
*/
if (ISP_FW_NEWER_THAN(isp, 2, 26, 0)) {
/*
* Turn on LIP F8 async event (1)
* Turn on generate AE 8013 on all LIP Resets (2)
* Disable LIP F7 switching (8)
*/
MBSINIT(&mbs, MBOX_SET_FIRMWARE_OPTIONS, MBLOGALL, 0);
mbs.param[1] = 0xb;
mbs.param[1] = IFCOPT1_DISF7SWTCH|IFCOPT1_LIPASYNC|IFCOPT1_LIPF8;
mbs.param[2] = 0;
mbs.param[3] = 0;
if (ISP_FW_NEWER_THAN(isp, 3, 16, 0)) {
mbs.param[1] |= IFCOPT1_EQFQASYNC|IFCOPT1_CTIO_RETRY;
if (fcp->role & ISP_ROLE_TARGET) {
mbs.param[3] = IFCOPT3_NOPRLI;
}
}
isp_mboxcmd(isp, &mbs);
if (mbs.param[0] != MBOX_COMMAND_COMPLETE) {
return;
@@ -2093,8 +2086,7 @@ isp_mark_portdb(ispsoftc_t *isp, int chan, int disposition)
* or via FABRIC LOGIN/FABRIC LOGOUT for other cards.
*/
static int
isp_plogx(ispsoftc_t *isp, int chan, uint16_t handle, uint32_t portid,
int flags, int gs)
isp_plogx(ispsoftc_t *isp, int chan, uint16_t handle, uint32_t portid, int flags, int gs)
{
mbreg_t mbs;
uint8_t q[QENTRY_LEN];
@@ -2771,21 +2763,15 @@ isp_pdb_sync(ispsoftc_t *isp, int chan)
/*
* Make sure we're okay for doing this right now.
*/
if (fcp->isp_loopstate != LOOP_PDB_RCVD &&
fcp->isp_loopstate != LOOP_FSCAN_DONE &&
fcp->isp_loopstate != LOOP_LSCAN_DONE) {
isp_prt(isp, ISP_LOGWARN, "isp_pdb_sync: bad loopstate %d",
fcp->isp_loopstate);
if (fcp->isp_loopstate != LOOP_PDB_RCVD && fcp->isp_loopstate != LOOP_FSCAN_DONE && fcp->isp_loopstate != LOOP_LSCAN_DONE) {
isp_prt(isp, ISP_LOGWARN, "isp_pdb_sync: bad loopstate %d", fcp->isp_loopstate);
return (-1);
}

if (fcp->isp_topo == TOPO_FL_PORT ||
fcp->isp_topo == TOPO_NL_PORT ||
fcp->isp_topo == TOPO_N_PORT) {
if (fcp->isp_topo == TOPO_FL_PORT || fcp->isp_topo == TOPO_NL_PORT || fcp->isp_topo == TOPO_N_PORT) {
if (fcp->isp_loopstate < LOOP_LSCAN_DONE) {
if (isp_scan_loop(isp, chan) != 0) {
isp_prt(isp, ISP_LOGWARN,
"isp_pdb_sync: isp_scan_loop failed");
isp_prt(isp, ISP_LOGWARN, "isp_pdb_sync: isp_scan_loop failed");
return (-1);
}
}
@@ -2794,15 +2780,13 @@ isp_pdb_sync(ispsoftc_t *isp, int chan)
if (fcp->isp_topo == TOPO_F_PORT || fcp->isp_topo == TOPO_FL_PORT) {
if (fcp->isp_loopstate < LOOP_FSCAN_DONE) {
if (isp_scan_fabric(isp, chan) != 0) {
isp_prt(isp, ISP_LOGWARN,
"isp_pdb_sync: isp_scan_fabric failed");
isp_prt(isp, ISP_LOGWARN, "isp_pdb_sync: isp_scan_fabric failed");
return (-1);
}
}
}

isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
"Chan %d Synchronizing PDBs", chan);
isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "Chan %d Synchronizing PDBs", chan);

fcp->isp_loopstate = LOOP_SYNCING_PDB;
@@ -2831,11 +2815,7 @@ isp_pdb_sync(ispsoftc_t *isp, int chan)
lp->state = FC_PORTDB_STATE_NIL;
isp_async(isp, ISPASYNC_DEV_GONE, chan, lp);
if (lp->autologin == 0) {
(void) isp_plogx(isp, chan, lp->handle,
lp->portid,
PLOGX_FLG_CMD_LOGO |
PLOGX_FLG_IMPLICIT |
PLOGX_FLG_FREE_NPHDL, 0);
(void) isp_plogx(isp, chan, lp->handle, lp->portid, PLOGX_FLG_CMD_LOGO | PLOGX_FLG_IMPLICIT | PLOGX_FLG_FREE_NPHDL, 0);
} else {
lp->autologin = 0;
}
@@ -3081,8 +3061,7 @@ isp_scan_loop(ispsoftc_t *isp, int chan)
for (i = 0; i < MAX_FC_TARG; i++) {
lp = &fcp->portdb[i];

if (lp->state == FC_PORTDB_STATE_NIL ||
lp->target_mode) {
if (lp->state == FC_PORTDB_STATE_NIL || lp->target_mode) {
continue;
}
if (lp->node_wwn != tmp.node_wwn) {
@@ -3600,8 +3579,7 @@ isp_scan_fabric(ispsoftc_t *isp, int chan)
for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) {
lp = &fcp->portdb[dbidx];

if (lp->state != FC_PORTDB_STATE_PROBATIONAL ||
lp->target_mode) {
if (lp->state != FC_PORTDB_STATE_PROBATIONAL || lp->target_mode) {
continue;
}
if (lp->portid == portid) {
@@ -3838,8 +3816,7 @@ isp_scan_fabric(ispsoftc_t *isp, int chan)
if (fcp->portdb[dbidx].target_mode) {
continue;
}
if (fcp->portdb[dbidx].node_wwn == wwnn &&
fcp->portdb[dbidx].port_wwn == wwpn) {
if (fcp->portdb[dbidx].node_wwn == wwnn && fcp->portdb[dbidx].port_wwn == wwpn) {
break;
}
}
@@ -4425,7 +4402,7 @@ isp_start(XS_T *xs)
*tptr = 0x1999;
}

if (isp_save_xs(isp, xs, &handle)) {
if (isp_allocate_xs(isp, xs, &handle)) {
isp_prt(isp, ISP_LOGDEBUG0, "out of xflist pointers");
XS_SETERR(xs, HBA_BOTCH);
return (CMD_EAGAIN);
@@ -5171,8 +5148,8 @@ again:
}
}

if ((sp->req_handle != ISP_SPCL_HANDLE) && (sp->req_handle > isp->isp_maxcmds || sp->req_handle < 1)) {
isp_prt(isp, ISP_LOGERR, "bad request handle %d (type 0x%x)", sp->req_handle, etype);
if (!ISP_VALID_HANDLE(isp, sp->req_handle)) {
isp_prt(isp, ISP_LOGERR, "bad request handle 0x%x (iocb type 0x%x)", sp->req_handle, etype);
ISP_MEMZERO(hp, QENTRY_LEN); /* PERF */
ISP_WRITE(isp, isp->isp_respoutrp, optr);
continue;
@@ -5186,14 +5163,13 @@ again:
*/
if (etype != RQSTYPE_RESPONSE) {
isp_prt(isp, ISP_LOGERR, "cannot find handle 0x%x (type 0x%x)", sp->req_handle, etype);
} else if (ts != RQCS_ABORTED && ts != RQCS_RESET_OCCURRED && sp->req_handle != ISP_SPCL_HANDLE) {
} else if (ts != RQCS_ABORTED && ts != RQCS_RESET_OCCURRED) {
isp_prt(isp, ISP_LOGERR, "cannot find handle 0x%x (status 0x%x)", sp->req_handle, ts);
}
ISP_MEMZERO(hp, QENTRY_LEN); /* PERF */
ISP_WRITE(isp, isp->isp_respoutrp, optr);
continue;
}
isp_destroy_handle(isp, sp->req_handle);
if (req_status_flags & RQSTF_BUS_RESET) {
XS_SETERR(xs, HBA_BUSRESET);
ISP_SET_SENDMARKER(isp, XS_CHANNEL(xs), 1);
@@ -5329,6 +5305,7 @@ again:
if (XS_XFRLEN(xs)) {
ISP_DMAFREE(isp, xs, sp->req_handle);
}
isp_destroy_handle(isp, sp->req_handle);

if (((isp->isp_dblev & (ISP_LOGDEBUG1|ISP_LOGDEBUG2|ISP_LOGDEBUG3))) ||
((isp->isp_dblev & ISP_LOGDEBUG0) && ((!XS_NOERR(xs)) ||
@@ -5689,16 +5666,19 @@ isp_parse_async(ispsoftc_t *isp, uint16_t mbox)
* commands that complete (with no apparent error) after
* we receive a LIP. This has been observed mostly on
* Local Loop topologies. To be safe, let's just mark
* all active commands as dead.
* all active initiator commands as dead.
*/
if (topo == TOPO_NL_PORT || topo == TOPO_FL_PORT) {
int i, j;
for (i = j = 0; i < isp->isp_maxcmds; i++) {
XS_T *xs;
xs = isp->isp_xflist[i];
if (xs == NULL) {
isp_hdl_t *hdp;

hdp = &isp->isp_xflist[i];
if (ISP_H2HT(hdp->handle) != ISP_HANDLE_INITIATOR) {
continue;
}
xs = hdp->cmd;
if (XS_CHANNEL(xs) != chan) {
continue;
}
@@ -6666,8 +6646,8 @@ isp_mbox_continue(ispsoftc_t *isp)
ptr = isp->isp_mbxworkp;
switch (isp->isp_lastmbxcmd) {
case MBOX_WRITE_RAM_WORD:
mbs.param[1] = isp->isp_mbxwrk1++;;
mbs.param[2] = *ptr++;;
mbs.param[1] = isp->isp_mbxwrk1++;
mbs.param[2] = *ptr++;
break;
case MBOX_READ_RAM_WORD:
*ptr++ = isp->isp_mboxtmp[2];
@@ -6677,7 +6657,7 @@ isp_mbox_continue(ispsoftc_t *isp)
offset = isp->isp_mbxwrk1;
offset |= isp->isp_mbxwrk8 << 16;

mbs.param[2] = *ptr++;;
mbs.param[2] = *ptr++;
mbs.param[1] = offset;
mbs.param[8] = offset >> 16;
isp->isp_mbxwrk1 = ++offset;
@@ -8293,6 +8273,8 @@ isp_parse_nvram_2100(ispsoftc_t *isp, uint8_t *nvram_data)
if ((wwn >> 60) == 0) {
wwn |= (((uint64_t) 2)<< 60);
}
} else {
wwn = fcp->isp_wwpn_nvram & ~((uint64_t) 0xfff << 48);
}
} else {
wwn &= ~((uint64_t) 0xfff << 48);
@@ -8358,11 +8340,6 @@ isp_parse_nvram_2400(ispsoftc_t *isp, uint8_t *nvram_data)
ISP2400_NVRAM_FIRMWARE_OPTIONS3(nvram_data));

wwn = ISP2400_NVRAM_PORT_NAME(nvram_data);
if (wwn) {
if ((wwn >> 60) != 2 && (wwn >> 60) != 5) {
wwn = 0;
}
}
fcp->isp_wwpn_nvram = wwn;

wwn = ISP2400_NVRAM_NODE_NAME(nvram_data);
@@ -8371,6 +8348,10 @@ isp_parse_nvram_2400(ispsoftc_t *isp, uint8_t *nvram_data)
wwn = 0;
}
}
if (wwn == 0 && (fcp->isp_wwpn_nvram >> 60) == 2) {
wwn = fcp->isp_wwpn_nvram;
wwn &= ~((uint64_t) 0xfff << 48);
}
fcp->isp_wwnn_nvram = wwn;

if (ISP2400_NVRAM_EXCHANGE_COUNT(nvram_data)) {
@@ -133,33 +133,37 @@ isp_attach_chan(ispsoftc_t *isp, struct cam_devq *devq, int chan)
}
#endif
} else {
fcparam *fcp = FCPARAM(isp, chan);
struct isp_fc *fc = ISP_FC_PC(isp, chan);

ISP_LOCK(isp);
fc->sim = sim;
fc->path = path;
fc->isp = isp;
fc->ready = 1;

callout_init_mtx(&fc->ldt, &isp->isp_osinfo.lock, 0);
callout_init_mtx(&fc->gdt, &isp->isp_osinfo.lock, 0);

if (THREAD_CREATE(isp_kthread, fc, &fc->kproc, 0, 0, "%s: fc_thrd%d", device_get_nameunit(isp->isp_osinfo.dev), chan)) {
xpt_free_path(fc->path);
ISP_LOCK(isp);
xpt_bus_deregister(cam_sim_path(fc->sim));
ISP_UNLOCK(isp);
cam_sim_free(fc->sim, FALSE);
}
/*
* We start by being "loop down" if we have an initiator role
*/
ISP_LOCK(isp);
if ((FCPARAM(isp, chan)->role & ISP_ROLE_INITIATOR) && fc->ldt_running == 0) {
if (fcp->role & ISP_ROLE_INITIATOR) {
isp_freeze_loopdown(isp, chan, "isp_attach");
fc->ldt_running = 1;
callout_reset(&fc->ldt, isp_quickboot_time * hz, isp_ldt, fc);
isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "Starting Initial Loop Down Timer @ %lu", (unsigned long) time_uptime);
}
ISP_UNLOCK(isp);
if (THREAD_CREATE(isp_kthread, fc, &fc->kproc, 0, 0, "%s: fc_thrd%d", device_get_nameunit(isp->isp_osinfo.dev), chan)) {
xpt_free_path(fc->path);
ISP_LOCK(isp);
if (callout_active(&fc->ldt)) {
callout_stop(&fc->ldt);
}
xpt_bus_deregister(cam_sim_path(fc->sim));
ISP_UNLOCK(isp);
cam_sim_free(fc->sim, FALSE);
return (ENOMEM);
}
#ifdef ISP_INTERNAL_TARGET
ISP_SET_PC(isp, chan, proc_active, 1);
if (THREAD_CREATE(isp_target_thread_fc, fc, &fc->target_proc, 0, 0, "%s: isp_test_tgt%d", device_get_nameunit(isp->isp_osinfo.dev), chan)) {
@@ -343,6 +347,17 @@ ispioctl(struct cdev *dev, u_long c, caddr_t addr, int flags, struct thread *td)
break;
}
if (IS_FC(isp)) {
/*
* We don't really support dual role at present on FC cards.
*
* We should, but a bunch of things are currently broken,
* so don't allow it.
*/
if (nr == ISP_ROLE_BOTH) {
isp_prt(isp, ISP_LOGERR, "cannot support dual role at present");
retval = EINVAL;
break;
}
*(int *)addr = FCPARAM(isp, chan)->role;
#ifdef ISP_INTERNAL_TARGET
ISP_LOCK(isp);
@@ -1638,7 +1653,7 @@ isp_target_start_ctio(ispsoftc_t *isp, union ccb *ccb)
cto->ct_timeout = 10;
}

if (isp_save_xs_tgt(isp, ccb, &handle)) {
if (isp_allocate_xs_tgt(isp, ccb, &handle)) {
xpt_print(ccb->ccb_h.path, "No XFLIST pointers for %s\n", __func__);
ccb->ccb_h.status = CAM_REQUEUE_REQ;
goto out;
@@ -2943,8 +2958,8 @@ isp_target_mark_aborted_early(ispsoftc_t *isp, tstate_t *tptr, uint32_t tag_id)

#ifdef ISP_INTERNAL_TARGET
// #define ISP_FORCE_TIMEOUT 1
#define ISP_TEST_WWNS 1
#define ISP_TEST_SEPARATE_STATUS 1
// #define ISP_TEST_WWNS 1
// #define ISP_TEST_SEPARATE_STATUS 1

#define ccb_data_offset ppriv_field0
#define ccb_atio ppriv_ptr1
@@ -3819,21 +3834,41 @@ isp_watchdog(void *arg)
isp = XS_ISP(xs);

handle = isp_find_handle(isp, xs);
if (handle) {
if (handle != ISP_HANDLE_FREE) {
/*
* Make sure the command is *really* dead before we
* release the handle (and DMA resources) for reuse.
* Try and make sure the command is really dead before
* we release the handle (and DMA resources) for reuse.
*
* If we are successful in aborting the command then
* we're done here because we'll get the command returned
* back separately.
*/
(void) isp_control(isp, ISPCTL_ABORT_CMD, xs);
if (isp_control(isp, ISPCTL_ABORT_CMD, xs) == 0) {
return;
}

/*
* After this point, the comamnd is really dead.
* Note that after calling the above, the command may in
* fact have been completed.
*/
xs = isp_find_xs(isp, handle);

/*
* If the command no longer exists, then we won't
* be able to find the xs again with this handle.
*/
if (xs == NULL) {
return;
}

/*
* After this point, the command is really dead.
*/
if (XS_XFRLEN(xs)) {
ISP_DMAFREE(isp, xs, handle);
}
isp_destroy_handle(isp, handle);
xpt_print(xs->ccb_h.path, "watchdog timeout for handle 0x%x\n", handle);
isp_prt(isp, ISP_LOGERR, "%s: timeout for handle 0x%x", __func__, handle);
XS_SETERR(xs, CAM_CMD_TIMEOUT);
isp_done(xs);
}
@@ -3924,12 +3959,12 @@ isp_gdt(void *arg)
isp_prt(isp, ISP_LOGCONFIG, prom3, chan, lp->portid, tgt, "Gone Device Timeout");
isp_make_gone(isp, chan, tgt);
}
if (more_to_do) {
fc->gdt_running = 1;
callout_reset(&fc->gdt, hz, isp_gdt, fc);
} else {
isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "Chan %d stopping Gone Device Timer", chan);
fc->gdt_running = 0;
if (fc->ready) {
if (more_to_do) {
callout_reset(&fc->gdt, hz, isp_gdt, fc);
} else {
isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "Chan %d stopping Gone Device Timer", chan);
}
}
}
@@ -4006,6 +4041,7 @@ isp_kthread(void *arg)
ispsoftc_t *isp = fc->isp;
int chan = fc - isp->isp_osinfo.pc.fc;
int slp = 0;

mtx_lock(&isp->isp_osinfo.lock);

for (;;) {
@@ -4238,6 +4274,7 @@ isp_action(struct cam_sim *sim, union ccb *ccb)
isp_disable_lun(isp, ccb);
}
break;
case XPT_IMMED_NOTIFY:
case XPT_IMMEDIATE_NOTIFY: /* Add Immediate Notify Resource */
case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */
{
@@ -4287,11 +4324,19 @@ isp_action(struct cam_sim *sim, union ccb *ccb)
SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h, sim_links.sle);
ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "Put FREE INOT, (seq id 0x%x) count now %d\n",
((struct ccb_immediate_notify *)ccb)->seq_id, tptr->inot_count);
} else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
tptr->inot_count++;
SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h, sim_links.sle);
ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "Put FREE INOT, (seq id 0x%x) count now %d\n",
((struct ccb_immediate_notify *)ccb)->seq_id, tptr->inot_count);
}
rls_lun_statep(isp, tptr);
ccb->ccb_h.status = CAM_REQ_INPROG;
break;
}
case XPT_NOTIFY_ACK:
ccb->ccb_h.status = CAM_REQ_CMP_ERR;
break;
case XPT_NOTIFY_ACKNOWLEDGE: /* notify ack */
{
tstate_t *tptr;
@@ -4601,10 +4646,21 @@ isp_prt(isp, ISP_LOGALL, "Setting Channel %d wwns to 0x%jx 0x%jx", bus, fcp->isp
}
break;
case KNOB_ROLE_BOTH:
#if 0
if (fcp->role != ISP_ROLE_BOTH) {
rchange = 1;
newrole = ISP_ROLE_BOTH;
}
#else
/*
* We don't really support dual role at present on FC cards.
*
* We should, but a bunch of things are currently broken,
* so don't allow it.
*/
isp_prt(isp, ISP_LOGERR, "cannot support dual role at present");
ccb->ccb_h.status = CAM_REQ_INVALID;
#endif
break;
}
if (rchange) {
@@ -4771,6 +4827,7 @@ isp_async(ispsoftc_t *isp, ispasync_t cmd, ...)
char *msg = NULL;
target_id_t tgt;
fcportdb_t *lp;
struct isp_fc *fc;
struct cam_path *tmppath;
va_list ap;

@@ -4855,7 +4912,6 @@ isp_async(ispsoftc_t *isp, ispasync_t cmd, ...)
/* FALLTHROUGH */
case ISPASYNC_LOOP_DOWN:
{
struct isp_fc *fc;
if (msg == NULL) {
msg = "LOOP Down";
}
@@ -4863,20 +4919,21 @@ isp_async(ispsoftc_t *isp, ispasync_t cmd, ...)
bus = va_arg(ap, int);
va_end(ap);

FCPARAM(isp, bus)->link_active = 1;
FCPARAM(isp, bus)->link_active = 0;

fc = ISP_FC_PC(isp, bus);
/*
* We don't do any simq freezing if we are only in target mode
*/
if (fc->role & ISP_ROLE_INITIATOR) {
if (fc->path) {
isp_freeze_loopdown(isp, bus, msg);
}
if (fc->ldt_running == 0) {
fc->ldt_running = 1;
callout_reset(&fc->ldt, fc->loop_down_limit * hz, isp_ldt, fc);
isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "starting Loop Down Timer @ %lu", (unsigned long) time_uptime);
if (cmd == ISPASYNC_LOOP_DOWN && fc->ready) {
/*
* We don't do any simq freezing if we are only in target mode
*/
if (fc->role & ISP_ROLE_INITIATOR) {
if (fc->path) {
isp_freeze_loopdown(isp, bus, msg);
}
if (!callout_active(&fc->ldt)) {
callout_reset(&fc->ldt, fc->loop_down_limit * hz, isp_ldt, fc);
isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "starting Loop Down Timer @ %lu", (unsigned long) time_uptime);
}
}
}
isp_prt(isp, ISP_LOGINFO, "Chan %d: %s", bus, msg);
@@ -4886,6 +4943,7 @@ isp_async(ispsoftc_t *isp, ispasync_t cmd, ...)
va_start(ap, cmd);
bus = va_arg(ap, int);
va_end(ap);
fc = ISP_FC_PC(isp, bus);
/*
* Now we just note that Loop has come up. We don't
* actually do anything because we're waiting for a
@@ -4893,8 +4951,8 @@ isp_async(ispsoftc_t *isp, ispasync_t cmd, ...)
* thread to look at the state of the loop again.
*/
FCPARAM(isp, bus)->link_active = 1;
ISP_FC_PC(isp, bus)->loop_dead = 0;
ISP_FC_PC(isp, bus)->loop_down_time = 0;
fc->loop_dead = 0;
fc->loop_down_time = 0;
isp_prt(isp, ISP_LOGINFO, "Chan %d Loop UP", bus);
break;
case ISPASYNC_DEV_ARRIVED:
@@ -4902,8 +4960,9 @@ isp_async(ispsoftc_t *isp, ispasync_t cmd, ...)
bus = va_arg(ap, int);
lp = va_arg(ap, fcportdb_t *);
va_end(ap);
fc = ISP_FC_PC(isp, bus);
lp->reserved = 0;
if ((ISP_FC_PC(isp, bus)->role & ISP_ROLE_INITIATOR) && (lp->roles & (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT))) {
if ((fc->role & ISP_ROLE_INITIATOR) && (lp->roles & (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT))) {
int dbidx = lp - FCPARAM(isp, bus)->portdb;
int i;

@@ -4936,6 +4995,7 @@ isp_async(ispsoftc_t *isp, ispasync_t cmd, ...)
bus = va_arg(ap, int);
lp = va_arg(ap, fcportdb_t *);
va_end(ap);
fc = ISP_FC_PC(isp, bus);
lp->reserved = 0;
if (isp_change_is_bad) {
lp->state = FC_PORTDB_STATE_NIL;
@@ -4982,6 +5042,7 @@ isp_async(ispsoftc_t *isp, ispasync_t cmd, ...)
bus = va_arg(ap, int);
lp = va_arg(ap, fcportdb_t *);
va_end(ap);
fc = ISP_FC_PC(isp, bus);
/*
* If this has a virtual target and we haven't marked it
* that we're going to have isp_gdt tell the OS it's gone,
@@ -4994,10 +5055,9 @@ isp_async(ispsoftc_t *isp, ispasync_t cmd, ...)
lp->reserved = 1;
lp->new_reserved = ISP_FC_PC(isp, bus)->gone_device_time;
lp->state = FC_PORTDB_STATE_ZOMBIE;
if (ISP_FC_PC(isp, bus)->gdt_running == 0) {
if (fc->ready && !callout_active(&fc->gdt)) {
isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "Chan %d starting Gone Device Timer", bus);
ISP_FC_PC(isp, bus)->gdt_running = 1;
callout_reset(&ISP_FC_PC(isp, bus)->gdt, hz, isp_gdt, ISP_FC_PC(isp, bus));
callout_reset(&fc->gdt, hz, isp_gdt, fc);
}
tgt = lp->dev_map_idx - 1;
isp_prt(isp, ISP_LOGCONFIG, prom2, bus, lp->portid, lp->handle, roles[lp->roles], "gone zombie at", tgt, (uint32_t) (lp->port_wwn >> 32), (uint32_t) lp->port_wwn);
@@ -5022,6 +5082,7 @@ isp_async(ispsoftc_t *isp, ispasync_t cmd, ...)
nlstate = reason = 0;
}
va_end(ap);
fc = ISP_FC_PC(isp, bus);

if (evt == ISPASYNC_CHANGE_PDB) {
msg = "Chan %d Port Database Changed";
@@ -5034,16 +5095,15 @@ isp_async(ispsoftc_t *isp, ispasync_t cmd, ...)
/*
* If the loop down timer is running, cancel it.
*/
if (ISP_FC_PC(isp, bus)->ldt_running) {
if (fc->ready && callout_active(&fc->ldt)) {
isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "Stopping Loop Down Timer @ %lu", (unsigned long) time_uptime);
ISP_FC_PC(isp, bus)->ldt_running = 0;
callout_stop(&ISP_FC_PC(isp, bus)->ldt);
callout_stop(&fc->ldt);
}
isp_prt(isp, ISP_LOGINFO, msg, bus);
if (ISP_FC_PC(isp, bus)->role & ISP_ROLE_INITIATOR) {
if (fc->role & ISP_ROLE_INITIATOR) {
isp_freeze_loopdown(isp, bus, msg);
}
wakeup(ISP_FC_PC(isp, bus));
wakeup(fc);
break;
}
#ifdef ISP_TARGET_MODE
|
@ -177,9 +177,9 @@ struct isp_fc {
|
||||
hysteresis : 8,
|
||||
role : 2,
|
||||
gdt_running : 1,
|
||||
ldt_running : 1,
|
||||
loop_dead : 1,
|
||||
fcbsy : 1;
|
||||
fcbsy : 1,
|
||||
ready : 1;
|
||||
struct callout ldt; /* loop down timer */
|
||||
struct callout gdt; /* gone device timer */
|
||||
#ifdef ISP_TARGET_MODE
|
||||
|
@@ -246,65 +246,70 @@ copy_and_sync:
}

int
isp_save_xs(ispsoftc_t *isp, XS_T *xs, uint32_t *handlep)
isp_allocate_xs(ispsoftc_t *isp, XS_T *xs, uint32_t *handlep)
{
uint16_t i, j;
isp_hdl_t *hdp;

for (j = isp->isp_lasthdls, i = 0; i < isp->isp_maxcmds; i++) {
if (isp->isp_xflist[j] == NULL) {
break;
}
if (++j == isp->isp_maxcmds) {
j = 0;
}
}
if (i == isp->isp_maxcmds) {
hdp = isp->isp_xffree;
if (hdp == NULL) {
return (-1);
}
isp->isp_xflist[j] = xs;
*handlep = j+1;
if (++j == isp->isp_maxcmds) {
j = 0;
}
isp->isp_lasthdls = (uint32_t)j;
isp->isp_xffree = hdp->cmd;
hdp->cmd = xs;
hdp->handle = (hdp - isp->isp_xflist);
hdp->handle |= (ISP_HANDLE_INITIATOR << ISP_HANDLE_USAGE_SHIFT);
hdp->handle |= (isp->isp_seqno++ << ISP_HANDLE_SEQ_SHIFT);
*handlep = hdp->handle;
return (0);
}

XS_T *
isp_find_xs(ispsoftc_t *isp, uint32_t handle)
{
if (handle < 1 || handle > (uint32_t) isp->isp_maxcmds) {
if (!ISP_VALID_INI_HANDLE(isp, handle)) {
isp_prt(isp, ISP_LOGERR, "%s: bad handle 0x%x", __func__, handle);
return (NULL);
} else {
return (isp->isp_xflist[handle - 1]);
}
return (isp->isp_xflist[(handle & ISP_HANDLE_CMD_MASK)].cmd);
}

uint32_t
isp_find_handle(ispsoftc_t *isp, XS_T *xs)
{
uint16_t i;
uint32_t i, foundhdl = ISP_HANDLE_FREE;

if (xs != NULL) {
for (i = 0; i < isp->isp_maxcmds; i++) {
if (isp->isp_xflist[i] == xs) {
return ((uint32_t) (i+1));
if (isp->isp_xflist[i].cmd != xs) {
continue;
}
foundhdl = isp->isp_xflist[i].handle;
break;
}
}
return (0);
return (foundhdl);
}

uint32_t
isp_handle_index(uint32_t handle)
isp_handle_index(ispsoftc_t *isp, uint32_t handle)
{
return (handle - 1);
if (!ISP_VALID_HANDLE(isp, handle)) {
return (handle & ISP_HANDLE_CMD_MASK);
} else {
isp_prt(isp, ISP_LOGERR, "%s: bad handle 0x%x", __func__, handle);
return (ISP_BAD_HANDLE_INDEX);
}
}

void
isp_destroy_handle(ispsoftc_t *isp, uint32_t handle)
{
if (handle > 0 && handle <= (uint32_t) isp->isp_maxcmds) {
isp->isp_xflist[handle - 1] = NULL;
if (!ISP_VALID_INI_HANDLE(isp, handle)) {
isp_prt(isp, ISP_LOGERR, "%s: bad handle 0x%x", __func__, handle);
} else {
isp->isp_xflist[(handle & ISP_HANDLE_CMD_MASK)].handle = ISP_HANDLE_FREE;
isp->isp_xflist[(handle & ISP_HANDLE_CMD_MASK)].cmd = isp->isp_xffree;
isp->isp_xffree = &isp->isp_xflist[(handle & ISP_HANDLE_CMD_MASK)];
}
}
@@ -617,60 +622,54 @@ isp_fc_change_role(ispsoftc_t *isp, int chan, int new_role)
void
isp_clear_commands(ispsoftc_t *isp)
{
XS_T *xs;
uint32_t tmp, handle;
uint32_t tmp;
isp_hdl_t *hdp;
#ifdef ISP_TARGET_MODE
isp_notify_t notify;
#endif

for (tmp = 0; isp->isp_xflist && tmp < isp->isp_maxcmds; tmp++) {
xs = isp->isp_xflist[tmp];
if (xs == NULL) {
continue;
}
handle = isp_find_handle(isp, xs);
if (handle == 0) {
XS_T *xs;

hdp = &isp->isp_xflist[tmp];
if (hdp->handle == ISP_HANDLE_FREE) {
continue;
}
xs = hdp->cmd;
if (XS_XFRLEN(xs)) {
ISP_DMAFREE(isp, xs, handle);
ISP_DMAFREE(isp, xs, hdp->handle);
XS_SET_RESID(xs, XS_XFRLEN(xs));
} else {
XS_SET_RESID(xs, 0);
}
isp_destroy_handle(isp, handle);
hdp->handle = 0;
hdp->cmd = NULL;
XS_SETERR(xs, HBA_BUSRESET);
isp_done(xs);
}
#ifdef ISP_TARGET_MODE
for (tmp = 0; isp->isp_tgtlist && tmp < isp->isp_maxcmds; tmp++) {
uint8_t local[QENTRY_LEN];

xs = isp->isp_tgtlist[tmp];
if (xs == NULL) {
hdp = &isp->isp_tgt_xflist[tmp];
if (hdp->handle == ISP_HANDLE_FREE) {
continue;
}
handle = isp_find_tgt_handle(isp, xs);
if (handle == 0) {
continue;
}
ISP_DMAFREE(isp, xs, handle);

ISP_DMAFREE(isp, hdp->cmd, hdp->handle);
ISP_MEMZERO(local, QENTRY_LEN);
if (IS_24XX(isp)) {
ct7_entry_t *ctio = (ct7_entry_t *) local;
ctio->ct_syshandle = handle;
ctio->ct_syshandle = hdp->handle;
ctio->ct_nphdl = CT_HBA_RESET;
ctio->ct_header.rqs_entry_type = RQSTYPE_CTIO7;
} else if (IS_FC(isp)) {
ct2_entry_t *ctio = (ct2_entry_t *) local;
ctio->ct_syshandle = handle;
ctio->ct_syshandle = hdp->handle;
ctio->ct_status = CT_HBA_RESET;
ctio->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
} else {
ct_entry_t *ctio = (ct_entry_t *) local;
ctio->ct_syshandle = handle & 0xffff;
ctio->ct_status = CT_HBA_RESET & 0xff;;
ctio->ct_syshandle = hdp->handle & 0xffff;
ctio->ct_status = CT_HBA_RESET & 0xff;
ctio->ct_header.rqs_entry_type = RQSTYPE_CTIO;
}
isp_async(isp, ISPASYNC_TARGET_ACTION, local);
@@ -2229,59 +2228,59 @@ isp_send_tgt_cmd(ispsoftc_t *isp, void *fqe, void *segp, uint32_t nsegs, uint32_
}

int
isp_save_xs_tgt(ispsoftc_t *isp, void *xs, uint32_t *handlep)
isp_allocate_xs_tgt(ispsoftc_t *isp, void *xs, uint32_t *handlep)
{
int i;
isp_hdl_t *hdp;

for (i = 0; i < (int) isp->isp_maxcmds; i++) {
if (isp->isp_tgtlist[i] == NULL) {
break;
}
}
if (i == isp->isp_maxcmds) {
hdp = isp->isp_tgtfree;
if (hdp == NULL) {
return (-1);
}
isp->isp_tgtlist[i] = xs;
*handlep = (i+1) | 0x8000;
isp->isp_tgtfree = hdp->cmd;
hdp->cmd = xs;
hdp->handle = (hdp - isp->isp_tgtlist);
hdp->handle |= (ISP_HANDLE_TARGET << ISP_HANDLE_USAGE_SHIFT);
hdp->handle |= (isp->isp_seqno++ << ISP_HANDLE_SEQ_SHIFT);
*handlep = hdp->handle;
return (0);
}

void *
isp_find_xs_tgt(ispsoftc_t *isp, uint32_t handle)
{
if (handle == 0 || IS_TARGET_HANDLE(handle) == 0 || (handle & ISP_HANDLE_MASK) > isp->isp_maxcmds) {
isp_prt(isp, ISP_LOGERR, "bad handle %u in isp_find_xs_tgt", handle);
if (!ISP_VALID_TGT_HANDLE(isp, handle)) {
isp_prt(isp, ISP_LOGERR, "%s: bad handle 0x%x", __func__, handle);
return (NULL);
} else {
return (isp->isp_tgtlist[(handle & ISP_HANDLE_MASK) - 1]);
}
return (isp->isp_tgtlist[(handle & ISP_HANDLE_CMD_MASK)].cmd);
}

uint32_t
isp_find_tgt_handle(ispsoftc_t *isp, void *xs)
{
int i;
uint32_t i, foundhdl = ISP_HANDLE_FREE;

if (xs != NULL) {
for (i = 0; i < isp->isp_maxcmds; i++) {
if (isp->isp_tgtlist[i] == xs) {
uint32_t handle = i;
handle += 1;
handle &= ISP_HANDLE_MASK;
handle |= 0x8000;
return (handle);
if (isp->isp_tgtlist[i].cmd != xs) {
continue;
}
foundhdl = isp->isp_tgtlist[i].handle;
break;
}
}
return (0);
return (foundhdl);
}

void
isp_destroy_tgt_handle(ispsoftc_t *isp, uint32_t handle)
{
if (handle == 0 || IS_TARGET_HANDLE(handle) == 0 || (handle & ISP_HANDLE_MASK) > isp->isp_maxcmds) {
isp_prt(isp, ISP_LOGERR, "bad handle in isp_destroy_tgt_handle");
if (!ISP_VALID_TGT_HANDLE(handle)) {
isp_prt(isp, ISP_LOGERR, "%s: bad handle 0x%x", __func__, handle);
} else {
isp->isp_tgtlist[(handle & ISP_HANDLE_MASK) - 1] = NULL;
isp->isp_tgtlist[(handle & ISP_HANDLE_CMD_MASK)].handle = ISP_HANDLE_FREE;
isp->isp_tgtlist[(handle & ISP_HANDLE_CMD_MASK)].cmd = isp->isp_tgtfree;
isp->isp_tgtfree = &isp->isp_tgtlist[(handle & ISP_HANDLE_CMD_MASK)];
}
}
@@ -43,10 +43,10 @@ int isp_send_cmd(ispsoftc_t *, void *, void *, uint32_t, uint32_t, isp_ddir_t);
*
* These handles are associate with a command.
*/
int isp_save_xs(ispsoftc_t *, XS_T *, uint32_t *);
int isp_allocate_xs(ispsoftc_t *, XS_T *, uint32_t *);
XS_T * isp_find_xs(ispsoftc_t *, uint32_t);
uint32_t isp_find_handle(ispsoftc_t *, XS_T *);
uint32_t isp_handle_index(uint32_t);
uint32_t isp_handle_index(ispsoftc_t *, uint32_t);
void isp_destroy_handle(ispsoftc_t *, uint32_t);

/*
@@ -156,9 +156,7 @@ void isp_put_ct_hdr(ispsoftc_t *isp, ct_hdr_t *, ct_hdr_t *);

int isp_send_tgt_cmd(ispsoftc_t *, void *, void *, uint32_t, uint32_t, isp_ddir_t, void *, uint32_t);

#define IS_TARGET_HANDLE(x) ((x) & 0x8000)

int isp_save_xs_tgt(ispsoftc_t *, void *, uint32_t *);
int isp_allocate_xs_tgt(ispsoftc_t *, void *, uint32_t *);
void *isp_find_xs_tgt(ispsoftc_t *, uint32_t);
uint32_t isp_find_tgt_handle(ispsoftc_t *, void *);
void isp_destroy_tgt_handle(ispsoftc_t *, uint32_t);
@@ -1516,17 +1516,21 @@ isp_pci_mbxdma(ispsoftc_t *isp)
return (1);
}

len = sizeof (XS_T **) * isp->isp_maxcmds;
isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
len = sizeof (isp_hdl_t) * isp->isp_maxcmds;
isp->isp_xflist = (isp_hdl_t *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
if (isp->isp_xflist == NULL) {
free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
ISP_LOCK(isp);
isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
return (1);
}
for (len = 0; len < isp->isp_maxcmds - 1; len++) {
isp->isp_xflist[len].cmd = &isp->isp_xflist[len+1];
}
isp->isp_xffree = isp->isp_xflist;
#ifdef ISP_TARGET_MODE
len = sizeof (void **) * isp->isp_maxcmds;
isp->isp_tgtlist = (void **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
len = sizeof (isp_hdl_t *) * isp->isp_maxcmds;
isp->isp_tgtlist = (isp_hdl_t *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
if (isp->isp_tgtlist == NULL) {
free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
free(isp->isp_xflist, M_DEVBUF);
@@ -1534,6 +1538,10 @@ isp_pci_mbxdma(ispsoftc_t *isp)
isp_prt(isp, ISP_LOGERR, "cannot alloc tgtlist array");
return (1);
}
for (len = 0; len < isp->isp_maxcmds - 1; len++) {
isp->isp_tgtlist[len].cmd = &isp->isp_tgtlist[len+1];
}
isp->isp_tgtfree = isp->isp_tgtlist;
#endif

/*
@@ -455,13 +455,17 @@ isp_sbus_mbxdma(ispsoftc_t *isp)
return (1);
}

len = sizeof (XS_T **) * isp->isp_maxcmds;
isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
len = sizeof (isp_hdl_t *) * isp->isp_maxcmds;
isp->isp_xflist = (isp_hdl_t *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
if (isp->isp_xflist == NULL) {
isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
ISP_LOCK(isp);
return (1);
}
for (len = 0; len < isp->isp_maxcmds - 1; len++) {
isp->isp_xflist[len].cmd = &isp->isp_xflist[len+1];
}
isp->isp_xffree = isp->isp_xflist;
len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;

if (isp_dma_tag_create(BUS_DMA_ROOTARG(ISP_SBD(isp)), 1,
@@ -31,7 +31,6 @@
*/
#ifndef _ISP_STDS_H
#define _ISP_STDS_H

/*
* FC Frame Header
*
@@ -147,6 +146,7 @@ typedef struct {
#define FCP_SNSLEN_VALID 0x02
#define FCP_RSPLEN_VALID 0x01

#define FCP_MAX_RSPLEN 0x08
/*
* FCP Response Code Definitions
* Source: NCITS T10, Project 1144D, Revision 08 (aka FCP2r08)
@@ -159,6 +159,8 @@ typedef struct {
#define FCP_RSPNS_EROFS 3
#define FCP_RSPNS_TMF_REJECT 4
#define FCP_RSPNS_TMF_FAILED 5
#define FCP_RSPNS_TMF_SUCCEEDED 8
#define FCP_RSPNS_TMF_INCORRECT_LUN 9

/* unconverted miscellany */
@ -1,428 +0,0 @@
|
||||
/* $FreeBSD$ */
|
||||
/*-
|
||||
* Copyright (c) 1997-2007 by Matthew Jacob
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*/
|
||||
/*
|
||||
* Host Adapter Public Target Interface Structures && Routines
|
||||
*/
|
||||
|
||||
#ifndef _ISP_TPUBLIC_H
|
||||
#define _ISP_TPUBLIC_H 1
|
||||
|
||||
/*
|
||||
* Action codes set by the MD target driver for
|
||||
* the external layer to figure out what to do with.
|
||||
*/
|
||||
typedef enum {
|
||||
QOUT_HBA_REG=0, /* the argument is a pointer to a hba_register_t */
|
||||
QOUT_ENABLE, /* the argument is a pointer to a enadis_t */
|
||||
QOUT_DISABLE, /* the argument is a pointer to a enadis_t */
|
||||
QOUT_TMD_START, /* the argument is a pointer to a tmd_cmd_t */
|
||||
QOUT_TMD_DONE, /* the argument is a pointer to a tmd_cmd_t */
|
||||
QOUT_NOTIFY, /* the argument is a pointer to a tmd_notify_t */
|
||||
QOUT_HBA_UNREG /* the argument is a pointer to a hba_register_t */
|
||||
} tact_e;
|
||||
|
||||
/*
|
||||
* Action codes set by the external layer for the
|
||||
* MD driver to figure out what to do with.
|
||||
*/
|
||||
typedef enum {
|
||||
QIN_HBA_REG=99, /* the argument is a pointer to a hba_register_t */
|
||||
QIN_GETINFO, /* the argument is a pointer to a info_t */
|
||||
QIN_SETINFO, /* the argument is a pointer to a info_t */
|
||||
QIN_GETDLIST, /* the argument is a pointer to a fc_dlist_t */
|
||||
QIN_ENABLE, /* the argument is a pointer to a enadis_t */
|
||||
QIN_DISABLE, /* the argument is a pointer to a enadis_t */
|
||||
QIN_TMD_CONT, /* the argument is a pointer to a tmd_cmd_t */
|
||||
QIN_TMD_FIN, /* the argument is a pointer to a tmd_cmd_t */
|
||||
QIN_NOTIFY_ACK, /* the argument is a pointer to a tmd_notify_t */
|
||||
QIN_HBA_UNREG, /* the argument is a pointer to a hba_register_t */
|
||||
} qact_e;
|
||||
|
||||
/*
|
||||
* This structure is used to register to other software modules the
|
||||
* binding of an HBA identifier, driver name and instance and the
|
||||
* lun width capapbilities of this target driver. It's up to each
|
||||
* platform to figure out how it wants to do this, but a typical
|
||||
* sequence would be for the MD layer to find some external module's
|
||||
* entry point and start by sending a QOUT_HBA_REG with info filled
|
||||
* in, and the external module to call back with a QIN_HBA_REG that
|
||||
* passes back the corresponding information.
|
||||
*/
|
||||
#define QR_VERSION 16
|
||||
typedef struct {
|
||||
/* NB: tags from here to r_version must never change */
|
||||
void * r_identity;
|
||||
void (*r_action)(qact_e, void *);
|
||||
char r_name[8];
|
||||
int r_inst;
|
||||
int r_version;
|
||||
uint32_t r_locator;
|
||||
uint32_t r_nchannels;
|
||||
enum { R_FC, R_SPI } r_type;
|
||||
void * r_private;
|
||||
} hba_register_t;
|
||||
|
||||
/*
|
||||
* An information structure that is used to get or set per-channel transport layer parameters.
|
||||
*/
|
||||
typedef struct {
|
||||
void * i_identity;
|
||||
enum { I_FC, I_SPI } i_type;
|
||||
int i_channel;
|
||||
int i_error;
|
||||
union {
|
||||
struct {
|
||||
uint64_t wwnn_nvram;
|
||||
uint64_t wwpn_nvram;
|
||||
uint64_t wwnn;
|
||||
uint64_t wwpn;
|
||||
} fc;
|
||||
struct {
|
||||
int iid;
|
||||
} spi;
|
||||
} i_id;
|
||||
} info_t;
|
||||
|
||||
/*
|
||||
* An information structure to return a list of logged in WWPNs. FC specific.
|
||||
*/
|
||||
typedef struct {
|
||||
void * d_identity;
|
||||
int d_channel;
|
||||
int d_error;
|
||||
int d_count;
|
||||
uint64_t * d_wwpns;
|
||||
} fc_dlist_t;
|
||||
/*
|
||||
* Notify structure
|
||||
*/
|
||||
typedef enum {
|
||||
NT_ABORT_TASK=0x1000,
|
||||
NT_ABORT_TASK_SET,
|
||||
NT_CLEAR_ACA,
|
||||
NT_CLEAR_TASK_SET,
|
||||
NT_LUN_RESET,
|
||||
NT_TARGET_RESET,
|
||||
NT_BUS_RESET,
|
||||
NT_LIP_RESET,
|
||||
NT_LINK_UP,
|
||||
NT_LINK_DOWN,
|
||||
NT_LOGOUT,
|
||||
NT_HBA_RESET
|
||||
} tmd_ncode_t;
|
||||
|
||||
typedef struct tmd_notify {
|
||||
void * nt_hba; /* HBA tag */
|
||||
uint64_t nt_iid; /* inititator id */
|
||||
uint64_t nt_tgt; /* target id */
|
||||
uint16_t nt_lun; /* logical unit */
|
||||
uint16_t : 15,
|
||||
nt_need_ack : 1; /* this notify needs an ACK */
|
||||
uint64_t nt_tagval; /* tag value */
|
||||
uint32_t nt_channel; /* channel id */
|
||||
tmd_ncode_t nt_ncode; /* action */
|
||||
void * nt_lreserved;
|
||||
void * nt_hreserved;
|
||||
} tmd_notify_t;
|
||||
#define LUN_ANY 0xffff
|
||||
#define TGT_ANY ((uint64_t) -1)
|
||||
#define INI_ANY ((uint64_t) -1)
|
||||
#define TAG_ANY ((uint64_t) 0)
|
||||
#define MATCH_TMD(tmd, iid, lun, tag) \
|
||||
( \
|
||||
(tmd) && \
|
||||
(iid == INI_ANY || iid == tmd->cd_iid) && \
|
||||
(lun == LUN_ANY || lun == tmd->cd_lun) && \
|
||||
(tag == TAG_ANY || tag == tmd->cd_tagval) \
|
||||
)
|
||||
|
||||
/*
|
||||
* A word about ENABLE/DISABLE: the argument is a pointer to a enadis_t
|
||||
* with en_hba, en_iid, en_chan, en_tgt and en_lun filled out.
|
||||
*
|
||||
* If an error occurs in either enabling or disabling the described lun
|
||||
* cd_error is set with an appropriate non-zero value.
|
||||
*/
|
||||
typedef struct {
|
||||
void * en_private; /* for outer layer usage */
|
||||
void * en_hba; /* HBA tag */
|
||||
uint64_t en_iid; /* initiator ID */
|
||||
uint64_t en_tgt; /* target id */
|
||||
uint16_t en_lun; /* logical unit */
|
||||
uint16_t en_chan; /* channel on card */
|
||||
int en_error;
|
||||
} enadis_t;

/*
* Suggested Software Target Mode Command Handling structure.
*
* A note about terminology:
*
* MD stands for "Machine Dependent".
*
* This driver is structured in three layers: Outer MD, core, and inner MD.
* The latter also is bus dependent (i.e., is cognizant of PCI bus issues
* as well as platform issues).
*
*
* "Outer Layer" means "Other Module"
*
* Some additional module that actually implements SCSI target command
* policy is the recipient of incoming commands and the source of the
* disposition for them.
*
* The command structure below is one suggested possible MD command structure,
* but since the handling of this is entirely in the MD layer, there is
* no explicit or implicit requirement that it be used.
*
* The cd_private tag should be used by the MD layer to keep a free list
* of these structures. Code outside of this driver can then use this
* to identify its own unit structures. That is, when not on the MD
* layer's freelist, the MD layer should shove into it the identifier
* that the outer layer has for it- passed in on an initial QIN_HBA_REG
* call (see below).
*
* The cd_hba tag is a tag that uniquely identifies the HBA this target
* mode command is coming from. The outer layer has to pass this back
* unchanged to avoid chaos.
*
* The cd_iid, cd_tgt, cd_lun and cd_port tags are used to identify the
* id of the initiator who sent us a command, the target we claim to be, the
* lun on the target we claim to be, and the port instance (for multiple
* port host adapters) that this applies to (consider it an extra port
* parameter). The iid, tgt and lun values are deliberately chosen to be
* fat so that, for example, World Wide Names can be used instead of
* the units that the firmware uses (in the case where the MD
* layer maintains a port database, for example).
*
* The cd_tagtype field specifies what kind of command tag type, if
* any, has been sent with the command. Note that the Outer Layer
* still needs to pass the tag handle through unchanged even
* if the tag type is CD_UNTAGGED.
*
* The cd_cdb contains storage for the passed in command descriptor block.
* There is no need to define length as the callee should be able to
* figure this out.
*
* The tag cd_lflags are the flags set by the MD driver when it gets
* an incoming command or when it needs to inform any outside entities
* that the last requested action failed.
*
* The tag cd_hflags should be set by any outside software to indicate
* the validity of sense and status fields (defined below) and to indicate
* the direction data is expected to move. It is an error to have both
* CDFH_DATA_IN and CDFH_DATA_OUT set.
*
* If the CDFH_STSVALID flag is set, the command should be completed (after
* sending any data and/or status). If CDFH_SNSVALID is set and the MD layer
* can also handle sending the associated sense data (either back with an
* FCP RESPONSE IU for Fibre Channel or otherwise automatically handling a
* REQUEST SENSE from the initiator for this target/lun), the MD layer will
* set the CDFL_SENTSENSE flag on successful transmission of the sense data.
* It is an error for the CDFH_SNSVALID bit to be set and CDFH_STSVALID not
* to be set. It is an error for CDFH_SNSVALID to be set and the associated
* SCSI status (cd_scsi_status) not to be set to CHECK CONDITION.
*
* The tag cd_data points to a data segment to either be filled or
* read from depending on the direction of data movement. The tag
* is undefined if no data direction is set. The MD layer and outer
* layers must agree on the meaning of cd_data and it is specifically
* not defined here.
*
* The tag cd_totlen is the total data amount expected to be moved
* over the life of the command. It may be set by the MD layer, possibly
* from the datalen field of an FCP CMND IU unit. If it shows up in the outer
* layers set to zero and the CDB indicates data should be moved, the outer
* layer should set it to the amount expected to be moved.
*
* The tag cd_resid should be the total residual of data not transferred.
* The outer layers need to set this at the beginning of command processing
* to equal cd_totlen. As data is successfully moved, this value is decreased.
* At the end of a command, any nonzero residual indicates the number of bytes
* requested by the command but not moved.
*
* The tag cd_xfrlen is the length of the currently active data transfer.
* This allows several iterations between any outside software and the
* MD layer to move data.
*
* The reason that total length and total residual have to be tracked
* is to keep track of relative offset.
*
* The tags cd_sense and cd_scsi_status are pretty obvious.
*
* The tag cd_error is to communicate between the MD layer and outer software
* the current error conditions.
*
* The tags cd_lreserved and cd_hreserved are scratch areas for use by the MD
* and outer layers respectively.
*
*/

#ifndef TMD_CDBLEN
#define TMD_CDBLEN 16
#endif
#ifndef TMD_SENSELEN
#define TMD_SENSELEN 18
#endif
#ifndef QCDS
#define QCDS (sizeof (void *))
#endif

typedef struct tmd_cmd {
void * cd_private; /* private data pointer */
void * cd_hba; /* HBA tag */
void * cd_data; /* 'pointer' to data */
uint64_t cd_iid; /* initiator ID */
uint64_t cd_tgt; /* target id */
uint8_t cd_lun[8]; /* logical unit */
uint64_t cd_tagval; /* tag value */
uint32_t cd_channel; /* channel index */
uint32_t cd_lflags; /* flags lower level sets */
uint32_t cd_hflags; /* flags higher level sets */
uint32_t cd_totlen; /* total data load */
uint32_t cd_resid; /* total data residual */
uint32_t cd_xfrlen; /* current data load */
int32_t cd_error; /* current error */
uint8_t cd_tagtype; /* tag type */
uint8_t cd_scsi_status;
uint8_t cd_sense[TMD_SENSELEN];
uint8_t cd_cdb[TMD_CDBLEN];
union {
void * ptrs[QCDS / sizeof (void *)];
uint64_t llongs[QCDS / sizeof (uint64_t)];
uint32_t longs[QCDS / sizeof (uint32_t)];
uint16_t shorts[QCDS / sizeof (uint16_t)];
uint8_t bytes[QCDS];
} cd_lreserved[4], cd_hreserved[4];
} tmd_cmd_t;
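
/*
* An illustrative sketch of finishing a command with CHECK CONDITION under
* the CDFH_STSVALID/CDFH_SNSVALID rules described above. The hand-off
* routine md_target_continue() is hypothetical; how the structure gets back
* to the MD layer (the QIN_TMD_CONT path) is up to the outer layer.
*/
static void
example_complete_with_sense(tmd_cmd_t *tmd)
{
	tmd->cd_scsi_status = 0x02;		/* CHECK CONDITION */
	memset(tmd->cd_sense, 0, TMD_SENSELEN);
	tmd->cd_sense[0] = 0x70;		/* current error, fixed format */
	tmd->cd_sense[2] = 0x05;		/* ILLEGAL REQUEST */
	tmd->cd_sense[12] = 0x20;		/* INVALID COMMAND OPERATION CODE */
	tmd->cd_xfrlen = 0;			/* no data movement for this pass */
	tmd->cd_hflags |= CDFH_STSVALID | CDFH_SNSVALID;
	md_target_continue(tmd);		/* hypothetical QIN_TMD_CONT hand-off */
}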

/* defined tags */
#define CD_UNTAGGED 0
#define CD_SIMPLE_TAG 1
#define CD_ORDERED_TAG 2
#define CD_HEAD_TAG 3
#define CD_ACA_TAG 4

#ifndef TMD_SIZE
#define TMD_SIZE (sizeof (tmd_cmd_t))
#endif

#define L0LUN_TO_FLATLUN(lptr) ((((lptr)[0] & 0x3f) << 8) | ((lptr)[1]))
#define FLATLUN_TO_L0LUN(lptr, lun) \
(lptr)[1] = lun & 0xff; \
if (sizeof (lun) == 1) { \
(lptr)[0] = 0; \
} else { \
uint16_t nl = lun; \
if (nl == LUN_ANY) { \
(lptr)[0] = (nl >> 8) & 0xff; \
} else if (nl < 256) { \
(lptr)[0] = 0; \
} else { \
(lptr)[0] = 0x40 | ((nl >> 8) & 0x3f); \
} \
} \
memset(&(lptr)[2], 0, 6)
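
/*
* An illustrative round trip through the two LUN macros above. A flat lun
* of 300 does not fit in the single level-zero byte, so FLATLUN_TO_L0LUN
* stores it in the 0x40 flat-addressing form that L0LUN_TO_FLATLUN decodes
* back to 300.
*/
static void
example_lun_roundtrip(void)
{
	uint8_t l0[8];
	uint16_t lun = 300;

	FLATLUN_TO_L0LUN(l0, lun);	/* l0[0] == 0x41, l0[1] == 0x2c */
	lun = L0LUN_TO_FLATLUN(l0);	/* decodes back to 300 */
}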

/*
* Note that NODISC (obviously) doesn't apply to non-SPI transport.
*
* Note that knowing the data direction and length at the time of receipt of
* a command from the initiator is a feature only of Fibre Channel.
*
* The CDFL_BIDIR is in anticipation of the adoption of some newer
* features required by OSD.
*
* The principal selector for the MD layer to know whether data is to
* be transferred in any QOUT_TMD_CONT call is cd_xfrlen- the
* flags CDFH_DATA_IN and CDFH_DATA_OUT define which direction.
*/
#define CDFL_SNSVALID 0x01 /* sense data (from f/w) good */
#define CDFL_SENTSTATUS 0x02 /* last action sent status */
#define CDFL_DATA_IN 0x04 /* target (us) -> initiator (them) */
#define CDFL_DATA_OUT 0x08 /* initiator (them) -> target (us) */
#define CDFL_BIDIR 0x0C /* bidirectional data */
#define CDFL_ERROR 0x10 /* last action ended in error */
#define CDFL_NODISC 0x20 /* disconnects disabled */
#define CDFL_SENTSENSE 0x40 /* last action sent sense data */
#define CDFL_BUSY 0x80 /* this command is not on a free list */
#define CDFL_PRIVATE 0xFF000000 /* private layer flags */

#define CDFH_SNSVALID 0x01 /* sense data (from outer layer) good */
#define CDFH_STSVALID 0x02 /* status valid */
#define CDFH_DATA_IN 0x04 /* target (us) -> initiator (them) */
#define CDFH_DATA_OUT 0x08 /* initiator (them) -> target (us) */
#define CDFH_DATA_MASK 0x0C /* mask to cover data direction */
#define CDFH_PRIVATE 0xFF000000 /* private layer flags */


/*
* A word about the START/CONT/DONE/FIN dance:
*
* When the HBA is enabled for receiving commands, one may show up
* without notice. When that happens, the MD target mode driver
* gets a tmd_cmd_t, fills it with the info that just arrived, and
* calls the outer layer with a QOUT_TMD_START code and pointer to
* the tmd_cmd_t.
*
* The outer layer decodes the command, fetches data, prepares stuff,
* whatever, and starts by passing back the pointer with a QIN_TMD_CONT
* code which causes the MD target mode driver to generate CTIOs to
* satisfy whatever action needs to be taken. When those CTIOs complete,
* the MD target driver sends the pointer to the tmd_cmd_t back with
* a QOUT_TMD_DONE code. This repeats for as long as necessary. These
* may not be done in parallel- they are sequential operations.
*
* The outer layer signals it wants to end the command by settings within
* the tmd_cmd_t itself. When the final QIN_TMD_CONT is reported completed,
* the outer layer frees the tmd_cmd_t by sending the pointer to it
* back with a QIN_TMD_FIN code.
*
* The graph looks like:
*
* QOUT_TMD_START -> [ QIN_TMD_CONT -> QOUT_TMD_DONE ] * -> QIN_TMD_FIN.
*
*/
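
/*
* An illustrative sketch of the outer layer's half of the dance above, using
* the QOUT/QIN codes named in the comment. The enum types follow the
* prototypes further below; the entry point md_target_action() that hands
* work back to the MD layer is an assumption of this sketch.
*/
static void
example_scsi_target_handler(tact_e action, void *arg)
{
	tmd_cmd_t *tmd = arg;

	switch (action) {
	case QOUT_TMD_START:
		/* decode tmd->cd_cdb, set cd_hflags, cd_xfrlen, status/sense ... */
		md_target_action(QIN_TMD_CONT, tmd);
		break;
	case QOUT_TMD_DONE:
		/* last CTIO finished; continue with more data or finish up */
		md_target_action(QIN_TMD_FIN, tmd);
		break;
	default:
		break;
	}
}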

/*
* Target handler functions.
*
* The MD target handler function (the outer layer calls this)
* should be prototyped like:
*
* void target_action(qact_e, void *arg)
*
* The outer layer target handler function (the MD layer calls this)
* should be prototyped like:
*
* void scsi_target_handler(tact_e, void *arg)
*/
#endif /* _ISP_TPUBLIC_H */
/*
* vim:ts=4:sw=4:expandtab
*/
@ -26,6 +26,7 @@
* SUCH DAMAGE.
*
*/

/*
* Mailbox and Queue Entry Definitions for Qlogic ISP SCSI adapters.
*/
@ -244,12 +245,38 @@
#define ASYNC_RIO_COMP 0x8042
#define ASYNC_RCV_ERR 0x8048

/*
* Firmware Options. There are a lot of them.
*
* IFCOPTN - ISP Fibre Channel Option Word N
*/
#define IFCOPT1_EQFQASYNC (1 << 13) /* enable QFULL notification */
#define IFCOPT1_EAABSRCVD (1 << 12)
#define IFCOPT1_RJTASYNC (1 << 11) /* enable 8018 notification */
#define IFCOPT1_ENAPURE (1 << 10)
#define IFCOPT1_ENA8017 (1 << 7)
#define IFCOPT1_DISGPIO67 (1 << 6)
#define IFCOPT1_LIPLOSSIMM (1 << 5)
#define IFCOPT1_DISF7SWTCH (1 << 4)
#define IFCOPT1_CTIO_RETRY (1 << 3)
#define IFCOPT1_LIPASYNC (1 << 1)
#define IFCOPT1_LIPF8 (1 << 0)

#define IFCOPT2_LOOPBACK (1 << 1)
#define IFCOPT2_ATIO3_ONLY (1 << 0)

#define IFCOPT3_NOPRLI (1 << 4) /* disable automatic sending of PRLI on local loops */
#define IFCOPT3_RNDASYNC (1 << 1)
/*
* 2.01.31 2200 Only. Need Bit 13 in Mailbox 1 for Set Firmware Options
* mailbox command to enable this.
*/
#define ASYNC_QFULL_SENT 0x8049

/*
* Needs to be enabled
*/
#define ASYNC_AUTO_PLOGI_RJT 0x8018
/*
* 24XX only
*/
@ -260,11 +287,6 @@
*/
#define QENTRY_LEN 64

/*
* Special Internal Handle for IOCBs
*/
#define ISP_SPCL_HANDLE 0xa5dead5a

/*
* Command Structure Definitions
*/

@ -50,7 +50,7 @@
#include "ispmbox.h"
#endif

#define ISP_CORE_VERSION_MAJOR 6
#define ISP_CORE_VERSION_MAJOR 7
#define ISP_CORE_VERSION_MINOR 0

/*
@ -288,6 +288,53 @@ typedef struct {
#define DOMAIN_CONTROLLER_BASE 0xFFFC00
#define DOMAIN_CONTROLLER_END 0xFFFCFF

/*
* Command Handles
*
* Most QLogic initiator or target commands have 32 bit handles associated
* with them. We want to have a quick way to index back and forth between a
* local SCSI command context and what the firmware is passing back to us.
* We also want to avoid working on stale information. This structure handles
* both at the expense of some local memory.
*
* The handle is architected thusly:
*
* 0 means "free handle"
* bits 0..12 index commands
* bits 13..15 index usage
* bits 16..31 contain a rolling sequence
*
*
*/
typedef struct {
void * cmd; /* associated command context */
uint32_t handle; /* handle associated with this command */
} isp_hdl_t;
#define ISP_HANDLE_FREE 0x00000000
#define ISP_HANDLE_CMD_MASK 0x00001fff
#define ISP_HANDLE_USAGE_MASK 0x0000e000
#define ISP_HANDLE_USAGE_SHIFT 13
#define ISP_H2HT(hdl) ((hdl & ISP_HANDLE_USAGE_MASK) >> ISP_HANDLE_USAGE_SHIFT)
# define ISP_HANDLE_NONE 0
# define ISP_HANDLE_INITIATOR 1
# define ISP_HANDLE_TARGET 2
#define ISP_HANDLE_SEQ_MASK 0xffff0000
#define ISP_HANDLE_SEQ_SHIFT 16
#define ISP_H2SEQ(hdl) ((hdl & ISP_HANDLE_SEQ_MASK) >> ISP_HANDLE_SEQ_SHIFT)
#define ISP_VALID_INI_HANDLE(c, hdl) \
(ISP_H2HT(hdl) == ISP_HANDLE_INITIATOR && (hdl & ISP_HANDLE_CMD_MASK) < (c)->isp_maxcmds && \
ISP_H2SEQ(hdl) == ISP_H2SEQ((c)->isp_xflist[hdl & ISP_HANDLE_CMD_MASK].handle))
#ifdef ISP_TARGET_MODE
#define ISP_VALID_TGT_HANDLE(c, hdl) \
(ISP_H2HT(hdl) == ISP_HANDLE_TARGET && (hdl & ISP_HANDLE_CMD_MASK) < (c)->isp_maxcmds && \
ISP_H2SEQ(hdl) == ISP_H2SEQ((c)->isp_tgtlist[hdl & ISP_HANDLE_CMD_MASK].handle))
#define ISP_VALID_HANDLE(c, hdl) \
(ISP_VALID_INI_HANDLE((c), hdl) || ISP_VALID_TGT_HANDLE((c), hdl))
#else
#define ISP_VALID_HANDLE ISP_VALID_INI_HANDLE
#endif
#define ISP_BAD_HANDLE_INDEX 0xffffffff
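
/*
* An illustrative decoding of one handle value with the masks and shifts
* above. For hdl == 0x00214005 the command index is 5, the usage field is
* ISP_HANDLE_TARGET (2) and the rolling sequence is 0x0021.
*/
static void
example_decode_handle(uint32_t hdl)
{
	uint32_t idx = hdl & ISP_HANDLE_CMD_MASK;	/* bits 0..12 */
	uint32_t usage = ISP_H2HT(hdl);			/* bits 13..15 */
	uint32_t seq = ISP_H2SEQ(hdl);			/* bits 16..31 */

	(void) idx;
	(void) usage;
	(void) seq;
}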

/*
* FC Port Database entry.
@ -562,11 +609,11 @@ struct ispsoftc {
isp_mboxbsy : 1, /* mailbox command active */
isp_state : 3,
isp_nactive : 16; /* how many commands active */
volatile mbreg_t isp_curmbx; /* currently active mailbox command */
volatile uint32_t isp_reqodx; /* index of last ISP pickup */
volatile uint32_t isp_reqidx; /* index of next request */
volatile uint32_t isp_residx; /* index of next result */
volatile uint32_t isp_resodx; /* index of next result */
volatile uint32_t isp_lasthdls; /* last handle seed */
volatile uint32_t isp_obits; /* mailbox command output */
volatile uint32_t isp_serno; /* rolling serial number */
volatile uint16_t isp_mboxtmp[MAILBOX_STORAGE];
@ -575,18 +622,21 @@ struct ispsoftc {
volatile uint16_t isp_mbxwrk1;
volatile uint16_t isp_mbxwrk2;
volatile uint16_t isp_mbxwrk8;
volatile uint16_t isp_seqno; /* running sequence number */
void * isp_mbxworkp;

/*
* Active commands are stored here, indexed by handle functions.
*/
XS_T **isp_xflist;
isp_hdl_t *isp_xflist;
isp_hdl_t *isp_xffree;

#ifdef ISP_TARGET_MODE
/*
* Active target commands are stored here, indexed by handle functions.
*/
void **isp_tgtlist;
isp_hdl_t *isp_tgtlist;
isp_hdl_t *isp_tgtfree;
#endif

/*

@ -4463,6 +4463,26 @@ ixgbe_handle_msf(void *context, int pending)
return;
}

#ifdef IXGBE_FDIR
/*
** Tasklet for reinitializing the Flow Director filter table
*/
static void
ixgbe_reinit_fdir(void *context, int pending)
{
struct adapter *adapter = context;
struct ifnet *ifp = adapter->ifp;

if (adapter->fdir_reinit != 1) /* Shouldn't happen */
return;
ixgbe_reinit_fdir_tables_82599(&adapter->hw);
adapter->fdir_reinit = 0;
/* Restart the interface */
ifp->if_drv_flags |= IFF_DRV_RUNNING;
return;
}
#endif

/**********************************************************************
*
* Update the board statistics counters.
@ -4471,7 +4491,7 @@ ixgbe_handle_msf(void *context, int pending)
static void
ixgbe_update_stats_counters(struct adapter *adapter)
{
struct ifnet *ifp = adapter->ifp;;
struct ifnet *ifp = adapter->ifp;
struct ixgbe_hw *hw = &adapter->hw;
u32 missed_rx = 0, bprc, lxon, lxoff, total;
u64 total_missed_rx = 0;

@ -2099,7 +2099,7 @@ malo_rx_proc(void *arg, int npending)
* payload prior to constructing the header.
*/
m = bf->bf_m;
data = mtod(m, uint8_t *);;
data = mtod(m, uint8_t *);
hdrlen = ieee80211_anyhdrsize(data + sizeof(uint16_t));
off = sizeof(uint16_t) + sizeof(struct ieee80211_frame_addr4);

@ -1144,7 +1144,7 @@ mge_intr_tx_locked(struct mge_softc *sc)
break;

sc->tx_desc_used_idx =
(++sc->tx_desc_used_idx) % MGE_TX_DESC_NUM;;
(++sc->tx_desc_used_idx) % MGE_TX_DESC_NUM;
sc->tx_desc_used_count--;

/* Update collision statistics */

@ -3174,23 +3174,23 @@ mxge_alloc_slice_rings(struct mxge_slice_state *ss, int rx_ring_entries,
bytes = rx_ring_entries * sizeof (*ss->rx_small.shadow);
ss->rx_small.shadow = malloc(bytes, M_DEVBUF, M_ZERO|M_WAITOK);
if (ss->rx_small.shadow == NULL)
return err;;
return err;

bytes = rx_ring_entries * sizeof (*ss->rx_big.shadow);
ss->rx_big.shadow = malloc(bytes, M_DEVBUF, M_ZERO|M_WAITOK);
if (ss->rx_big.shadow == NULL)
return err;;
return err;

/* allocate the rx host info rings */
bytes = rx_ring_entries * sizeof (*ss->rx_small.info);
ss->rx_small.info = malloc(bytes, M_DEVBUF, M_ZERO|M_WAITOK);
if (ss->rx_small.info == NULL)
return err;;
return err;

bytes = rx_ring_entries * sizeof (*ss->rx_big.info);
ss->rx_big.info = malloc(bytes, M_DEVBUF, M_ZERO|M_WAITOK);
if (ss->rx_big.info == NULL)
return err;;
return err;

/* allocate the rx busdma resources */
err = bus_dma_tag_create(sc->parent_dmat, /* parent */
@ -3208,7 +3208,7 @@ mxge_alloc_slice_rings(struct mxge_slice_state *ss, int rx_ring_entries,
if (err != 0) {
device_printf(sc->dev, "Err %d allocating rx_small dmat\n",
err);
return err;;
return err;
}

err = bus_dma_tag_create(sc->parent_dmat, /* parent */
@ -3235,7 +3235,7 @@ mxge_alloc_slice_rings(struct mxge_slice_state *ss, int rx_ring_entries,
if (err != 0) {
device_printf(sc->dev, "Err %d allocating rx_big dmat\n",
err);
return err;;
return err;
}
for (i = 0; i <= ss->rx_small.mask; i++) {
err = bus_dmamap_create(ss->rx_small.dmat, 0,
@ -3243,7 +3243,7 @@ mxge_alloc_slice_rings(struct mxge_slice_state *ss, int rx_ring_entries,
if (err != 0) {
device_printf(sc->dev, "Err %d rx_small dmamap\n",
err);
return err;;
return err;
}
}
err = bus_dmamap_create(ss->rx_small.dmat, 0,
@ -3251,7 +3251,7 @@ mxge_alloc_slice_rings(struct mxge_slice_state *ss, int rx_ring_entries,
if (err != 0) {
device_printf(sc->dev, "Err %d extra rx_small dmamap\n",
err);
return err;;
return err;
}

for (i = 0; i <= ss->rx_big.mask; i++) {
@ -3260,7 +3260,7 @@ mxge_alloc_slice_rings(struct mxge_slice_state *ss, int rx_ring_entries,
if (err != 0) {
device_printf(sc->dev, "Err %d rx_big dmamap\n",
err);
return err;;
return err;
}
}
err = bus_dmamap_create(ss->rx_big.dmat, 0,
@ -3268,7 +3268,7 @@ mxge_alloc_slice_rings(struct mxge_slice_state *ss, int rx_ring_entries,
if (err != 0) {
device_printf(sc->dev, "Err %d extra rx_big dmamap\n",
err);
return err;;
return err;
}

/* now allocate TX resources */

@ -3288,7 +3288,7 @@ mxge_alloc_slice_rings(struct mxge_slice_state *ss, int rx_ring_entries,
sizeof (*ss->tx.req_list) * (ss->tx.max_desc + 4);
ss->tx.req_bytes = malloc(bytes, M_DEVBUF, M_WAITOK);
if (ss->tx.req_bytes == NULL)
return err;;
return err;
/* ensure req_list entries are aligned to 8 bytes */
ss->tx.req_list = (mcp_kreq_ether_send_t *)
((unsigned long)(ss->tx.req_bytes + 7) & ~7UL);
@ -3298,13 +3298,13 @@ mxge_alloc_slice_rings(struct mxge_slice_state *ss, int rx_ring_entries,
ss->tx.seg_list = (bus_dma_segment_t *)
malloc(bytes, M_DEVBUF, M_WAITOK);
if (ss->tx.seg_list == NULL)
return err;;
return err;

/* allocate the tx host info ring */
bytes = tx_ring_entries * sizeof (*ss->tx.info);
ss->tx.info = malloc(bytes, M_DEVBUF, M_ZERO|M_WAITOK);
if (ss->tx.info == NULL)
return err;;
return err;

/* allocate the tx busdma resources */
err = bus_dma_tag_create(sc->parent_dmat, /* parent */
@ -3323,7 +3323,7 @@ mxge_alloc_slice_rings(struct mxge_slice_state *ss, int rx_ring_entries,
if (err != 0) {
device_printf(sc->dev, "Err %d allocating tx dmat\n",
err);
return err;;
return err;
}

/* now use these tags to setup dmamaps for each slot
@ -3334,7 +3334,7 @@ mxge_alloc_slice_rings(struct mxge_slice_state *ss, int rx_ring_entries,
if (err != 0) {
device_printf(sc->dev, "Err %d tx dmamap\n",
err);
return err;;
return err;
}
}
return 0;
@ -381,7 +381,7 @@ patm_feed_lbufs(struct patm_softc *sc)
static void
patm_intr_tsif(struct patm_softc *sc)
{
struct idt_tsqe *tsqe = sc->tsq_next;;
struct idt_tsqe *tsqe = sc->tsq_next;
struct idt_tsqe *prev = NULL;
uint32_t stamp;

@ -140,7 +140,7 @@ pdq_eisa_probe (dev)
u_int32_t maddr;
u_int32_t msize;

u_int32_t eisa_id = eisa_get_id(dev);;
u_int32_t eisa_id = eisa_get_id(dev);

desc = pdq_eisa_match(eisa_id);
if (!desc) {

@ -1902,7 +1902,7 @@ safe_init_board(struct safe_softc *sc)
{
u_int32_t v, dwords;

v = READ_REG(sc, SAFE_PE_DMACFG);;
v = READ_REG(sc, SAFE_PE_DMACFG);
v &=~ SAFE_PE_DMACFG_PEMODE;
v |= SAFE_PE_DMACFG_FSENA /* failsafe enable */
| SAFE_PE_DMACFG_GPRPCI /* gather ring on PCI */

@ -353,7 +353,7 @@ m3_wrcd(kobj_t kobj, void *devinfo, int regno, u_int32_t data)
struct sc_info *sc = (struct sc_info *)devinfo;
if (m3_wait(sc)) {
device_printf(sc->dev, "m3_wrcd timed out.\n");
return -1;;
return -1;
}
m3_wr_2(sc, CODEC_DATA, data);
m3_wr_1(sc, CODEC_COMMAND, regno & 0x7f);

@ -1119,7 +1119,7 @@ ste_attach(device_t dev)
*/
if (ste_read_eeprom(sc, eaddr, STE_EEADDR_NODE0, ETHER_ADDR_LEN / 2)) {
device_printf(dev, "failed to read station address\n");
error = ENXIO;;
error = ENXIO;
goto fail;
}
ste_sysctl_node(sc);

@ -2770,7 +2770,7 @@ trm_DoingSRB_Done(PACB pACB)
xpt_done(pccb);
psrb = psrb2;
}
pdcb->GoingSRBCnt = 0;;
pdcb->GoingSRBCnt = 0;
pdcb->pGoingSRB = NULL;
pdcb = pdcb->pNextDCB;
}

@ -1799,7 +1799,7 @@ musbotg_init(struct musbotg_softc *sc)
MUSB2_WRITE_1(sc, MUSB2_REG_EPINDEX, temp);

fsize = MUSB2_READ_1(sc, MUSB2_REG_FSIZE);
frx = (fsize & MUSB2_MASK_RX_FSIZE) / 16;;
frx = (fsize & MUSB2_MASK_RX_FSIZE) / 16;
ftx = (fsize & MUSB2_MASK_TX_FSIZE);

DPRINTF("Endpoint %u FIFO size: IN=%u, OUT=%u, DYN=%d\n",

@ -3036,7 +3036,7 @@ umass_atapi_transform(struct umass_softc *sc, uint8_t *cmd_ptr,
case 0xad: /* READ_DVD_STRUCTURE */
case 0xbb: /* SET_CD_SPEED */
case 0xe5: /* READ_TRACK_INFO_PHILIPS */
break;;
break;

case READ_12:
case WRITE_12:
@ -3044,7 +3044,7 @@ umass_atapi_transform(struct umass_softc *sc, uint8_t *cmd_ptr,
DPRINTF(sc, UDMASS_SCSI, "Unsupported ATAPI "
"command 0x%02x - trying anyway\n",
cmd_ptr[0]);
break;;
break;
}

bcopy(cmd_ptr, sc->sc_transfer.cmd_data, cmd_len);

@ -1117,7 +1117,7 @@ _xfs_strategy(
} */ *ap)
{
daddr_t blkno;
struct buf *bp;;
struct buf *bp;
struct bufobj *bo;
struct vnode *vp;
struct xfs_mount *xmp;

@ -480,7 +480,7 @@ pnp_create_devices(device_t parent, pnp_id *p, int csn,
}
resinfo = resp;
resp += PNP_SRES_LEN(tag);
scanning -= PNP_SRES_LEN(tag);;
scanning -= PNP_SRES_LEN(tag);

switch (PNP_SRES_NUM(tag)) {
case PNP_TAG_LOGICAL_DEVICE:

@ -452,7 +452,7 @@ parse_term(struct fail_point_entries *ents, char *p)
} else if (*p == '*') {
if (!units || decimal)
return 0;
ent->fe_count = units;;
ent->fe_count = units;

} else {
return 0;
@ -497,7 +497,7 @@ parse_number(int *out_units, int *out_decimal, char *p)

/* whole part */
old_p = p;
*out_units = strtol(p, &p, 10);;
*out_units = strtol(p, &p, 10);
if (p == old_p && *p != '.')
return 0;

@ -500,7 +500,7 @@ firmware_modevent(module_t mod, int type, void *unused)
mtx_lock(&firmware_mtx);
for (i = 0; i < FIRMWARE_MAX; i++) {
fp = &firmware_table[i];
fp->flags |= FW_UNLOAD;;
fp->flags |= FW_UNLOAD;
}
mtx_unlock(&firmware_mtx);
taskqueue_enqueue(firmware_tq, &firmware_unload_task);

@ -527,7 +527,7 @@ admsw_attach(device_t dev)
ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_AUTO, 0, NULL);
ifmedia_set(&sc->sc_ifmedia[i], IFM_ETHER|IFM_AUTO);

ifp = sc->sc_ifnet[i] = if_alloc(IFT_ETHER);;
ifp = sc->sc_ifnet[i] = if_alloc(IFT_ETHER);

/* Setup interface parameters */
ifp->if_softc = sc;

@ -105,7 +105,7 @@ static int
elf_reloc_internal(linker_file_t lf, Elf_Addr relocbase, const void *data,
int type, int local, elf_lookup_fn lookup)
{
Elf_Addr *where = (Elf_Addr *)NULL;;
Elf_Addr *where = (Elf_Addr *)NULL;
Elf_Addr addr;
Elf_Addr addend = (Elf_Addr)0;
Elf_Word rtype = (Elf_Word)0, symidx;

@ -404,7 +404,7 @@ ipv4_flow_lookup_hash_internal(struct mbuf *m, struct route *ro,
if (*flags & FL_HASH_PORTS)
goto noop;
/* no port - hence not a protocol we care about */
break;;
break;

}
*protop = proto;

@ -1739,7 +1739,7 @@ ieee80211_node_delucastkey(struct ieee80211_node *ni)
status = ieee80211_crypto_delkey(ni->ni_vap, &ni->ni_ucastkey);
if (nt->nt_keyixmap != NULL && keyix < nt->nt_keyixmax) {
nikey = nt->nt_keyixmap[keyix];
nt->nt_keyixmap[keyix] = NULL;;
nt->nt_keyixmap[keyix] = NULL;
}
}
if (!isowned)

@ -2136,7 +2136,7 @@ void
SetProtocolFlags(struct alias_link *lnk, int pflags)
{

lnk->pflags = pflags;;
lnk->pflags = pflags;
}

int

@ -158,7 +158,7 @@ _attach_handler(struct proto_handler *p)
static int
_detach_handler(struct proto_handler *p)
{
struct proto_handler *b, *b_tmp;;
struct proto_handler *b, *b_tmp;

LIBALIAS_WLOCK_ASSERT();
LIST_FOREACH_SAFE(b, &handler_chain, entries, b_tmp) {

@ -2224,7 +2224,7 @@ sctp_asconf_iterator_stcb(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
}
if (stcb->asoc.ipv4_local_scope == 0 &&
IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) {
continue;;
continue;
}
if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
SCTP_IPV6_V6ONLY(inp6)) {

@ -4382,7 +4382,7 @@ sctp_add_to_readq(struct sctp_inpcb *inp,
m = SCTP_BUF_NEXT(prev);
}
if (m == NULL) {
control->tail_mbuf = prev;;
control->tail_mbuf = prev;
}
continue;
}

@ -1332,7 +1332,7 @@ bootpc_compose_query(struct bootpc_ifcontext *ifctx,
*vendp++ = TAG_VENDOR_INDENTIFIER;
*vendp++ = vendor_client_len;
memcpy(vendp, vendor_client, vendor_client_len);
vendp += vendor_client_len;;
vendp += vendor_client_len;
ifctx->dhcpquerytype = DHCP_NOMSG;
switch (ifctx->state) {
case IF_DHCP_UNRESOLVED:

@ -3992,7 +3992,7 @@ ncr_action (struct cam_sim *sim, union ccb *ccb)
msgptr[msglen++] = MSG_EXT_SDTR_LEN;
msgptr[msglen++] = MSG_EXT_SDTR;
msgptr[msglen++] = tp->tinfo.goal.period;
msgptr[msglen++] = tp->tinfo.goal.offset;;
msgptr[msglen++] = tp->tinfo.goal.offset;
if (DEBUG_FLAGS & DEBUG_NEGO) {
PRINT_ADDR(ccb);
printf ("sync msgout: ");

@ -922,7 +922,7 @@ moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
thread0.td_kstack = va;
thread0.td_kstack_pages = KSTACK_PAGES;
for (i = 0; i < KSTACK_PAGES; i++) {
moea_kenter(mmup, va, pa);;
moea_kenter(mmup, va, pa);
pa += PAGE_SIZE;
va += PAGE_SIZE;
}
@ -935,7 +935,7 @@ moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
va = virtual_avail;
virtual_avail += round_page(MSGBUF_SIZE);
while (va < virtual_avail) {
moea_kenter(mmup, va, pa);;
moea_kenter(mmup, va, pa);
pa += PAGE_SIZE;
va += PAGE_SIZE;
}
@ -948,7 +948,7 @@ moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
va = virtual_avail;
virtual_avail += DPCPU_SIZE;
while (va < virtual_avail) {
moea_kenter(mmup, va, pa);;
moea_kenter(mmup, va, pa);
pa += PAGE_SIZE;
va += PAGE_SIZE;
}

@ -1019,7 +1019,7 @@ moea64_bridge_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernele
thread0.td_kstack = va;
thread0.td_kstack_pages = KSTACK_PAGES;
for (i = 0; i < KSTACK_PAGES; i++) {
moea64_kenter(mmup, va, pa);;
moea64_kenter(mmup, va, pa);
pa += PAGE_SIZE;
va += PAGE_SIZE;
}
@ -1032,7 +1032,7 @@ moea64_bridge_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernele
va = virtual_avail;
virtual_avail += round_page(MSGBUF_SIZE);
while (va < virtual_avail) {
moea64_kenter(mmup, va, pa);;
moea64_kenter(mmup, va, pa);
pa += PAGE_SIZE;
va += PAGE_SIZE;
}
@ -1045,7 +1045,7 @@ moea64_bridge_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernele
va = virtual_avail;
virtual_avail += DPCPU_SIZE;
while (va < virtual_avail) {
moea64_kenter(mmup, va, pa);;
moea64_kenter(mmup, va, pa);
pa += PAGE_SIZE;
va += PAGE_SIZE;
}

@ -2299,7 +2299,7 @@ make_sure_to_unlock:
static void
mmu_booke_change_wiring(mmu_t mmu, pmap_t pmap, vm_offset_t va, boolean_t wired)
{
pte_t *pte;;
pte_t *pte;

PMAP_LOCK(pmap);
if ((pte = pte_find(mmu, pmap, va)) != NULL) {
@ -2783,7 +2783,7 @@ tlb1_write_entry(unsigned int idx)
mtspr(SPR_MAS7, mas7);
__asm __volatile("isync; tlbwe; isync; msync");

//debugf("tlb1_write_entry: e\n");;
//debugf("tlb1_write_entry: e\n");
}

/*

@ -258,7 +258,7 @@ clnt_dg_create(
rpc_createerr.cf_error.re_errno = 0;
goto err2;
}
cu->cu_mcalllen = XDR_GETPOS(&xdrs);;
cu->cu_mcalllen = XDR_GETPOS(&xdrs);

/*
* By default, closeit is always FALSE. It is the user's responsibility

@ -739,7 +739,7 @@ out1:
auio.uio_iovcnt = 1;
aiov.iov_base = (void *)snapblklist;
aiov.iov_len = snaplistsize * sizeof(daddr_t);
auio.uio_resid = aiov.iov_len;;
auio.uio_resid = aiov.iov_len;
auio.uio_offset = ip->i_size;
auio.uio_segflg = UIO_SYSSPACE;
auio.uio_rw = UIO_WRITE;

@ -330,7 +330,7 @@ xenbus_devices_changed(struct xenbus_watch *watch,
device_t dev = sc->xs_dev;
char *node, *bus, *type, *id, *p;

node = strdup(vec[XS_WATCH_PATH], M_DEVBUF);;
node = strdup(vec[XS_WATCH_PATH], M_DEVBUF);
p = strchr(node, '/');
if (!p)
goto out;