freebsd-skq/sys/dev/aic7xxx/aic7xxx_osm.c

/*
* Bus independent FreeBSD shim for the aic7xxx based adaptec SCSI controllers
*
* Copyright (c) 1994, 1995, 1996, 1997, 1998, 1999, 2000 Justin T. Gibbs.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU Public License ("GPL").
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id$
*
* $FreeBSD$
*/
#include <dev/aic7xxx/aic7xxx_freebsd.h>
#include <dev/aic7xxx/aic7xxx_inline.h>
#include <sys/eventhandler.h>
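/*
 * Allow host target mode support to be enabled via the kernel
 * config; it defaults to off.
 */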
#ifndef AHC_TMODE_ENABLE
#define AHC_TMODE_ENABLE 0
#endif
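/*
 * Stash the SCB and softc for a transaction in the CCB's
 * driver-private pointer fields so they can be recovered later
 * (e.g. when an abort is requested).
 */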
#define ccb_scb_ptr spriv_ptr0
#define ccb_ahc_ptr spriv_ptr1
#ifdef AHC_DEBUG
static int ahc_debug = AHC_DEBUG;
#endif
static void ahc_freebsd_intr(void *arg);
#if UNUSED
static void ahc_dump_targcmd(struct target_cmd *cmd);
#endif
static void ahc_action(struct cam_sim *sim, union ccb *ccb);
static void ahc_get_tran_settings(struct ahc_softc *ahc,
int our_id, char channel,
struct ccb_trans_settings *cts);
static void ahc_async(void *callback_arg, uint32_t code,
struct cam_path *path, void *arg);
static void ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
int nsegments, int error);
static void ahc_poll(struct cam_sim *sim);
static void ahc_setup_data(struct ahc_softc *ahc, struct cam_sim *sim,
struct ccb_scsiio *csio, struct scb *scb);
static void ahc_abort_ccb(struct ahc_softc *ahc, struct cam_sim *sim,
union ccb *ccb);
static int ahc_create_path(struct ahc_softc *ahc,
char channel, u_int target, u_int lun,
struct cam_path **path);
static void ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb);
static int
ahc_create_path(struct ahc_softc *ahc, char channel, u_int target,
u_int lun, struct cam_path **path)
{
path_id_t path_id;
if (channel == 'B')
path_id = cam_sim_path(ahc->platform_data->sim_b);
else
path_id = cam_sim_path(ahc->platform_data->sim);
return (xpt_create_path(path, /*periph*/NULL,
path_id, target, lun));
}
/*
 * Attach all the sub-devices we can find.  Returns the number of
 * buses successfully attached.
 */
int
ahc_attach(struct ahc_softc *ahc)
{
char ahc_info[256];
struct ccb_setasync csa;
struct cam_devq *devq;
int bus_id;
int bus_id2;
struct cam_sim *sim;
struct cam_sim *sim2;
struct cam_path *path;
struct cam_path *path2;
long s;
int count;
int error;
count = 0;
sim = NULL;
sim2 = NULL;
ahc_controller_info(ahc, ahc_info);
printf("%s\n", ahc_info);
ahc_lock(ahc, &s);
/* Hook up our interrupt handler */
if ((error = bus_setup_intr(ahc->dev_softc, ahc->platform_data->irq,
INTR_TYPE_CAM, ahc_freebsd_intr, ahc,
&ahc->platform_data->ih)) != 0) {
device_printf(ahc->dev_softc, "bus_setup_intr() failed: %d\n",
error);
goto fail;
}
/*
* Attach secondary channel first if the user has
* declared it the primary channel.
*/
if ((ahc->flags & AHC_CHANNEL_B_PRIMARY) != 0) {
bus_id = 1;
bus_id2 = 0;
} else {
bus_id = 0;
bus_id2 = 1;
}
/*
* Create the device queue for our SIM(s).
*/
devq = cam_simq_alloc(AHC_SCB_MAX - 1);
if (devq == NULL)
goto fail;
/*
* Construct our first channel SIM entry
*/
sim = cam_sim_alloc(ahc_action, ahc_poll, "ahc", ahc,
device_get_unit(ahc->dev_softc),
1, AHC_SCB_MAX - 1, devq);
if (sim == NULL) {
cam_simq_free(devq);
goto fail;
}
if (xpt_bus_register(sim, bus_id) != CAM_SUCCESS) {
cam_sim_free(sim, /*free_devq*/TRUE);
sim = NULL;
goto fail;
}
if (xpt_create_path(&path, /*periph*/NULL,
cam_sim_path(sim), CAM_TARGET_WILDCARD,
CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
xpt_bus_deregister(cam_sim_path(sim));
cam_sim_free(sim, /*free_devq*/TRUE);
sim = NULL;
goto fail;
}
xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
csa.ccb_h.func_code = XPT_SASYNC_CB;
csa.event_enable = AC_LOST_DEVICE;
csa.callback = ahc_async;
csa.callback_arg = sim;
xpt_action((union ccb *)&csa);
count++;
if (ahc->features & AHC_TWIN) {
sim2 = cam_sim_alloc(ahc_action, ahc_poll, "ahc",
ahc, device_get_unit(ahc->dev_softc), 1,
AHC_SCB_MAX - 1, devq);
if (sim2 == NULL) {
printf("ahc_attach: Unable to attach second "
"bus due to resource shortage");
goto fail;
}
if (xpt_bus_register(sim2, bus_id2) != CAM_SUCCESS) {
printf("ahc_attach: Unable to attach second "
"bus due to resource shortage");
/*
* We do not want to destroy the device queue
* because the first bus is using it.
*/
cam_sim_free(sim2, /*free_devq*/FALSE);
goto fail;
}
if (xpt_create_path(&path2, /*periph*/NULL,
cam_sim_path(sim2),
CAM_TARGET_WILDCARD,
CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
xpt_bus_deregister(cam_sim_path(sim2));
cam_sim_free(sim2, /*free_devq*/FALSE);
sim2 = NULL;
goto fail;
}
xpt_setup_ccb(&csa.ccb_h, path2, /*priority*/5);
csa.ccb_h.func_code = XPT_SASYNC_CB;
csa.event_enable = AC_LOST_DEVICE;
csa.callback = ahc_async;
csa.callback_arg = sim2;
xpt_action((union ccb *)&csa);
count++;
}
fail:
if ((ahc->flags & AHC_CHANNEL_B_PRIMARY) != 0) {
ahc->platform_data->sim_b = sim;
ahc->platform_data->path_b = path;
ahc->platform_data->sim = sim2;
ahc->platform_data->path = path2;
} else {
ahc->platform_data->sim = sim;
ahc->platform_data->path = path;
ahc->platform_data->sim_b = sim2;
ahc->platform_data->path_b = path2;
}
ahc_unlock(ahc, &s);
if (count != 0)
/* We have to wait until after any system dumps... */
EVENTHANDLER_REGISTER(shutdown_final, ahc_shutdown,
ahc, SHUTDOWN_PRI_DEFAULT);
return (count);
}
/*
* Catch an interrupt from the adapter
*/
void
ahc_freebsd_intr(void *arg)
{
struct ahc_softc *ahc;
ahc = (struct ahc_softc *)arg;
ahc_intr(ahc);
}
/*
* We have an SCB which has been processed by the
* adapter; now we look to see how the operation
* went.
*/
void
ahc_done(struct ahc_softc *ahc, struct scb *scb)
{
union ccb *ccb;
CAM_DEBUG(scb->io_ctx->ccb_h.path, CAM_DEBUG_TRACE,
("ahc_done - scb %d\n", scb->hscb->tag));
ccb = scb->io_ctx;
LIST_REMOVE(scb, pending_links);
if (ccb->ccb_h.func_code == XPT_SCSI_IO
&& ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) == 0
|| ccb->csio.tag_action == CAM_TAG_ACTION_NONE)
&& (ahc->features & AHC_SCB_BTT) == 0) {
struct scb_tailq *untagged_q;
untagged_q = &ahc->untagged_queues[ccb->ccb_h.target_id];
TAILQ_REMOVE(untagged_q, scb, links.tqe);
ahc_run_untagged_queue(ahc, untagged_q);
}
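/* Cancel the watchdog timer for this command. */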
untimeout(ahc_timeout, (caddr_t)scb, ccb->ccb_h.timeout_ch);
if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
bus_dmasync_op_t op;
if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
op = BUS_DMASYNC_POSTREAD;
else
op = BUS_DMASYNC_POSTWRITE;
bus_dmamap_sync(ahc->buffer_dmat, scb->dmamap, op);
bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
}
if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
if (ahc_get_transaction_status(scb) == CAM_REQ_INPROG)
ccb->ccb_h.status |= CAM_REQ_CMP;
ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
ahc_free_scb(ahc, scb);
xpt_done(ccb);
return;
}
/*
* If the recovery SCB completes, we have to be
* out of our timeout.
*/
if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
struct scb *list_scb;
/*
* We were able to complete the command successfully,
* so reinstate the timeouts for all other pending
* commands.
*/
LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
union ccb *ccb;
ccb = list_scb->io_ctx;
ccb->ccb_h.timeout_ch =
timeout(ahc_timeout, list_scb,
(ccb->ccb_h.timeout * hz)/1000);
}
/*
* Ensure that we didn't put a second instance of this
* SCB into the QINFIFO.
*/
ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
SCB_GET_CHANNEL(ahc, scb),
SCB_GET_LUN(scb), scb->hscb->tag,
ROLE_INITIATOR, /*status*/0,
SEARCH_REMOVE);
if (ahc_get_transaction_status(scb) == CAM_BDR_SENT
|| ahc_get_transaction_status(scb) == CAM_REQ_ABORTED)
ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
ahc_print_path(ahc, scb);
printf("no longer in timeout, status = %x\n",
ccb->ccb_h.status);
}
/* Don't clobber any existing error state */
if (ahc_get_transaction_status(scb) == CAM_REQ_INPROG) {
ccb->ccb_h.status |= CAM_REQ_CMP;
} else if ((scb->flags & SCB_SENSE) != 0) {
/*
* We performed autosense retrieval.
*
* Zero any sense not transferred by the
* device. The SCSI spec mandates that any
* untransferred data should be assumed to be
* zero. Complete the 'bounce' of sense information
* through buffers accessible via bus-space by
* copying it into the client's csio.
*/
memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data));
memcpy(&ccb->csio.sense_data,
&ahc->scb_data->sense[scb->hscb->tag],
(scb->sg_list->len & AHC_SG_LEN_MASK)
- ccb->csio.sense_resid);
scb->io_ctx->ccb_h.status |= CAM_AUTOSNS_VALID;
}
ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
ahc_free_scb(ahc, scb);
xpt_done(ccb);
}
static void
ahc_action(struct cam_sim *sim, union ccb *ccb)
{
struct ahc_softc *ahc;
struct tmode_lstate *lstate;
u_int target_id;
u_int our_id;
long s;
CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahc_action\n"));
ahc = (struct ahc_softc *)cam_sim_softc(sim);
target_id = ccb->ccb_h.target_id;
our_id = SIM_SCSI_ID(ahc, sim);
switch (ccb->ccb_h.func_code) {
/* Common cases first */
case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */
case XPT_CONT_TARGET_IO:/* Continue Host Target I/O Connection*/
{
struct tmode_tstate *tstate;
cam_status status;
status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate,
&lstate, TRUE);
if (status != CAM_REQ_CMP) {
if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
/* Response from the black hole device */
tstate = NULL;
lstate = ahc->black_hole;
} else {
ccb->ccb_h.status = status;
xpt_done(ccb);
break;
}
}
if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
ahc_lock(ahc, &s);
SLIST_INSERT_HEAD(&lstate->accept_tios, &ccb->ccb_h,
sim_links.sle);
ccb->ccb_h.status = CAM_REQ_INPROG;
if ((ahc->flags & AHC_TQINFIFO_BLOCKED) != 0)
ahc_run_tqinfifo(ahc, /*paused*/FALSE);
ahc_unlock(ahc, &s);
break;
}
/*
* The target_id represents the target we attempt to
* select. In target mode, this is the initiator of
* the original command.
*/
our_id = target_id;
target_id = ccb->csio.init_id;
/* FALLTHROUGH */
}
case XPT_SCSI_IO: /* Execute the requested I/O operation */
case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */
{
struct scb *scb;
struct hardware_scb *hscb;
/*
* get an scb to use.
*/
ahc_lock(ahc, &s);
if ((scb = ahc_get_scb(ahc)) == NULL) {
ahc->flags |= AHC_RESOURCE_SHORTAGE;
ahc_unlock(ahc, &s);
xpt_freeze_simq(sim, /*count*/1);
ccb->ccb_h.status = CAM_REQUEUE_REQ;
xpt_done(ccb);
return;
}
ahc_unlock(ahc, &s);
hscb = scb->hscb;
CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_SUBTRACE,
("start scb(%p)\n", scb));
scb->io_ctx = ccb;
/*
* So we can find the SCB when an abort is requested
*/
ccb->ccb_h.ccb_scb_ptr = scb;
ccb->ccb_h.ccb_ahc_ptr = ahc;
/*
* Put all the arguments for the xfer in the scb
*/
hscb->control = 0;
hscb->scsiid = BUILD_SCSIID(ahc, sim, target_id, our_id);
hscb->lun = ccb->ccb_h.target_lun;
if (ccb->ccb_h.func_code == XPT_RESET_DEV) {
hscb->cdb_len = 0;
scb->flags |= SCB_DEVICE_RESET;
hscb->control |= MK_MESSAGE;
ahc_execute_scb(scb, NULL, 0, 0);
} else {
if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
struct target_data *tdata;
tdata = &hscb->shared_data.tdata;
if (ahc->pending_device == lstate) {
scb->flags |= SCB_TARGET_IMMEDIATE;
ahc->pending_device = NULL;
}
hscb->control |= TARGET_SCB;
tdata->target_phases = IDENTIFY_SEEN;
if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
tdata->target_phases |= SPHASE_PENDING;
tdata->scsi_status =
ccb->csio.scsi_status;
}
tdata->initiator_tag = ccb->csio.tag_id;
}
if (ccb->ccb_h.flags & CAM_TAG_ACTION_VALID)
hscb->control |= ccb->csio.tag_action;
ahc_setup_data(ahc, sim, &ccb->csio, scb);
}
break;
}
case XPT_NOTIFY_ACK:
case XPT_IMMED_NOTIFY:
{
struct tmode_tstate *tstate;
struct tmode_lstate *lstate;
cam_status status;
status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate,
&lstate, TRUE);
if (status != CAM_REQ_CMP) {
ccb->ccb_h.status = status;
xpt_done(ccb);
break;
}
SLIST_INSERT_HEAD(&lstate->immed_notifies, &ccb->ccb_h,
sim_links.sle);
ccb->ccb_h.status = CAM_REQ_INPROG;
ahc_send_lstate_events(ahc, lstate);
break;
}
case XPT_EN_LUN: /* Enable LUN as a target */
ahc_handle_en_lun(ahc, sim, ccb);
xpt_done(ccb);
break;
case XPT_ABORT: /* Abort the specified CCB */
{
ahc_abort_ccb(ahc, sim, ccb);
break;
}
case XPT_SET_TRAN_SETTINGS:
{
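/*
 * Two implementations follow: one for the newer CAM transfer
 * settings API (AHC_NEW_TRAN_SETTINGS) and one for the older
 * CCB_TRANS_* interface.
 */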
#ifdef AHC_NEW_TRAN_SETTINGS
struct ahc_devinfo devinfo;
struct ccb_trans_settings *cts;
struct ccb_trans_settings_scsi *scsi;
struct ccb_trans_settings_spi *spi;
struct ahc_initiator_tinfo *tinfo;
struct tmode_tstate *tstate;
uint16_t *discenable;
uint16_t *tagenable;
u_int update_type;
cts = &ccb->cts;
scsi = &cts->proto_specific.scsi;
spi = &cts->xport_specific.spi;
ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
cts->ccb_h.target_id,
cts->ccb_h.target_lun,
SIM_CHANNEL(ahc, sim),
ROLE_UNKNOWN);
tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
devinfo.our_scsiid,
devinfo.target, &tstate);
update_type = 0;
if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
update_type |= AHC_TRANS_GOAL;
discenable = &tstate->discenable;
tagenable = &tstate->tagenable;
tinfo->current.protocol_version =
cts->protocol_version;
tinfo->current.transport_version =
cts->transport_version;
tinfo->goal.protocol_version =
cts->protocol_version;
tinfo->goal.transport_version =
cts->transport_version;
} else if (cts->type == CTS_TYPE_USER_SETTINGS) {
update_type |= AHC_TRANS_USER;
discenable = &ahc->user_discenable;
tagenable = &ahc->user_tagenable;
tinfo->user.protocol_version =
cts->protocol_version;
tinfo->user.transport_version =
cts->transport_version;
} else {
ccb->ccb_h.status = CAM_REQ_INVALID;
xpt_done(ccb);
break;
}
ahc_lock(ahc, &s);
if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
*discenable |= devinfo.target_mask;
else
*discenable &= ~devinfo.target_mask;
}
if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
*tagenable |= devinfo.target_mask;
else
*tagenable &= ~devinfo.target_mask;
}
if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
ahc_validate_width(ahc, &spi->bus_width);
ahc_set_width(ahc, &devinfo, spi->bus_width,
update_type, /*paused*/FALSE);
}
if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0) {
if (update_type == AHC_TRANS_USER)
spi->ppr_options = tinfo->user.ppr_options;
else
spi->ppr_options = tinfo->goal.ppr_options;
}
if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) {
if (update_type == AHC_TRANS_USER)
spi->sync_offset = tinfo->user.offset;
else
spi->sync_offset = tinfo->goal.offset;
}
if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
if (update_type == AHC_TRANS_USER)
spi->sync_period = tinfo->user.period;
else
spi->sync_period = tinfo->goal.period;
}
if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0)
|| ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) {
struct ahc_syncrate *syncrate;
u_int maxsync;
if ((ahc->features & AHC_ULTRA2) != 0)
maxsync = AHC_SYNCRATE_DT;
else if ((ahc->features & AHC_ULTRA) != 0)
maxsync = AHC_SYNCRATE_ULTRA;
else
maxsync = AHC_SYNCRATE_FAST;
syncrate = ahc_find_syncrate(ahc, &spi->sync_period,
&spi->ppr_options,
maxsync);
ahc_validate_offset(ahc, syncrate, &spi->sync_offset,
spi->bus_width);
/* We use a period of 0 to represent async */
if (spi->sync_offset == 0) {
spi->sync_period = 0;
spi->ppr_options = 0;
}
ahc_set_syncrate(ahc, &devinfo, syncrate,
spi->sync_period, spi->sync_offset,
spi->ppr_options, update_type,
/*paused*/FALSE);
}
ahc_unlock(ahc, &s);
ccb->ccb_h.status = CAM_REQ_CMP;
xpt_done(ccb);
#else
struct ahc_devinfo devinfo;
struct ccb_trans_settings *cts;
struct ahc_initiator_tinfo *tinfo;
struct tmode_tstate *tstate;
uint16_t *discenable;
uint16_t *tagenable;
u_int update_type;
long s;
cts = &ccb->cts;
ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
cts->ccb_h.target_id,
cts->ccb_h.target_lun,
SIM_CHANNEL(ahc, sim),
ROLE_UNKNOWN);
tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
devinfo.our_scsiid,
devinfo.target, &tstate);
update_type = 0;
if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
update_type |= AHC_TRANS_GOAL;
discenable = &tstate->discenable;
tagenable = &tstate->tagenable;
} else if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
update_type |= AHC_TRANS_USER;
discenable = &ahc->user_discenable;
tagenable = &ahc->user_tagenable;
} else {
ccb->ccb_h.status = CAM_REQ_INVALID;
xpt_done(ccb);
break;
}
ahc_lock(ahc, &s);
if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
*discenable |= devinfo.target_mask;
else
*discenable &= ~devinfo.target_mask;
}
if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
*tagenable |= devinfo.target_mask;
else
*tagenable &= ~devinfo.target_mask;
}
if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
ahc_validate_width(ahc, &cts->bus_width);
ahc_set_width(ahc, &devinfo, cts->bus_width,
update_type, /*paused*/FALSE);
}
if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0) {
if (update_type == AHC_TRANS_USER)
cts->sync_offset = tinfo->user.offset;
else
cts->sync_offset = tinfo->goal.offset;
}
if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0) {
if (update_type == AHC_TRANS_USER)
cts->sync_period = tinfo->user.period;
else
cts->sync_period = tinfo->goal.period;
}
if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0)
|| ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)) {
struct ahc_syncrate *syncrate;
u_int ppr_options;
u_int maxsync;
if ((ahc->features & AHC_ULTRA2) != 0)
maxsync = AHC_SYNCRATE_DT;
else if ((ahc->features & AHC_ULTRA) != 0)
maxsync = AHC_SYNCRATE_ULTRA;
else
maxsync = AHC_SYNCRATE_FAST;
ppr_options = 0;
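/* Sync period factors of 9 or less require DT (dual-edge) transfers. */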
if (cts->sync_period <= 9)
ppr_options = MSG_EXT_PPR_DT_REQ;
syncrate = ahc_find_syncrate(ahc, &cts->sync_period,
&ppr_options,
maxsync);
ahc_validate_offset(ahc, syncrate, &cts->sync_offset,
MSG_EXT_WDTR_BUS_8_BIT);
/* We use a period of 0 to represent async */
if (cts->sync_offset == 0) {
cts->sync_period = 0;
ppr_options = 0;
}
if (ppr_options == MSG_EXT_PPR_DT_REQ
&& tinfo->user.transport_version >= 3) {
tinfo->goal.transport_version =
tinfo->user.transport_version;
tinfo->current.transport_version =
tinfo->user.transport_version;
}
ahc_set_syncrate(ahc, &devinfo, syncrate,
cts->sync_period, cts->sync_offset,
ppr_options, update_type,
/*paused*/FALSE);
}
ahc_unlock(ahc, &s);
ccb->ccb_h.status = CAM_REQ_CMP;
xpt_done(ccb);
#endif
break;
}
case XPT_GET_TRAN_SETTINGS:
/* Get default/user set transfer settings for the target */
{
ahc_lock(ahc, &s);
ahc_get_tran_settings(ahc, SIM_SCSI_ID(ahc, sim),
SIM_CHANNEL(ahc, sim), &ccb->cts);
ahc_unlock(ahc, &s);
xpt_done(ccb);
break;
}
case XPT_CALC_GEOMETRY:
{
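/*
 * Synthesize a BIOS-style CHS geometry.  Extended translation
 * (255 heads, 63 sectors per track) is used for volumes larger
 * than 1GB when it has been enabled for this channel.
 */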
struct ccb_calc_geometry *ccg;
uint32_t size_mb;
uint32_t secs_per_cylinder;
int extended;
ccg = &ccb->ccg;
size_mb = ccg->volume_size
/ ((1024L * 1024L) / ccg->block_size);
extended = SIM_IS_SCSIBUS_B(ahc, sim)
? ahc->flags & AHC_EXTENDED_TRANS_B
: ahc->flags & AHC_EXTENDED_TRANS_A;
if (size_mb > 1024 && extended) {
ccg->heads = 255;
ccg->secs_per_track = 63;
} else {
ccg->heads = 64;
ccg->secs_per_track = 32;
}
secs_per_cylinder = ccg->heads * ccg->secs_per_track;
ccg->cylinders = ccg->volume_size / secs_per_cylinder;
ccb->ccb_h.status = CAM_REQ_CMP;
xpt_done(ccb);
break;
}
case XPT_RESET_BUS: /* Reset the specified SCSI bus */
{
int found;
ahc_lock(ahc, &s);
found = ahc_reset_channel(ahc, SIM_CHANNEL(ahc, sim),
/*initiate reset*/TRUE);
ahc_unlock(ahc, &s);
if (bootverbose) {
xpt_print_path(SIM_PATH(ahc, sim));
printf("SCSI bus reset delivered. "
"%d SCBs aborted.\n", found);
}
ccb->ccb_h.status = CAM_REQ_CMP;
xpt_done(ccb);
break;
}
case XPT_TERM_IO: /* Terminate the I/O process */
/* XXX Implement */
ccb->ccb_h.status = CAM_REQ_INVALID;
xpt_done(ccb);
break;
case XPT_PATH_INQ: /* Path routing inquiry */
{
struct ccb_pathinq *cpi = &ccb->cpi;
cpi->version_num = 1; /* XXX??? */
cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
if ((ahc->features & AHC_WIDE) != 0)
cpi->hba_inquiry |= PI_WIDE_16;
if ((ahc->flags & AHC_TARGETMODE) != 0) {
cpi->target_sprt = PIT_PROCESSOR
| PIT_DISCONNECT
| PIT_TERM_IO;
} else {
cpi->target_sprt = 0;
}
cpi->hba_misc = (ahc->flags & AHC_INITIATORMODE)
? 0 : PIM_NOINITIATOR;
cpi->hba_eng_cnt = 0;
cpi->max_target = (ahc->features & AHC_WIDE) ? 15 : 7;
cpi->max_lun = 64;
if (SIM_IS_SCSIBUS_B(ahc, sim)) {
cpi->initiator_id = ahc->our_id_b;
if ((ahc->flags & AHC_RESET_BUS_B) == 0)
cpi->hba_misc |= PIM_NOBUSRESET;
} else {
cpi->initiator_id = ahc->our_id;
if ((ahc->flags & AHC_RESET_BUS_A) == 0)
cpi->hba_misc |= PIM_NOBUSRESET;
}
cpi->bus_id = cam_sim_bus(sim);
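/* Asynchronous transfers run at roughly 3.3MB/s (value is in KB/s). */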
cpi->base_transfer_speed = 3300;
strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
cpi->unit_number = cam_sim_unit(sim);
#ifdef AHC_NEW_TRAN_SETTINGS
cpi->protocol = PROTO_SCSI;
cpi->protocol_version = SCSI_REV_2;
cpi->transport = XPORT_SPI;
cpi->transport_version = 2;
cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_ST;
if ((ahc->features & AHC_DT) != 0) {
cpi->transport_version = 3;
cpi->xport_specific.spi.ppr_options =
SID_SPI_CLOCK_DT_ST;
}
#endif
cpi->ccb_h.status = CAM_REQ_CMP;
xpt_done(ccb);
break;
}
default:
ccb->ccb_h.status = CAM_REQ_INVALID;
xpt_done(ccb);
break;
}
}
static void
ahc_get_tran_settings(struct ahc_softc *ahc, int our_id, char channel,
struct ccb_trans_settings *cts)
{
#ifdef AHC_NEW_TRAN_SETTINGS
struct ahc_devinfo devinfo;
struct ccb_trans_settings_scsi *scsi;
struct ccb_trans_settings_spi *spi;
struct ahc_initiator_tinfo *targ_info;
struct tmode_tstate *tstate;
struct ahc_transinfo *tinfo;
scsi = &cts->proto_specific.scsi;
spi = &cts->xport_specific.spi;
ahc_compile_devinfo(&devinfo, our_id,
cts->ccb_h.target_id,
cts->ccb_h.target_lun,
channel, ROLE_UNKNOWN);
targ_info = ahc_fetch_transinfo(ahc, devinfo.channel,
devinfo.our_scsiid,
devinfo.target, &tstate);
if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
tinfo = &targ_info->current;
else
tinfo = &targ_info->user;
scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
if (cts->type == CTS_TYPE_USER_SETTINGS) {
if ((ahc->user_discenable & devinfo.target_mask) != 0)
spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
if ((ahc->user_tagenable & devinfo.target_mask) != 0)
scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
} else {
if ((tstate->discenable & devinfo.target_mask) != 0)
spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
if ((tstate->tagenable & devinfo.target_mask) != 0)
scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
}
cts->protocol_version = tinfo->protocol_version;
cts->transport_version = tinfo->transport_version;
spi->sync_period = tinfo->period;
spi->sync_offset = tinfo->offset;
spi->bus_width = tinfo->width;
spi->ppr_options = tinfo->ppr_options;
cts->protocol = PROTO_SCSI;
cts->transport = XPORT_SPI;
spi->valid = CTS_SPI_VALID_SYNC_RATE
| CTS_SPI_VALID_SYNC_OFFSET
| CTS_SPI_VALID_BUS_WIDTH
| CTS_SPI_VALID_PPR_OPTIONS;
if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
scsi->valid = CTS_SCSI_VALID_TQ;
spi->valid |= CTS_SPI_VALID_DISC;
} else {
scsi->valid = 0;
}
cts->ccb_h.status = CAM_REQ_CMP;
#else
struct ahc_devinfo devinfo;
struct ahc_initiator_tinfo *targ_info;
struct tmode_tstate *tstate;
struct ahc_transinfo *tinfo;
long s;
ahc_compile_devinfo(&devinfo, our_id,
cts->ccb_h.target_id,
cts->ccb_h.target_lun,
channel, ROLE_UNKNOWN);
targ_info = ahc_fetch_transinfo(ahc, devinfo.channel,
devinfo.our_scsiid,
devinfo.target, &tstate);
if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0)
tinfo = &targ_info->current;
else
tinfo = &targ_info->user;
ahc_lock(ahc, &s);
cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) == 0) {
if ((ahc->user_discenable & devinfo.target_mask) != 0)
cts->flags |= CCB_TRANS_DISC_ENB;
if ((ahc->user_tagenable & devinfo.target_mask) != 0)
cts->flags |= CCB_TRANS_TAG_ENB;
} else {
if ((tstate->discenable & devinfo.target_mask) != 0)
cts->flags |= CCB_TRANS_DISC_ENB;
if ((tstate->tagenable & devinfo.target_mask) != 0)
cts->flags |= CCB_TRANS_TAG_ENB;
}
cts->sync_period = tinfo->period;
cts->sync_offset = tinfo->offset;
cts->bus_width = tinfo->width;
ahc_unlock(ahc, &s);
cts->valid = CCB_TRANS_SYNC_RATE_VALID
| CCB_TRANS_SYNC_OFFSET_VALID
| CCB_TRANS_BUS_WIDTH_VALID;
if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD)
cts->valid |= CCB_TRANS_DISC_VALID|CCB_TRANS_TQ_VALID;
cts->ccb_h.status = CAM_REQ_CMP;
#endif
}
static void
ahc_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
{
struct ahc_softc *ahc;
struct cam_sim *sim;
sim = (struct cam_sim *)callback_arg;
ahc = (struct ahc_softc *)cam_sim_softc(sim);
switch (code) {
case AC_LOST_DEVICE:
{
struct ahc_devinfo devinfo;
long s;
ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
xpt_path_target_id(path),
xpt_path_lun_id(path),
SIM_CHANNEL(ahc, sim),
ROLE_UNKNOWN);
/*
* Revert to async/narrow transfers
* for the next device.
*/
ahc_lock(ahc, &s);
ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
AHC_TRANS_GOAL|AHC_TRANS_CUR, /*paused*/FALSE);
ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL,
/*period*/0, /*offset*/0, /*ppr_options*/0,
AHC_TRANS_GOAL|AHC_TRANS_CUR,
/*paused*/FALSE);
ahc_unlock(ahc, &s);
break;
}
default:
break;
}
}
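/*
 * Callback invoked by bus_dmamap_load() once the data buffer has
 * been mapped: copy the DMA segments into the SCB's S/G list and
 * queue the SCB to the controller.
 */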
static void
ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments,
int error)
{
struct scb *scb;
union ccb *ccb;
struct ahc_softc *ahc;
struct ahc_initiator_tinfo *tinfo;
struct tmode_tstate *tstate;
u_int mask;
long s;
scb = (struct scb *)arg;
ccb = scb->io_ctx;
ahc = (struct ahc_softc *)ccb->ccb_h.ccb_ahc_ptr;
if (error != 0) {
if (error == EFBIG)
ahc_set_transaction_status(scb, CAM_REQ_TOO_BIG);
else
ahc_set_transaction_status(scb, CAM_REQ_CMP_ERR);
if (nsegments != 0)
bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
ahc_lock(ahc, &s);
ahc_free_scb(ahc, scb);
ahc_unlock(ahc, &s);
xpt_done(ccb);
return;
}
if (nsegments != 0) {
struct ahc_dma_seg *sg;
bus_dma_segment_t *end_seg;
bus_dmasync_op_t op;
end_seg = dm_segs + nsegments;
/* Copy the segments into our SG list */
sg = scb->sg_list;
while (dm_segs < end_seg) {
sg->addr = dm_segs->ds_addr;
/* XXX Add in the 5th byte of the address later. */
sg->len = dm_segs->ds_len;
sg++;
dm_segs++;
}
/*
* Note where to find the SG entries in bus space.
* We also set the full residual flag which the
* sequencer will clear as soon as a data transfer
* occurs.
*/
scb->hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID;
if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
op = BUS_DMASYNC_PREREAD;
else
op = BUS_DMASYNC_PREWRITE;
bus_dmamap_sync(ahc->buffer_dmat, scb->dmamap, op);
if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
struct target_data *tdata;
tdata = &scb->hscb->shared_data.tdata;
tdata->target_phases |= DPHASE_PENDING;
if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
tdata->data_phase = P_DATAOUT;
else
tdata->data_phase = P_DATAIN;
/*
* If the transfer is of an odd length and in the
* "in" direction (scsi->HostBus), then it may
* trigger a bug in the 'WideODD' feature of
* non-Ultra2 chips. Force the total data-length
* to be even by adding an extra 1 byte SG
* element. We do this even if we are not currently
* negotiated wide as negotiation could occur before
* this command is executed.
*/
if ((ahc->bugs & AHC_TMODE_WIDEODD_BUG) != 0
&& (ccb->csio.dxfer_len & 0x1) != 0
&& (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
nsegments++;
if (nsegments > AHC_NSEG) {
ahc_set_transaction_status(scb,
CAM_REQ_TOO_BIG);
bus_dmamap_unload(ahc->buffer_dmat,
scb->dmamap);
ahc_lock(ahc, &s);
ahc_free_scb(ahc, scb);
ahc_unlock(ahc, &s);
xpt_done(ccb);
return;
}
sg->addr = ahc->dma_bug_buf;
sg->len = 1;
sg++;
}
}
sg--;
sg->len |= AHC_DMA_LAST_SEG;
/* Copy the first SG into the "current" data pointer area */
scb->hscb->dataptr = scb->sg_list->addr;
scb->hscb->datacnt = scb->sg_list->len;
} else {
scb->hscb->sgptr = SG_LIST_NULL;
scb->hscb->dataptr = 0;
scb->hscb->datacnt = 0;
}
scb->sg_count = nsegments;
ahc_lock(ahc, &s);
/*
* Last time we need to check if this SCB needs to
* be aborted.
*/
if (ahc_get_transaction_status(scb) != CAM_REQ_INPROG) {
if (nsegments != 0)
bus_dmamap_unload(ahc->buffer_dmat,
scb->dmamap);
ahc_free_scb(ahc, scb);
ahc_unlock(ahc, &s);
xpt_done(ccb);
return;
}
tinfo = ahc_fetch_transinfo(ahc, SCSIID_CHANNEL(ahc, scb->hscb->scsiid),
SCSIID_OUR_ID(scb->hscb->scsiid),
ccb->ccb_h.target_id, &tstate);
mask = SCB_GET_TARGET_MASK(ahc, scb);
scb->hscb->scsirate = tinfo->scsirate;
scb->hscb->scsioffset = tinfo->current.offset;
if ((tstate->ultraenb & mask) != 0)
scb->hscb->control |= ULTRAENB;
if ((tstate->discenable & mask) != 0
&& (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0)
scb->hscb->control |= DISCENB;
if ((ccb->ccb_h.flags & CAM_NEGOTIATE) != 0
&& (tinfo->current.width != 0 || tinfo->current.period != 0)) {
scb->flags |= SCB_NEGOTIATE;
scb->hscb->control |= MK_MESSAGE;
}
LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links);
ccb->ccb_h.status |= CAM_SIM_QUEUED;
if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
ccb->ccb_h.timeout = 5 * 1000;
ccb->ccb_h.timeout_ch =
timeout(ahc_timeout, (caddr_t)scb,
(ccb->ccb_h.timeout * hz) / 1000);
}
/*
* We only allow one untagged transaction
* per target in the initiator role unless
* we are storing a full busy target *lun*
* table in SCB space.
*/
if ((scb->hscb->control & (TARGET_SCB|TAG_ENB)) == 0
&& (ahc->features & AHC_SCB_BTT) == 0) {
struct scb_tailq *untagged_q;
untagged_q = &(ahc->untagged_queues[ccb->ccb_h.target_id]);
TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
if (TAILQ_FIRST(untagged_q) != scb) {
ahc_unlock(ahc, &s);
return;
}
}
scb->flags |= SCB_ACTIVE;
if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
pause_sequencer(ahc);
if ((ahc->flags & AHC_PAGESCBS) == 0)
ahc_outb(ahc, SCBPTR, scb->hscb->tag);
ahc_outb(ahc, SCB_TAG, scb->hscb->tag);
ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP);
unpause_sequencer(ahc);
} else {
ahc_queue_scb(ahc, scb);
}
ahc_unlock(ahc, &s);
}
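/*
 * Poll for command completions when interrupt-driven operation is
 * unavailable (e.g. during a crash dump).
 */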
static void
ahc_poll(struct cam_sim *sim)
{
ahc_intr(cam_sim_softc(sim));
}
static void
ahc_setup_data(struct ahc_softc *ahc, struct cam_sim *sim,
struct ccb_scsiio *csio, struct scb *scb)
{
struct hardware_scb *hscb;
struct ccb_hdr *ccb_h;
hscb = scb->hscb;
ccb_h = &csio->ccb_h;
if (ccb_h->func_code == XPT_SCSI_IO) {
hscb->cdb_len = csio->cdb_len;
if ((ccb_h->flags & CAM_CDB_POINTER) != 0) {
if (hscb->cdb_len > sizeof(hscb->cdb32)
|| (ccb_h->flags & CAM_CDB_PHYS) != 0) {
u_long s;
ahc_set_transaction_status(scb,
CAM_REQ_INVALID);
ahc_lock(ahc, &s);
ahc_free_scb(ahc, scb);
ahc_unlock(ahc, &s);
xpt_done((union ccb *)csio);
return;
}
if (hscb->cdb_len > 12) {
memcpy(hscb->cdb32,
csio->cdb_io.cdb_ptr,
hscb->cdb_len);
hscb->shared_data.cdb_ptr = scb->cdb32_busaddr;
} else {
memcpy(hscb->shared_data.cdb,
csio->cdb_io.cdb_ptr,
hscb->cdb_len);
}
} else {
if (hscb->cdb_len > 12) {
memcpy(hscb->cdb32, csio->cdb_io.cdb_bytes,
hscb->cdb_len);
hscb->shared_data.cdb_ptr = scb->cdb32_busaddr;
} else {
memcpy(hscb->shared_data.cdb,
csio->cdb_io.cdb_bytes,
hscb->cdb_len);
}
}
}
/* Only use S/G if there is a transfer */
if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
/* We've been given a pointer to a single buffer */
if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
int s;
int error;
s = splsoftvm();
error = bus_dmamap_load(ahc->buffer_dmat,
scb->dmamap,
csio->data_ptr,
csio->dxfer_len,
ahc_execute_scb,
scb, /*flags*/0);
if (error == EINPROGRESS) {
/*
* So as to maintain ordering,
* freeze the controller queue
* until our mapping is
* returned.
*/
xpt_freeze_simq(sim,
/*count*/1);
scb->io_ctx->ccb_h.status |=
CAM_RELEASE_SIMQ;
}
splx(s);
} else {
struct bus_dma_segment seg;
/* Pointer to physical buffer */
if (csio->dxfer_len > AHC_MAXTRANSFER_SIZE)
panic("ahc_setup_data - Transfer size "
"larger than can device max");
seg.ds_addr = (bus_addr_t)csio->data_ptr;
seg.ds_len = csio->dxfer_len;
ahc_execute_scb(scb, &seg, 1, 0);
}
} else {
struct bus_dma_segment *segs;
if ((ccb_h->flags & CAM_DATA_PHYS) != 0)
panic("ahc_setup_data - Physical segment "
"pointers unsupported");
if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0)
panic("ahc_setup_data - Virtual segment "
"addresses unsupported");
/* Just use the segments provided */
segs = (struct bus_dma_segment *)csio->data_ptr;
ahc_execute_scb(scb, segs, csio->sglist_cnt, 0);
}
} else {
ahc_execute_scb(scb, NULL, 0, 0);
}
}
static void
ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb)
{
if ((scb->flags & SCB_RECOVERY_SCB) == 0) {
struct scb *list_scb;
scb->flags |= SCB_RECOVERY_SCB;
/*
* Take all queued, but not sent SCBs out of the equation.
* Also ensure that no new CCBs are queued to us while we
* try to fix this problem.
*/
if ((scb->io_ctx->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
xpt_freeze_simq(SCB_GET_SIM(ahc, scb), /*count*/1);
scb->io_ctx->ccb_h.status |= CAM_RELEASE_SIMQ;
}
/*
* Go through all of our pending SCBs and remove
* any scheduled timeouts for them. We will reschedule
* them after we've successfully fixed this problem.
*/
LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
union ccb *ccb;
ccb = list_scb->io_ctx;
untimeout(ahc_timeout, list_scb, ccb->ccb_h.timeout_ch);
}
}
}
void
ahc_timeout(void *arg)
{
struct scb *scb;
struct ahc_softc *ahc;
long s;
int found;
u_int last_phase;
int target;
int lun;
int i;
char channel;
scb = (struct scb *)arg;
ahc = (struct ahc_softc *)scb->io_ctx->ccb_h.ccb_ahc_ptr;
ahc_lock(ahc, &s);
/*
* Ensure that the card doesn't do anything
* behind our back. Also make sure that we
* didn't "just" miss an interrupt that would
* affect this timeout.
*/
do {
ahc_intr(ahc);
pause_sequencer(ahc);
} while (ahc_inb(ahc, INTSTAT) & INT_PEND);
/* Make sure the sequencer is in a safe location. */
ahc_clear_critical_section(ahc);
ahc_print_path(ahc, scb);
if ((scb->flags & SCB_ACTIVE) == 0) {
/* Previous timeout took care of me already */
printf("Timedout SCB %d handled by another timeout\n",
scb->hscb->tag);
unpause_sequencer(ahc);
ahc_unlock(ahc, &s);
return;
}
target = SCB_GET_TARGET(ahc, scb);
channel = SCB_GET_CHANNEL(ahc, scb);
lun = SCB_GET_LUN(scb);
printf("SCB 0x%x - timed out ", scb->hscb->tag);
/*
* Take a snapshot of the bus state and print out
* some information so we can track down driver bugs.
*/
last_phase = ahc_inb(ahc, LASTPHASE);
for (i = 0; i < num_phases; i++) {
if (last_phase == phase_table[i].phase)
break;
}
printf("%s", phase_table[i].phasemsg);
printf(", SEQADDR == 0x%x\n",
ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8));
if (scb->sg_count > 0) {
for (i = 0; i < scb->sg_count; i++) {
printf("sg[%d] - Addr 0x%x : Length %d\n",
i,
scb->sg_list[i].addr,
scb->sg_list[i].len & AHC_SG_LEN_MASK);
}
}
if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) {
/*
* Been down this road before.
* Do a full bus reset.
*/
bus_reset:
ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
found = ahc_reset_channel(ahc, channel, /*Initiate Reset*/TRUE);
printf("%s: Issued Channel %c Bus Reset. "
"%d SCBs aborted\n", ahc_name(ahc), channel, found);
} else {
/*
* If we are a target, transition to bus free and report
* the timeout.
*
* The target/initiator that is holding up the bus may not
* be the same as the one that triggered this timeout
* (different commands have different timeout lengths).
* If the bus is idle and we are acting as the initiator
* for this request, queue a BDR message to the timed out
* target. Otherwise, if the timed out transaction is
* active:
* Initiator transaction:
* Stuff the message buffer with a BDR message and assert
* ATN in the hopes that the target will let go of the bus
* and go to the message-out phase. If this fails, we'll
* get another timeout 2 seconds later which will attempt
* a bus reset.
*
* Target transaction:
* Transition to BUS FREE and report the error.
* It's good to be the target!
*/
u_int active_scb_index;
active_scb_index = ahc_inb(ahc, SCB_TAG);
if (last_phase != P_BUSFREE
&& (active_scb_index < ahc->scb_data->numscbs)) {
struct scb *active_scb;
/*
* If the active SCB is not from our device,
* assume that another device is hogging the bus
* and wait for its timeout to expire before
* taking additional action.
*/
active_scb = ahc_lookup_scb(ahc, active_scb_index);
if (active_scb->hscb->scsiid != scb->hscb->scsiid
|| active_scb->hscb->lun != scb->hscb->lun) {
struct ccb_hdr *ccbh;
u_int newtimeout;
ahc_print_path(ahc, scb);
printf("Other SCB Timeout%s",
(scb->flags & SCB_OTHERTCL_TIMEOUT) != 0
? " again\n" : "\n");
scb->flags |= SCB_OTHERTCL_TIMEOUT;
newtimeout =
MAX(active_scb->io_ctx->ccb_h.timeout,
scb->io_ctx->ccb_h.timeout);
ccbh = &scb->io_ctx->ccb_h;
scb->io_ctx->ccb_h.timeout_ch =
timeout(ahc_timeout, scb,
(newtimeout * hz) / 1000);
ahc_unlock(ahc, &s);
return;
}
/* It's us */
if ((scb->hscb->control & TARGET_SCB) != 0) {
/*
* Send back any queued up transactions
* and properly record the error condition.
*/
ahc_freeze_devq(ahc, scb);
ahc_set_transaction_status(scb,
CAM_CMD_TIMEOUT);
ahc_freeze_scb(scb);
ahc_done(ahc, scb);
/* Will clear us from the bus */
restart_sequencer(ahc);
ahc_unlock(ahc, &s);
return;
}
ahc_set_recoveryscb(ahc, active_scb);
ahc_outb(ahc, MSG_OUT, HOST_MSG);
ahc_outb(ahc, SCSISIGO, last_phase|ATNO);
ahc_print_path(ahc, active_scb);
printf("BDR message in message buffer\n");
active_scb->flags |= SCB_DEVICE_RESET;
active_scb->io_ctx->ccb_h.timeout_ch =
timeout(ahc_timeout, (caddr_t)active_scb, 2 * hz);
unpause_sequencer(ahc);
} else {
int disconnected;
/* XXX Shouldn't panic. Just punt instead */
if ((scb->hscb->control & TARGET_SCB) != 0)
panic("Timed-out target SCB but bus idle");
if (last_phase != P_BUSFREE
&& (ahc_inb(ahc, SSTAT0) & TARGET) != 0) {
/* XXX What happened to the SCB? */
/* Hung target selection. Goto busfree */
printf("%s: Hung target selection\n",
ahc_name(ahc));
restart_sequencer(ahc);
ahc_unlock(ahc, &s);
return;
}
if (ahc_search_qinfifo(ahc, target, channel, lun,
scb->hscb->tag, ROLE_INITIATOR,
/*status*/0, SEARCH_COUNT) > 0) {
disconnected = FALSE;
} else {
disconnected = TRUE;
}
if (disconnected) {
struct scb *prev_scb;
ahc_set_recoveryscb(ahc, scb);
/*
* Actually re-queue this SCB in an attempt
* to select the device before it reconnects.
* In either case (selection or reselection),
* we will now issue a target reset to the
* timed-out device.
*
* Set the MK_MESSAGE control bit indicating
* that we desire to send a message. We
* also set the disconnected flag since
* in the paging case there is no guarantee
* that our SCB control byte matches the
* version on the card. We don't want the
* sequencer to abort the command thinking
* an unsolicited reselection occurred.
*/
scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
scb->flags |= SCB_DEVICE_RESET;
/*
* Remove any cached copy of this SCB in the
* disconnected list in preparation for the
* queuing of our abort SCB. We use the
* same element in the SCB, SCB_NEXT, for
* both the qinfifo and the disconnected list.
*/
ahc_search_disc_list(ahc, target, channel,
lun, scb->hscb->tag,
/*stop_on_first*/TRUE,
/*remove*/TRUE,
/*save_state*/FALSE);
/*
* In the non-paging case, the sequencer will
* never re-reference the in-core SCB.
* To make sure we are notified during
* reselection, set the MK_MESSAGE flag in
* the card's copy of the SCB.
*/
if ((ahc->flags & AHC_PAGESCBS) != 0) {
ahc_outb(ahc, SCBPTR, scb->hscb->tag);
ahc_outb(ahc, SCB_CONTROL,
ahc_inb(ahc, SCB_CONTROL)
| MK_MESSAGE);
}
/*
* Clear out any entries in the QINFIFO first
* so we are the next SCB for this target
* to run.
*/
ahc_search_qinfifo(ahc,
SCB_GET_TARGET(ahc, scb),
channel, SCB_GET_LUN(scb),
SCB_LIST_NULL,
ROLE_INITIATOR,
CAM_REQUEUE_REQ,
SEARCH_COMPLETE);
ahc_print_path(ahc, scb);
printf("Queuing a BDR SCB\n");
prev_scb = NULL;
if (ahc_qinfifo_count(ahc) != 0) {
u_int prev_tag;
prev_tag =
ahc->qinfifo[ahc->qinfifonext - 1];
prev_scb = ahc_lookup_scb(ahc,
prev_tag);
}
ahc_qinfifo_requeue(ahc, prev_scb, scb);
ahc_outb(ahc, SCBPTR, active_scb_index);
scb->io_ctx->ccb_h.timeout_ch =
timeout(ahc_timeout, (caddr_t)scb, 2 * hz);
unpause_sequencer(ahc);
} else {
/* Go "immediatly" to the bus reset */
/* This shouldn't happen */
ahc_set_recoveryscb(ahc, scb);
ahc_print_path(ahc, scb);
printf("SCB %d: Immediate reset. "
"Flags = 0x%x\n", scb->hscb->tag,
scb->flags);
goto bus_reset;
}
}
}
ahc_unlock(ahc, &s);
}
static void
ahc_abort_ccb(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
{
union ccb *abort_ccb;
abort_ccb = ccb->cab.abort_ccb;
switch (abort_ccb->ccb_h.func_code) {
case XPT_ACCEPT_TARGET_IO:
case XPT_IMMED_NOTIFY:
case XPT_CONT_TARGET_IO:
{
struct tmode_tstate *tstate;
struct tmode_lstate *lstate;
struct ccb_hdr_slist *list;
cam_status status;
status = ahc_find_tmode_devs(ahc, sim, abort_ccb, &tstate,
&lstate, TRUE);
if (status != CAM_REQ_CMP) {
ccb->ccb_h.status = status;
break;
}
if (abort_ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO)
list = &lstate->accept_tios;
else if (abort_ccb->ccb_h.func_code == XPT_IMMED_NOTIFY)
list = &lstate->immed_notifies;
else
list = NULL;
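/*
 * Walk the list looking for the CCB to abort, unlinking it whether
 * it sits at the head or in the interior of the list.
 */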
if (list != NULL) {
struct ccb_hdr *curelm;
int found;
curelm = SLIST_FIRST(list);
found = 0;
if (curelm == &abort_ccb->ccb_h) {
found = 1;
SLIST_REMOVE_HEAD(list, sim_links.sle);
} else {
while(curelm != NULL) {
struct ccb_hdr *nextelm;
nextelm =
SLIST_NEXT(curelm, sim_links.sle);
if (nextelm == &abort_ccb->ccb_h) {
found = 1;
SLIST_NEXT(curelm,
sim_links.sle) =
SLIST_NEXT(nextelm,
sim_links.sle);
break;
}
curelm = nextelm;
}
}
if (found) {
abort_ccb->ccb_h.status = CAM_REQ_ABORTED;
xpt_done(abort_ccb);
ccb->ccb_h.status = CAM_REQ_CMP;
} else {
printf("Not found\n");
ccb->ccb_h.status = CAM_PATH_INVALID;
}
break;
}
/* FALLTHROUGH */
}
case XPT_SCSI_IO:
/* XXX Fully implement the hard ones */
ccb->ccb_h.status = CAM_UA_ABORT;
break;
default:
ccb->ccb_h.status = CAM_REQ_INVALID;
break;
}
xpt_done(ccb);
}
void
ahc_send_async(struct ahc_softc *ahc, char channel, u_int target,
u_int lun, ac_code code)
{
struct ccb_trans_settings cts;
struct cam_path *path;
void *arg;
int error;
arg = NULL;
error = ahc_create_path(ahc, channel, target, lun, &path);
if (error != CAM_REQ_CMP)
return;
switch (code) {
case AC_TRANSFER_NEG:
#ifdef AHC_NEW_TRAN_SETTINGS
cts.type = CTS_TYPE_CURRENT_SETTINGS;
#else
cts.flags = CCB_TRANS_CURRENT_SETTINGS;
#endif
cts.ccb_h.path = path;
cts.ccb_h.target_id = target;
cts.ccb_h.target_lun = lun;
ahc_get_tran_settings(ahc, channel == 'A' ? ahc->our_id
: ahc->our_id_b,
channel, &cts);
arg = &cts;
break;
case AC_SENT_BDR:
case AC_BUS_RESET:
break;
default:
panic("ahc_send_async: Unexpected async event");
}
xpt_async(code, path, arg);
}
void
ahc_platform_set_tags(struct ahc_softc *ahc,
struct ahc_devinfo *devinfo, int enable)
{
}
int
ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
{
ahc->platform_data =
malloc(sizeof(struct ahc_platform_data), M_DEVBUF, M_NOWAIT);
if (ahc->platform_data == NULL)
return (ENOMEM);
memset(ahc->platform_data, 0, sizeof(struct ahc_platform_data));
return (0);
}
void
ahc_platform_free(struct ahc_softc *ahc)
{
if (ahc->platform_data != NULL) {
if (ahc->platform_data->regs != NULL)
bus_release_resource(ahc->dev_softc,
ahc->platform_data->regs_res_type,
ahc->platform_data->regs_res_id,
ahc->platform_data->regs);
if (ahc->platform_data->irq != NULL)
bus_release_resource(ahc->dev_softc,
ahc->platform_data->irq_res_type,
0, ahc->platform_data->irq);
free(ahc->platform_data, M_DEVBUF);
}
}
int
ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc)
{
/* We don't sort softcs under FreeBSD so report equal always */
return (0);
}
#if UNUSED
static void
ahc_dump_targcmd(struct target_cmd *cmd)
{
uint8_t *byte;
uint8_t *last_byte;
int i;
byte = &cmd->initiator_channel;
/* Debugging info for received commands */
last_byte = &cmd[1].initiator_channel;
i = 0;
while (byte < last_byte) {
if (i == 0)
printf("\t");
printf("%#x", *byte++);
i++;
if (i == 8) {
printf("\n");
i = 0;
} else {
printf(", ");
}
}
}
#endif