MFC Alexander Motin's direct dispatch, multi-queue, and finer-grained
locking support for CAM

r256826:
Fix several target-mode SIMs to not blindly clear the ccb_h.flags field of
ATIO CCBs.  Not all of the CCB flags there belong to them.
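A minimal sketch of the pattern being fixed (the helper name is hypothetical;
only the masking idea comes from the commit): a target-mode SIM should clear
and set only the ccb_h.flags bits it owns, instead of assigning the whole
field and wiping out flags such as CAM_UNLOCKED that belong to upper layers.

    #include <cam/cam.h>
    #include <cam/cam_ccb.h>

    /* Hypothetical fragment of a target-mode SIM completing an ATIO. */
    static void
    sim_finish_atio(struct ccb_accept_tio *atio)
    {
            /*
             * Wrong: "atio->ccb_h.flags = CAM_DIR_IN;" discards bits set
             * by other layers.  Instead, touch only the SIM-owned bits
             * (here just the direction; the exact set depends on the SIM).
             */
            atio->ccb_h.flags &= ~CAM_DIR_MASK;
            atio->ccb_h.flags |= CAM_DIR_IN;
    }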

r256836:
Remove hard limit on number of BIOs handled with one ATA TRIM request.
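The ada(4) hunk below shows how: the fixed-size bps[] array in struct
trim_request is replaced by a bio TAILQ, so only the range budget in data[]
limits how many BIOs are coalesced.  A simplified before/after sketch (the
EX_* constants stand in for the driver's TRIM_MAX_RANGES and
ATA_DSM_RANGE_SIZE values):

    #include <sys/param.h>
    #include <sys/queue.h>

    struct bio;                                   /* from <sys/bio.h> */

    #define EX_TRIM_RANGES  512                   /* stand-in for TRIM_MAX_RANGES */
    #define EX_RANGE_SIZE   8                     /* stand-in for ATA_DSM_RANGE_SIZE */
    #define EX_TRIM_BIOS    (EX_TRIM_RANGES * 4)  /* old TRIM_MAX_BIOS formula */

    /* Before: at most EX_TRIM_BIOS bios per TRIM request. */
    struct ex_trim_request_old {
            uint8_t         data[EX_TRIM_RANGES * EX_RANGE_SIZE];
            struct bio      *bps[EX_TRIM_BIOS];
    };

    /* After: bios chain through their bio_queue linkage; no hard BIO cap. */
    struct ex_trim_request_new {
            uint8_t         data[EX_TRIM_RANGES * EX_RANGE_SIZE];
            TAILQ_HEAD(, bio) bps;
    };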

r256843:
Merge CAM locking changes from the projects/camlock branch to radically
reduce lock congestion and improve SMP scalability of the SCSI/ATA stack,
preparing the ground for the upcoming GEOM direct dispatch support.
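The new per-device (path) locking shows up throughout the diff below; the
ada(4) dispatch path is representative.  A condensed sketch of the idiom,
taken from the adastart()/adaclose() hunks (the wrapper function itself is
illustrative): mark the CCB CAM_UNLOCKED, count the caller in
softc->refcount, and drop the periph lock around xpt_action() so the SIM
runs without it.

    #include <cam/cam.h>
    #include <cam/cam_ccb.h>
    #include <cam/cam_periph.h>

    /*
     * Illustrative wrapper condensed from the adastart() hunk below;
     * struct ada_softc is the driver's per-device softc (see ata_da.c).
     */
    static void
    example_dispatch(struct cam_periph *periph, struct ada_softc *softc,
        union ccb *start_ccb)
    {
            cam_periph_assert(periph, MA_OWNED);

            /* Completion callback may now run without the periph lock. */
            start_ccb->ccb_h.flags |= CAM_UNLOCKED;

            softc->refcount++;          /* adaclose() waits for this to drain */
            cam_periph_unlock(periph);
            xpt_action(start_ccb);      /* SIM may even complete the CCB here */
            cam_periph_lock(periph);
            softc->refcount--;
    }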

r256888:
Unconditionally acquire periph reference on CCB allocation failure.

r256895:
Fix a memory and reference leak due to an unfreed path.

r256960:
Move the CAM_UNQUEUED_INDEX setting to the last moment and under the periph lock.
This fixes a race condition with cam_periph_ccbwait() that caused a use-after-free.

r256975:
Minor (mostly cosmetic) addition to r256960.

r257054:
Some micro-optimizations for the da and ada drivers:
 - Replace the ordered_tag_count counter with a single flag;
 - Remove from da the outstanding_cmds counter, which duplicated the
   pending_ccbs list;
 - Remove the unused links field from da_softc.
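The adasendorderedtag() hunk below shows the flag replacement; condensed to
the flag logic only (locking and the callout rescheduling omitted):

    /*
     * ADA_FLAG_WAS_OTAG records that an ordered tag was issued (or the
     * queue went idle) since the last timer tick.  If it is still clear
     * while commands are outstanding, ask adastart() to issue one.
     */
    static void
    example_sendorderedtag(struct ada_softc *softc)
    {
            if (softc->outstanding_cmds > 0) {
                    if ((softc->flags & ADA_FLAG_WAS_OTAG) == 0)
                            softc->flags |= ADA_FLAG_NEED_OTAG;
                    softc->flags &= ~ADA_FLAG_WAS_OTAG;
            }
    }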

r257482:
Fix lock recursion, triggered by `smartctl -a /dev/adaX`.

r257501:
Make the getenv_*() functions, and thus the TUNABLE_*_FETCH() macros, not
allocate memory and so not require a sleepable environment.  getenv() already
used on-stack temporary storage, so just use it more rationally.
getenv_string() receives a buffer as an argument, so it doesn't need another one.
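This makes TUNABLE_*_FETCH() usable in contexts that must not sleep.  A
minimal sketch, assuming a made-up tunable name:

    #include <sys/param.h>
    #include <sys/kernel.h>
    #include <sys/systm.h>

    static int example_retries = 4;     /* compiled-in default */

    static void
    example_read_tunable(void)
    {
            /*
             * TUNABLE_INT_FETCH() expands to getenv_int(), which after
             * r257501 uses on-stack storage instead of allocating, so
             * this is safe even where sleeping is not allowed.
             */
            TUNABLE_INT_FETCH("kern.example.retries", &example_retries);
    }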

r257914:
Some CAM locking polishing:
 - Fix a LOR and possible lock recursion when handling high-power commands.
   Introduce a new lock to protect the remaining power quota and the list of
   frozen devices.
 - Correct locking around xpt periph creation.
 - Remove the seemingly never-used XPT_FLAG_OPEN xpt periph flag.

Again, Netflix assisted with testing the merge, but all of the credit goes
to Alexander and iX Systems.

Submitted by:	mav
Sponsored by:	iX Systems
Committed by:	scottl
Date:		2014-01-07 01:51:48 +00:00
Parent:		0a34594b9c
Commit:		cd4455d638
37 changed files with 1678 additions and 1701 deletions


@ -80,7 +80,7 @@ typedef enum {
ADA_FLAG_CAN_NCQ = 0x0008,
ADA_FLAG_CAN_DMA = 0x0010,
ADA_FLAG_NEED_OTAG = 0x0020,
ADA_FLAG_WENT_IDLE = 0x0040,
ADA_FLAG_WAS_OTAG = 0x0040,
ADA_FLAG_CAN_TRIM = 0x0080,
ADA_FLAG_OPEN = 0x0100,
ADA_FLAG_SCTX_INIT = 0x0200,
@ -103,7 +103,6 @@ typedef enum {
ADA_CCB_RAHEAD = 0x01,
ADA_CCB_WCACHE = 0x02,
ADA_CCB_BUFFER_IO = 0x03,
ADA_CCB_WAITING = 0x04,
ADA_CCB_DUMP = 0x05,
ADA_CCB_TRIM = 0x06,
ADA_CCB_TYPE_MASK = 0x0F,
@ -123,21 +122,20 @@ struct disk_params {
#define TRIM_MAX_BLOCKS 8
#define TRIM_MAX_RANGES (TRIM_MAX_BLOCKS * ATA_DSM_BLK_RANGES)
#define TRIM_MAX_BIOS (TRIM_MAX_RANGES * 4)
struct trim_request {
uint8_t data[TRIM_MAX_RANGES * ATA_DSM_RANGE_SIZE];
struct bio *bps[TRIM_MAX_BIOS];
TAILQ_HEAD(, bio) bps;
};
struct ada_softc {
struct bio_queue_head bio_queue;
struct bio_queue_head trim_queue;
int outstanding_cmds; /* Number of active commands */
int refcount; /* Active xpt_action() calls */
ada_state state;
ada_flags flags;
ada_flags flags;
ada_quirks quirks;
int sort_io_queue;
int ordered_tag_count;
int outstanding_cmds;
int trim_max_ranges;
int trim_running;
int read_ahead;
@ -630,14 +628,8 @@ adaclose(struct disk *dp)
int error;
periph = (struct cam_periph *)dp->d_drv1;
cam_periph_lock(periph);
if (cam_periph_hold(periph, PRIBIO) != 0) {
cam_periph_unlock(periph);
cam_periph_release(periph);
return (0);
}
softc = (struct ada_softc *)periph->softc;
cam_periph_lock(periph);
CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
("adaclose\n"));
@ -645,7 +637,8 @@ adaclose(struct disk *dp)
/* We only sync the cache if the drive is capable of it. */
if ((softc->flags & ADA_FLAG_DIRTY) != 0 &&
(softc->flags & ADA_FLAG_CAN_FLUSHCACHE) != 0 &&
(periph->flags & CAM_PERIPH_INVALID) == 0) {
(periph->flags & CAM_PERIPH_INVALID) == 0 &&
cam_periph_hold(periph, PRIBIO) == 0) {
ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
cam_fill_ataio(&ccb->ataio,
@ -669,10 +662,13 @@ adaclose(struct disk *dp)
else
softc->flags &= ~ADA_FLAG_DIRTY;
xpt_release_ccb(ccb);
cam_periph_unhold(periph);
}
softc->flags &= ~ADA_FLAG_OPEN;
cam_periph_unhold(periph);
while (softc->refcount != 0)
cam_periph_sleep(periph, &softc->refcount, PRIBIO, "adaclose", 1);
cam_periph_unlock(periph);
cam_periph_release(periph);
return (0);
@ -682,23 +678,15 @@ static void
adaschedule(struct cam_periph *periph)
{
struct ada_softc *softc = (struct ada_softc *)periph->softc;
uint32_t prio;
if (softc->state != ADA_STATE_NORMAL)
return;
/* Check if cam_periph_getccb() was called. */
prio = periph->immediate_priority;
/* Check if we have more work to do. */
if (bioq_first(&softc->bio_queue) ||
(!softc->trim_running && bioq_first(&softc->trim_queue))) {
prio = CAM_PRIORITY_NORMAL;
xpt_schedule(periph, CAM_PRIORITY_NORMAL);
}
/* Schedule CCB if any of above is true. */
if (prio != CAM_PRIORITY_NONE)
xpt_schedule(periph, prio);
}
/*
@ -962,7 +950,7 @@ adaasync(void *callback_arg, u_int32_t code,
status = cam_periph_alloc(adaregister, adaoninvalidate,
adacleanup, adastart,
"ada", CAM_PERIPH_BIO,
cgd->ccb_h.path, adaasync,
path, adaasync,
AC_FOUND_DEVICE, cgd);
if (status != CAM_REQ_CMP
@ -1038,8 +1026,10 @@ adaasync(void *callback_arg, u_int32_t code,
softc->state = ADA_STATE_WCACHE;
else
break;
cam_periph_acquire(periph);
xpt_schedule(periph, CAM_PRIORITY_DEV);
if (cam_periph_acquire(periph) != CAM_REQ_CMP)
softc->state = ADA_STATE_NORMAL;
else
xpt_schedule(periph, CAM_PRIORITY_DEV);
}
default:
cam_periph_async(periph, code, path, arg);
@ -1346,8 +1336,8 @@ adaregister(struct cam_periph *periph, void *arg)
* Create our sysctl variables, now that we know
* we have successfully attached.
*/
cam_periph_acquire(periph);
taskqueue_enqueue(taskqueue_thread, &softc->sysctl_task);
if (cam_periph_acquire(periph) == CAM_REQ_CMP)
taskqueue_enqueue(taskqueue_thread, &softc->sysctl_task);
/*
* Add async callbacks for bus reset and
@ -1365,7 +1355,7 @@ adaregister(struct cam_periph *periph, void *arg)
* Schedule a periodic event to occasionally send an
* ordered tag to a device.
*/
callout_init_mtx(&softc->sendordered_c, periph->sim->mtx, 0);
callout_init_mtx(&softc->sendordered_c, cam_periph_mtx(periph), 0);
callout_reset(&softc->sendordered_c,
(ada_default_timeout * hz) / ADA_ORDEREDTAG_INTERVAL,
adasendorderedtag, softc);
@ -1373,16 +1363,17 @@ adaregister(struct cam_periph *periph, void *arg)
if (ADA_RA >= 0 &&
cgd->ident_data.support.command1 & ATA_SUPPORT_LOOKAHEAD) {
softc->state = ADA_STATE_RAHEAD;
cam_periph_acquire(periph);
xpt_schedule(periph, CAM_PRIORITY_DEV);
} else if (ADA_WC >= 0 &&
cgd->ident_data.support.command1 & ATA_SUPPORT_WRITECACHE) {
softc->state = ADA_STATE_WCACHE;
cam_periph_acquire(periph);
xpt_schedule(periph, CAM_PRIORITY_DEV);
} else
} else {
softc->state = ADA_STATE_NORMAL;
return(CAM_REQ_CMP);
}
if (cam_periph_acquire(periph) != CAM_REQ_CMP)
softc->state = ADA_STATE_NORMAL;
else
xpt_schedule(periph, CAM_PRIORITY_DEV);
return(CAM_REQ_CMP);
}
@ -1400,29 +1391,17 @@ adastart(struct cam_periph *periph, union ccb *start_ccb)
struct bio *bp;
u_int8_t tag_code;
/* Execute immediate CCB if waiting. */
if (periph->immediate_priority <= periph->pinfo.priority) {
CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
("queuing for immediate ccb\n"));
start_ccb->ccb_h.ccb_state = ADA_CCB_WAITING;
SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
periph_links.sle);
periph->immediate_priority = CAM_PRIORITY_NONE;
wakeup(&periph->ccb_list);
/* Have more work to do, so ensure we stay scheduled */
adaschedule(periph);
break;
}
/* Run TRIM if not running yet. */
if (!softc->trim_running &&
(bp = bioq_first(&softc->trim_queue)) != 0) {
struct trim_request *req = &softc->trim_req;
struct bio *bp1;
uint64_t lastlba = (uint64_t)-1;
int bps = 0, c, lastcount = 0, off, ranges = 0;
int c, lastcount = 0, off, ranges = 0;
softc->trim_running = 1;
bzero(req, sizeof(*req));
TAILQ_INIT(&req->bps);
bp1 = bp;
do {
uint64_t lba = bp1->bio_pblkno;
@ -1465,10 +1444,9 @@ adastart(struct cam_periph *periph, union ccb *start_ccb)
*/
}
lastlba = lba;
req->bps[bps++] = bp1;
TAILQ_INSERT_TAIL(&req->bps, bp1, bio_queue);
bp1 = bioq_first(&softc->trim_queue);
if (bps >= TRIM_MAX_BIOS ||
bp1 == NULL ||
if (bp1 == NULL ||
bp1->bio_bcount / softc->params.secsize >
(softc->trim_max_ranges - ranges) *
ATA_DSM_RANGE_MAX)
@ -1487,6 +1465,7 @@ adastart(struct cam_periph *periph, union ccb *start_ccb)
ATA_DSM_TRIM, 0, (ranges + ATA_DSM_BLK_RANGES -
1) / ATA_DSM_BLK_RANGES);
start_ccb->ccb_h.ccb_state = ADA_CCB_TRIM;
start_ccb->ccb_h.flags |= CAM_UNLOCKED;
goto out;
}
/* Run regular command. */
@ -1500,7 +1479,7 @@ adastart(struct cam_periph *periph, union ccb *start_ccb)
if ((bp->bio_flags & BIO_ORDERED) != 0
|| (softc->flags & ADA_FLAG_NEED_OTAG) != 0) {
softc->flags &= ~ADA_FLAG_NEED_OTAG;
softc->ordered_tag_count++;
softc->flags |= ADA_FLAG_WAS_OTAG;
tag_code = 0;
} else {
tag_code = 1;
@ -1655,10 +1634,15 @@ adastart(struct cam_periph *periph, union ccb *start_ccb)
break;
}
start_ccb->ccb_h.ccb_state = ADA_CCB_BUFFER_IO;
start_ccb->ccb_h.flags |= CAM_UNLOCKED;
out:
start_ccb->ccb_h.ccb_bp = bp;
softc->outstanding_cmds++;
softc->refcount++;
cam_periph_unlock(periph);
xpt_action(start_ccb);
cam_periph_lock(periph);
softc->refcount--;
/* May have more work to do, so ensure we stay scheduled */
adaschedule(periph);
@ -1667,13 +1651,6 @@ out:
case ADA_STATE_RAHEAD:
case ADA_STATE_WCACHE:
{
if ((periph->flags & CAM_PERIPH_INVALID) != 0) {
softc->state = ADA_STATE_NORMAL;
xpt_release_ccb(start_ccb);
cam_periph_release_locked(periph);
return;
}
cam_fill_ataio(ataio,
1,
adadone,
@ -1722,10 +1699,12 @@ adadone(struct cam_periph *periph, union ccb *done_ccb)
struct bio *bp;
int error;
cam_periph_lock(periph);
if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
error = adaerror(done_ccb, 0, 0);
if (error == ERESTART) {
/* A retry was scheduled, so just return. */
cam_periph_unlock(periph);
return;
}
if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
@ -1754,29 +1733,32 @@ adadone(struct cam_periph *periph, union ccb *done_ccb)
}
softc->outstanding_cmds--;
if (softc->outstanding_cmds == 0)
softc->flags |= ADA_FLAG_WENT_IDLE;
softc->flags |= ADA_FLAG_WAS_OTAG;
xpt_release_ccb(done_ccb);
if (state == ADA_CCB_TRIM) {
struct trim_request *req =
(struct trim_request *)ataio->data_ptr;
int i;
TAILQ_HEAD(, bio) queue;
struct bio *bp1;
for (i = 1; i < TRIM_MAX_BIOS && req->bps[i]; i++) {
struct bio *bp1 = req->bps[i];
bp1->bio_error = bp->bio_error;
if (bp->bio_flags & BIO_ERROR) {
TAILQ_INIT(&queue);
TAILQ_CONCAT(&queue, &softc->trim_req.bps, bio_queue);
softc->trim_running = 0;
adaschedule(periph);
cam_periph_unlock(periph);
while ((bp1 = TAILQ_FIRST(&queue)) != NULL) {
TAILQ_REMOVE(&queue, bp1, bio_queue);
bp1->bio_error = error;
if (error != 0) {
bp1->bio_flags |= BIO_ERROR;
bp1->bio_resid = bp1->bio_bcount;
} else
bp1->bio_resid = 0;
biodone(bp1);
}
softc->trim_running = 0;
} else {
cam_periph_unlock(periph);
biodone(bp);
adaschedule(periph);
} else
biodone(bp);
break;
}
return;
}
case ADA_CCB_RAHEAD:
{
@ -1852,12 +1834,6 @@ out:
cam_periph_release_locked(periph);
return;
}
case ADA_CCB_WAITING:
{
/* Caller will release the CCB */
wakeup(&done_ccb->ccb_h.cbfcnp);
return;
}
case ADA_CCB_DUMP:
/* No-op. We're polling */
return;
@ -1919,14 +1895,11 @@ adasendorderedtag(void *arg)
struct ada_softc *softc = arg;
if (ada_send_ordered) {
if ((softc->ordered_tag_count == 0)
&& ((softc->flags & ADA_FLAG_WENT_IDLE) == 0)) {
softc->flags |= ADA_FLAG_NEED_OTAG;
if (softc->outstanding_cmds > 0) {
if ((softc->flags & ADA_FLAG_WAS_OTAG) == 0)
softc->flags |= ADA_FLAG_NEED_OTAG;
softc->flags &= ~ADA_FLAG_WAS_OTAG;
}
if (softc->outstanding_cmds > 0)
softc->flags &= ~ADA_FLAG_WENT_IDLE;
softc->ordered_tag_count = 0;
}
/* Queue us up again */
callout_reset(&softc->sendordered_c,


@ -293,7 +293,7 @@ pmpasync(void *callback_arg, u_int32_t code,
status = cam_periph_alloc(pmpregister, pmponinvalidate,
pmpcleanup, pmpstart,
"pmp", CAM_PERIPH_BIO,
cgd->ccb_h.path, pmpasync,
path, pmpasync,
AC_FOUND_DEVICE, cgd);
if (status != CAM_REQ_CMP
@ -318,13 +318,17 @@ pmpasync(void *callback_arg, u_int32_t code,
if (code == AC_SENT_BDR || code == AC_BUS_RESET)
softc->found = 0; /* We have to reset everything. */
if (softc->state == PMP_STATE_NORMAL) {
if (softc->pm_pid == 0x37261095 ||
softc->pm_pid == 0x38261095)
softc->state = PMP_STATE_PM_QUIRKS_1;
else
softc->state = PMP_STATE_PRECONFIG;
cam_periph_acquire(periph);
xpt_schedule(periph, CAM_PRIORITY_DEV);
if (cam_periph_acquire(periph) == CAM_REQ_CMP) {
if (softc->pm_pid == 0x37261095 ||
softc->pm_pid == 0x38261095)
softc->state = PMP_STATE_PM_QUIRKS_1;
else
softc->state = PMP_STATE_PRECONFIG;
xpt_schedule(periph, CAM_PRIORITY_DEV);
} else {
pmprelease(periph, softc->found);
xpt_release_boot();
}
} else
softc->restart = 1;
break;


@ -182,7 +182,7 @@ static struct cam_ed *
static void ata_device_transport(struct cam_path *path);
static void ata_get_transfer_settings(struct ccb_trans_settings *cts);
static void ata_set_transfer_settings(struct ccb_trans_settings *cts,
struct cam_ed *device,
struct cam_path *path,
int async_update);
static void ata_dev_async(u_int32_t async_code,
struct cam_eb *bus,
@ -249,6 +249,7 @@ proberegister(struct cam_periph *periph, void *arg)
return (status);
}
CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe started\n"));
ata_device_transport(periph->path);
probeschedule(periph);
return(CAM_REQ_CMP);
}
@ -1320,6 +1321,7 @@ ata_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
struct cam_path *path;
ata_scan_bus_info *scan_info;
union ccb *work_ccb, *reset_ccb;
struct mtx *mtx;
cam_status status;
CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
@ -1395,11 +1397,14 @@ ata_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
xpt_done(request_ccb);
break;
}
mtx = xpt_path_mtx(scan_info->request_ccb->ccb_h.path);
goto scan_next;
case XPT_SCAN_LUN:
work_ccb = request_ccb;
/* Reuse the same CCB to query if a device was really found */
scan_info = (ata_scan_bus_info *)work_ccb->ccb_h.ppriv_ptr0;
mtx = xpt_path_mtx(scan_info->request_ccb->ccb_h.path);
mtx_lock(mtx);
/* If there is PMP... */
if ((scan_info->cpi->hba_inquiry & PI_SATAPM) &&
(scan_info->counter == scan_info->cpi->max_target)) {
@ -1428,6 +1433,7 @@ ata_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
((scan_info->cpi->hba_inquiry & PI_SATAPM) ?
0 : scan_info->cpi->max_target)) {
done:
mtx_unlock(mtx);
xpt_free_ccb(work_ccb);
xpt_free_ccb((union ccb *)scan_info->cpi);
request_ccb = scan_info->request_ccb;
@ -1444,6 +1450,8 @@ scan_next:
scan_info->request_ccb->ccb_h.path_id,
scan_info->counter, 0);
if (status != CAM_REQ_CMP) {
if (request_ccb->ccb_h.func_code == XPT_SCAN_LUN)
mtx_unlock(mtx);
printf("xpt_scan_bus: xpt_create_path failed"
" with status %#x, bus scan halted\n",
status);
@ -1459,9 +1467,15 @@ scan_next:
scan_info->request_ccb->ccb_h.pinfo.priority);
work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
work_ccb->ccb_h.cbfcnp = ata_scan_bus;
work_ccb->ccb_h.flags |= CAM_UNLOCKED;
work_ccb->ccb_h.ppriv_ptr0 = scan_info;
work_ccb->crcn.flags = scan_info->request_ccb->crcn.flags;
mtx_unlock(mtx);
if (request_ccb->ccb_h.func_code == XPT_SCAN_LUN)
mtx = NULL;
xpt_action(work_ccb);
if (mtx != NULL)
mtx_lock(mtx);
break;
default:
break;
@ -1476,6 +1490,7 @@ ata_scan_lun(struct cam_periph *periph, struct cam_path *path,
cam_status status;
struct cam_path *new_path;
struct cam_periph *old_periph;
int lock;
CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_scan_lun\n"));
@ -1510,10 +1525,14 @@ ata_scan_lun(struct cam_periph *periph, struct cam_path *path,
}
xpt_setup_ccb(&request_ccb->ccb_h, new_path, CAM_PRIORITY_XPT);
request_ccb->ccb_h.cbfcnp = xptscandone;
request_ccb->ccb_h.flags |= CAM_UNLOCKED;
request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
request_ccb->crcn.flags = flags;
}
lock = (xpt_path_owned(path) == 0);
if (lock)
xpt_path_lock(path);
if ((old_periph = cam_periph_find(path, "aprobe")) != NULL) {
if ((old_periph->flags & CAM_PERIPH_INVALID) == 0) {
probe_softc *softc;
@ -1540,6 +1559,8 @@ ata_scan_lun(struct cam_periph *periph, struct cam_path *path,
xpt_done(request_ccb);
}
}
if (lock)
xpt_path_unlock(path);
}
static void
@ -1553,7 +1574,6 @@ xptscandone(struct cam_periph *periph, union ccb *done_ccb)
static struct cam_ed *
ata_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
{
struct cam_path path;
struct ata_quirk_entry *quirk;
struct cam_ed *device;
@ -1574,22 +1594,6 @@ ata_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
device->queue_flags = 0;
device->serial_num = NULL;
device->serial_num_len = 0;
/*
* XXX should be limited by number of CCBs this bus can
* do.
*/
bus->sim->max_ccbs += device->ccbq.devq_openings;
if (lun_id != CAM_LUN_WILDCARD) {
xpt_compile_path(&path,
NULL,
bus->path_id,
target->target_id,
lun_id);
ata_device_transport(&path);
xpt_release_path(&path);
}
return (device);
}
@ -1712,15 +1716,8 @@ ata_dev_advinfo(union ccb *start_ccb)
start_ccb->ccb_h.status = CAM_REQ_CMP;
if (cdai->flags & CDAI_FLAG_STORE) {
int owned;
owned = mtx_owned(start_ccb->ccb_h.path->bus->sim->mtx);
if (owned == 0)
mtx_lock(start_ccb->ccb_h.path->bus->sim->mtx);
xpt_async(AC_ADVINFO_CHANGED, start_ccb->ccb_h.path,
(void *)(uintptr_t)cdai->buftype);
if (owned == 0)
mtx_unlock(start_ccb->ccb_h.path->bus->sim->mtx);
}
}
@ -1732,7 +1729,7 @@ ata_action(union ccb *start_ccb)
case XPT_SET_TRAN_SETTINGS:
{
ata_set_transfer_settings(&start_ccb->cts,
start_ccb->ccb_h.path->device,
start_ccb->ccb_h.path,
/*async_update*/FALSE);
break;
}
@ -1791,11 +1788,9 @@ ata_get_transfer_settings(struct ccb_trans_settings *cts)
struct ccb_trans_settings_ata *ata;
struct ccb_trans_settings_scsi *scsi;
struct cam_ed *device;
struct cam_sim *sim;
device = cts->ccb_h.path->device;
sim = cts->ccb_h.path->bus->sim;
(*(sim->sim_action))(sim, (union ccb *)cts);
xpt_action_default((union ccb *)cts);
if (cts->protocol == PROTO_UNKNOWN ||
cts->protocol == PROTO_UNSPECIFIED) {
@ -1832,17 +1827,17 @@ ata_get_transfer_settings(struct ccb_trans_settings *cts)
}
static void
ata_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
ata_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_path *path,
int async_update)
{
struct ccb_pathinq cpi;
struct ccb_trans_settings_ata *ata;
struct ccb_trans_settings_scsi *scsi;
struct cam_sim *sim;
struct ata_params *ident_data;
struct scsi_inquiry_data *inq_data;
struct cam_ed *device;
if (device == NULL) {
if (path == NULL || (device = path->device) == NULL) {
cts->ccb_h.status = CAM_PATH_INVALID;
xpt_done((union ccb *)cts);
return;
@ -1859,14 +1854,14 @@ ata_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
cts->protocol_version = device->protocol_version;
if (cts->protocol != device->protocol) {
xpt_print(cts->ccb_h.path, "Uninitialized Protocol %x:%x?\n",
xpt_print(path, "Uninitialized Protocol %x:%x?\n",
cts->protocol, device->protocol);
cts->protocol = device->protocol;
}
if (cts->protocol_version > device->protocol_version) {
if (bootverbose) {
xpt_print(cts->ccb_h.path, "Down reving Protocol "
xpt_print(path, "Down reving Protocol "
"Version from %d to %d?\n", cts->protocol_version,
device->protocol_version);
}
@ -1884,21 +1879,20 @@ ata_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
cts->transport_version = device->transport_version;
if (cts->transport != device->transport) {
xpt_print(cts->ccb_h.path, "Uninitialized Transport %x:%x?\n",
xpt_print(path, "Uninitialized Transport %x:%x?\n",
cts->transport, device->transport);
cts->transport = device->transport;
}
if (cts->transport_version > device->transport_version) {
if (bootverbose) {
xpt_print(cts->ccb_h.path, "Down reving Transport "
xpt_print(path, "Down reving Transport "
"Version from %d to %d?\n", cts->transport_version,
device->transport_version);
}
cts->transport_version = device->transport_version;
}
sim = cts->ccb_h.path->bus->sim;
ident_data = &device->ident_data;
inq_data = &device->inq_data;
if (cts->protocol == PROTO_ATA)
@ -1909,7 +1903,7 @@ ata_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
scsi = &cts->proto_specific.scsi;
else
scsi = NULL;
xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, CAM_PRIORITY_NONE);
xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NONE);
cpi.ccb_h.func_code = XPT_PATH_INQ;
xpt_action((union ccb *)&cpi);
@ -1953,11 +1947,11 @@ ata_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
device->tag_delay_count = CAM_TAG_DELAY_COUNT;
device->flags |= CAM_DEV_TAG_AFTER_COUNT;
} else if (nowt && !newt)
xpt_stop_tags(cts->ccb_h.path);
xpt_stop_tags(path);
}
if (async_update == FALSE)
(*(sim->sim_action))(sim, (union ccb *)cts);
xpt_action_default((union ccb *)cts);
}
/*
@ -2014,10 +2008,14 @@ ata_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target,
xpt_release_device(device);
} else if (async_code == AC_TRANSFER_NEG) {
struct ccb_trans_settings *settings;
struct cam_path path;
settings = (struct ccb_trans_settings *)async_arg;
ata_set_transfer_settings(settings, device,
xpt_compile_path(&path, NULL, bus->path_id, target->target_id,
device->lun_id);
ata_set_transfer_settings(settings, &path,
/*async_update*/TRUE);
xpt_release_path(&path);
}
}
@ -2030,7 +2028,7 @@ ata_announce_periph(struct cam_periph *periph)
u_int speed;
u_int mb;
mtx_assert(periph->sim->mtx, MA_OWNED);
cam_periph_assert(periph, MA_OWNED);
xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NORMAL);
cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;


@ -104,7 +104,9 @@ typedef enum {
CAM_SEND_SENSE = 0x08000000,/* Send sense data with status */
CAM_TERM_IO = 0x10000000,/* Terminate I/O Message sup. */
CAM_DISCONNECT = 0x20000000,/* Disconnects are mandatory */
CAM_SEND_STATUS = 0x40000000 /* Send status after data phase */
CAM_SEND_STATUS = 0x40000000,/* Send status after data phase */
CAM_UNLOCKED = 0x80000000 /* Call callback without lock. */
} ccb_flags;
typedef enum {
@ -151,6 +153,9 @@ typedef enum {
/* Device statistics (error counts, etc.) */
XPT_DEV_ADVINFO = 0x0e,
/* Get/Set Device advanced information */
XPT_ASYNC = 0x0f | XPT_FC_QUEUED | XPT_FC_USER_CCB
| XPT_FC_XPT_ONLY,
/* Asynchronous event */
/* SCSI Control Functions: 0x10->0x1F */
XPT_ABORT = 0x10,
/* Abort the specified CCB */
@ -1153,6 +1158,16 @@ struct ccb_dev_advinfo {
uint8_t *buf; /* IN/OUT: Buffer for requested data */
};
/*
* CCB for sending async events
*/
struct ccb_async {
struct ccb_hdr ccb_h;
uint32_t async_code;
off_t async_arg_size;
void *async_arg_ptr;
};
/*
* Union of all CCB types for kernel space allocation. This union should
* never be used for manipulating CCBs - its only use is for the allocation
@ -1192,6 +1207,7 @@ union ccb {
struct ccb_debug cdbg;
struct ccb_ataio ataio;
struct ccb_dev_advinfo cdai;
struct ccb_async casync;
};
__BEGIN_DECLS


@ -196,12 +196,12 @@ cam_periph_alloc(periph_ctor_t *periph_ctor,
path_id = xpt_path_path_id(path);
target_id = xpt_path_target_id(path);
lun_id = xpt_path_lun_id(path);
cam_init_pinfo(&periph->pinfo);
periph->periph_start = periph_start;
periph->periph_dtor = periph_dtor;
periph->periph_oninval = periph_oninvalidate;
periph->type = type;
periph->periph_name = name;
periph->scheduled_priority = CAM_PRIORITY_NONE;
periph->immediate_priority = CAM_PRIORITY_NONE;
periph->refcount = 1; /* Dropped by invalidation. */
periph->sim = sim;
@ -298,7 +298,7 @@ cam_periph_find(struct cam_path *path, char *name)
TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
if (xpt_path_comp(periph->path, path) == 0) {
xpt_unlock_buses();
mtx_assert(periph->sim->mtx, MA_OWNED);
cam_periph_assert(periph, MA_OWNED);
return(periph);
}
}
@ -379,7 +379,7 @@ void
cam_periph_release_locked_buses(struct cam_periph *periph)
{
mtx_assert(periph->sim->mtx, MA_OWNED);
cam_periph_assert(periph, MA_OWNED);
KASSERT(periph->refcount >= 1, ("periph->refcount >= 1"));
if (--periph->refcount == 0)
camperiphfree(periph);
@ -400,16 +400,16 @@ cam_periph_release_locked(struct cam_periph *periph)
void
cam_periph_release(struct cam_periph *periph)
{
struct cam_sim *sim;
struct mtx *mtx;
if (periph == NULL)
return;
sim = periph->sim;
mtx_assert(sim->mtx, MA_NOTOWNED);
mtx_lock(sim->mtx);
cam_periph_assert(periph, MA_NOTOWNED);
mtx = cam_periph_mtx(periph);
mtx_lock(mtx);
cam_periph_release_locked(periph);
mtx_unlock(sim->mtx);
mtx_unlock(mtx);
}
int
@ -427,10 +427,10 @@ cam_periph_hold(struct cam_periph *periph, int priority)
if (cam_periph_acquire(periph) != CAM_REQ_CMP)
return (ENXIO);
mtx_assert(periph->sim->mtx, MA_OWNED);
cam_periph_assert(periph, MA_OWNED);
while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
periph->flags |= CAM_PERIPH_LOCK_WANTED;
if ((error = mtx_sleep(periph, periph->sim->mtx, priority,
if ((error = cam_periph_sleep(periph, periph, priority,
"caplck", 0)) != 0) {
cam_periph_release_locked(periph);
return (error);
@ -449,7 +449,7 @@ void
cam_periph_unhold(struct cam_periph *periph)
{
mtx_assert(periph->sim->mtx, MA_OWNED);
cam_periph_assert(periph, MA_OWNED);
periph->flags &= ~CAM_PERIPH_LOCKED;
if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
@ -577,7 +577,7 @@ void
cam_periph_invalidate(struct cam_periph *periph)
{
mtx_assert(periph->sim->mtx, MA_OWNED);
cam_periph_assert(periph, MA_OWNED);
/*
* We only call this routine the first time a peripheral is
* invalidated.
@ -600,7 +600,9 @@ camperiphfree(struct cam_periph *periph)
{
struct periph_driver **p_drv;
mtx_assert(periph->sim->mtx, MA_OWNED);
cam_periph_assert(periph, MA_OWNED);
KASSERT(periph->periph_allocating == 0, ("%s%d: freed while allocating",
periph->periph_name, periph->unit_number));
for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
break;
@ -947,40 +949,14 @@ cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
PRELE(curproc);
}
union ccb *
cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
{
struct ccb_hdr *ccb_h;
mtx_assert(periph->sim->mtx, MA_OWNED);
CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdgetccb\n"));
while (SLIST_FIRST(&periph->ccb_list) == NULL) {
if (periph->immediate_priority > priority)
periph->immediate_priority = priority;
xpt_schedule(periph, priority);
if ((SLIST_FIRST(&periph->ccb_list) != NULL)
&& (SLIST_FIRST(&periph->ccb_list)->pinfo.priority == priority))
break;
mtx_assert(periph->sim->mtx, MA_OWNED);
mtx_sleep(&periph->ccb_list, periph->sim->mtx, PRIBIO, "cgticb",
0);
}
ccb_h = SLIST_FIRST(&periph->ccb_list);
SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
return ((union ccb *)ccb_h);
}
void
cam_periph_ccbwait(union ccb *ccb)
{
struct cam_sim *sim;
sim = xpt_path_sim(ccb->ccb_h.path);
if ((ccb->ccb_h.pinfo.index != CAM_UNQUEUED_INDEX)
|| ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG))
mtx_sleep(&ccb->ccb_h.cbfcnp, sim->mtx, PRIBIO, "cbwait", 0);
xpt_path_sleep(ccb->ccb_h.path, &ccb->ccb_h.cbfcnp, PRIBIO,
"cbwait", 0);
}
int
@ -1045,6 +1021,14 @@ cam_periph_ioctl(struct cam_periph *periph, u_long cmd, caddr_t addr,
return(error);
}
static void
cam_periph_done(struct cam_periph *periph, union ccb *done_ccb)
{
/* Caller will release the CCB */
wakeup(&done_ccb->ccb_h.cbfcnp);
}
int
cam_periph_runccb(union ccb *ccb,
int (*error_routine)(union ccb *ccb,
@ -1053,12 +1037,9 @@ cam_periph_runccb(union ccb *ccb,
cam_flags camflags, u_int32_t sense_flags,
struct devstat *ds)
{
struct cam_sim *sim;
int error;
error = 0;
sim = xpt_path_sim(ccb->ccb_h.path);
mtx_assert(sim->mtx, MA_OWNED);
xpt_path_assert(ccb->ccb_h.path, MA_OWNED);
/*
* If the user has supplied a stats structure, and if we understand
@ -1068,6 +1049,7 @@ cam_periph_runccb(union ccb *ccb,
ccb->ccb_h.func_code == XPT_ATA_IO))
devstat_start_transaction(ds, NULL);
ccb->ccb_h.cbfcnp = cam_periph_done;
xpt_action(ccb);
do {
@ -1786,9 +1768,11 @@ cam_periph_error(union ccb *ccb, cam_flags camflags,
scan_ccb->ccb_h.func_code = XPT_SCAN_TGT;
scan_ccb->crcn.flags = 0;
xpt_rescan(scan_ccb);
} else
} else {
xpt_print(newpath,
"Can't allocate CCB to rescan target\n");
xpt_free_path(newpath);
}
}
}


@ -35,6 +35,7 @@
#include <cam/cam_sim.h>
#ifdef _KERNEL
#include <sys/taskqueue.h>
#include <cam/cam_xpt.h>
@ -103,7 +104,6 @@ typedef cam_status periph_ctor_t (struct cam_periph *periph,
typedef void periph_oninv_t (struct cam_periph *periph);
typedef void periph_dtor_t (struct cam_periph *periph);
struct cam_periph {
cam_pinfo pinfo;
periph_start_t *periph_start;
periph_oninv_t *periph_oninval;
periph_dtor_t *periph_dtor;
@ -120,15 +120,20 @@ struct cam_periph {
#define CAM_PERIPH_INVALID 0x08
#define CAM_PERIPH_NEW_DEV_FOUND 0x10
#define CAM_PERIPH_RECOVERY_INPROG 0x20
#define CAM_PERIPH_RUN_TASK 0x40
#define CAM_PERIPH_FREE 0x80
#define CAM_PERIPH_ANNOUNCED 0x100
u_int32_t immediate_priority;
uint32_t scheduled_priority;
uint32_t immediate_priority;
int periph_allocating;
int periph_allocated;
u_int32_t refcount;
SLIST_HEAD(, ccb_hdr) ccb_list; /* For "immediate" requests */
SLIST_ENTRY(cam_periph) periph_links;
TAILQ_ENTRY(cam_periph) unit_links;
ac_callback_t *deferred_callback;
ac_code deferred_ac;
struct task periph_run_task;
};
#define CAM_PERIPH_MAXMAPS 2
@ -185,30 +190,26 @@ void cam_periph_freeze_after_event(struct cam_periph *periph,
int cam_periph_error(union ccb *ccb, cam_flags camflags,
u_int32_t sense_flags, union ccb *save_ccb);
static __inline void
cam_periph_lock(struct cam_periph *periph)
static __inline struct mtx *
cam_periph_mtx(struct cam_periph *periph)
{
mtx_lock(periph->sim->mtx);
return (xpt_path_mtx(periph->path));
}
static __inline void
cam_periph_unlock(struct cam_periph *periph)
{
mtx_unlock(periph->sim->mtx);
}
#define cam_periph_owned(periph) \
mtx_owned(xpt_path_mtx((periph)->path))
static __inline int
cam_periph_owned(struct cam_periph *periph)
{
return (mtx_owned(periph->sim->mtx));
}
#define cam_periph_lock(periph) \
mtx_lock(xpt_path_mtx((periph)->path))
static __inline int
cam_periph_sleep(struct cam_periph *periph, void *chan, int priority,
const char *wmesg, int timo)
{
return (msleep(chan, periph->sim->mtx, priority, wmesg, timo));
}
#define cam_periph_unlock(periph) \
mtx_unlock(xpt_path_mtx((periph)->path))
#define cam_periph_assert(periph, what) \
mtx_assert(xpt_path_mtx((periph)->path), (what))
#define cam_periph_sleep(periph, chan, priority, wmesg, timo) \
xpt_path_sleep((periph)->path, (chan), (priority), (wmesg), (timo))
static inline struct cam_periph *
cam_periph_acquire_first(struct periph_driver *driver)
@ -230,7 +231,7 @@ cam_periph_acquire_next(struct cam_periph *pperiph)
{
struct cam_periph *periph = pperiph;
mtx_assert(pperiph->sim->mtx, MA_NOTOWNED);
cam_periph_assert(pperiph, MA_NOTOWNED);
xpt_lock_buses();
do {
periph = TAILQ_NEXT(periph, unit_links);


@ -220,27 +220,30 @@ cam_devq_alloc(int devices, int openings)
}
if (cam_devq_init(devq, devices, openings) != 0) {
free(devq, M_CAMDEVQ);
return (NULL);
return (NULL);
}
return (devq);
}
int
cam_devq_init(struct cam_devq *devq, int devices, int openings)
{
bzero(devq, sizeof(*devq));
mtx_init(&devq->send_mtx, "CAM queue lock", NULL, MTX_DEF);
if (camq_init(&devq->send_queue, devices) != 0)
return (1);
devq->send_openings = openings;
devq->send_active = 0;
return (0);
devq->send_active = 0;
return (0);
}
void
cam_devq_free(struct cam_devq *devq)
{
camq_fini(&devq->send_queue);
mtx_destroy(&devq->send_mtx);
free(devq, M_CAMDEVQ);
}
@ -286,6 +289,7 @@ cam_ccbq_resize(struct cam_ccbq *ccbq, int new_size)
int delta;
delta = new_size - (ccbq->dev_active + ccbq->dev_openings);
ccbq->total_openings += delta;
ccbq->devq_openings += delta;
ccbq->dev_openings += delta;
@ -303,6 +307,7 @@ cam_ccbq_init(struct cam_ccbq *ccbq, int openings)
if (camq_init(&ccbq->queue,
imax(64, 1 << fls(openings + openings / 2))) != 0)
return (1);
ccbq->total_openings = openings;
ccbq->devq_openings = openings;
ccbq->dev_openings = openings;
return (0);


@ -33,6 +33,8 @@
#ifdef _KERNEL
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <cam/cam.h>
@ -59,8 +61,8 @@ struct cam_ccbq {
struct camq queue;
struct ccb_hdr_tailq queue_extra_head;
int queue_extra_entries;
int total_openings;
int devq_openings;
int devq_allocating;
int dev_openings;
int dev_active;
int held;
@ -69,9 +71,10 @@ struct cam_ccbq {
struct cam_ed;
struct cam_devq {
struct camq send_queue;
int send_openings;
int send_active;
struct mtx send_mtx;
struct camq send_queue;
int send_openings;
int send_active;
};


@ -87,7 +87,6 @@ cam_sim_alloc(sim_action_func sim_action, sim_poll_func sim_poll,
sim->flags = 0;
sim->refcount = 1;
sim->devq = queue;
sim->max_ccbs = 8; /* Reserve for management purposes. */
sim->mtx = mtx;
if (mtx == &Giant) {
sim->flags |= 0;
@ -96,17 +95,12 @@ cam_sim_alloc(sim_action_func sim_action, sim_poll_func sim_poll,
sim->flags |= CAM_SIM_MPSAFE;
callout_init(&sim->callout, 1);
}
SLIST_INIT(&sim->ccb_freeq);
TAILQ_INIT(&sim->sim_doneq);
return (sim);
}
void
cam_sim_free(struct cam_sim *sim, int free_devq)
{
union ccb *ccb;
int error;
mtx_assert(sim->mtx, MA_OWNED);
@ -118,10 +112,6 @@ cam_sim_free(struct cam_sim *sim, int free_devq)
KASSERT(sim->refcount == 0, ("sim->refcount == 0"));
while ((ccb = (union ccb *)SLIST_FIRST(&sim->ccb_freeq)) != NULL) {
SLIST_REMOVE_HEAD(&sim->ccb_freeq, xpt_links.sle);
xpt_free_ccb(ccb);
}
if (free_devq)
cam_simq_free(sim->devq);
free(sim, M_CAMSIM);
@ -130,21 +120,31 @@ cam_sim_free(struct cam_sim *sim, int free_devq)
void
cam_sim_release(struct cam_sim *sim)
{
KASSERT(sim->refcount >= 1, ("sim->refcount >= 1"));
mtx_assert(sim->mtx, MA_OWNED);
int lock;
lock = (mtx_owned(sim->mtx) == 0);
if (lock)
CAM_SIM_LOCK(sim);
KASSERT(sim->refcount >= 1, ("sim->refcount >= 1"));
sim->refcount--;
if (sim->refcount == 0)
wakeup(sim);
if (lock)
CAM_SIM_UNLOCK(sim);
}
void
cam_sim_hold(struct cam_sim *sim)
{
KASSERT(sim->refcount >= 1, ("sim->refcount >= 1"));
mtx_assert(sim->mtx, MA_OWNED);
int lock;
lock = (mtx_owned(sim->mtx) == 0);
if (lock)
CAM_SIM_LOCK(sim);
KASSERT(sim->refcount >= 1, ("sim->refcount >= 1"));
sim->refcount++;
if (lock)
CAM_SIM_UNLOCK(sim);
}
void


@ -104,23 +104,9 @@ struct cam_sim {
u_int32_t flags;
#define CAM_SIM_REL_TIMEOUT_PENDING 0x01
#define CAM_SIM_MPSAFE 0x02
#define CAM_SIM_ON_DONEQ 0x04
#define CAM_SIM_POLLED 0x08
#define CAM_SIM_BATCH 0x10
struct callout callout;
struct cam_devq *devq; /* Device Queue to use for this SIM */
int refcount; /* References to the SIM. */
/* "Pool" of inactive ccbs managed by xpt_get_ccb and xpt_release_ccb */
SLIST_HEAD(,ccb_hdr) ccb_freeq;
/*
* Maximum size of ccb pool. Modified as devices are added/removed
* or have their * opening counts changed.
*/
u_int max_ccbs;
/* Current count of allocated ccbs */
u_int ccb_count;
};
#define CAM_SIM_LOCK(sim) mtx_lock((sim)->mtx)

File diff suppressed because it is too large.


@ -56,6 +56,7 @@ struct cam_path;
struct async_node {
SLIST_ENTRY(async_node) links;
u_int32_t event_enable; /* Async Event enables */
u_int32_t event_lock; /* Take SIM lock for handlers. */
void (*callback)(void *arg, u_int32_t code,
struct cam_path *path, void *args);
void *callback_arg;
@ -110,6 +111,13 @@ void xpt_hold_boot(void);
void xpt_release_boot(void);
void xpt_lock_buses(void);
void xpt_unlock_buses(void);
struct mtx * xpt_path_mtx(struct cam_path *path);
#define xpt_path_lock(path) mtx_lock(xpt_path_mtx(path))
#define xpt_path_unlock(path) mtx_unlock(xpt_path_mtx(path))
#define xpt_path_assert(path, what) mtx_assert(xpt_path_mtx(path), (what))
#define xpt_path_owned(path) mtx_owned(xpt_path_mtx(path))
#define xpt_path_sleep(path, chan, priority, wmesg, timo) \
msleep((chan), xpt_path_mtx(path), (priority), (wmesg), (timo))
cam_status xpt_register_async(int event, ac_callback_t *cbfunc,
void *cbarg, struct cam_path *path);
cam_status xpt_compile_path(struct cam_path *new_path,
@ -117,6 +125,10 @@ cam_status xpt_compile_path(struct cam_path *new_path,
path_id_t path_id,
target_id_t target_id,
lun_id_t lun_id);
cam_status xpt_clone_path(struct cam_path **new_path,
struct cam_path *path);
void xpt_copy_path(struct cam_path *new_path,
struct cam_path *path);
void xpt_release_path(struct cam_path *path);


@ -29,6 +29,8 @@
#ifndef _CAM_CAM_XPT_INTERNAL_H
#define _CAM_CAM_XPT_INTERNAL_H 1
#include <sys/taskqueue.h>
/* Forward Declarations */
struct cam_eb;
struct cam_et;
@ -54,31 +56,17 @@ struct xpt_xport {
xpt_announce_periph_func announce;
};
/*
* Structure for queueing a device in a run queue.
* There is one run queue for allocating new ccbs,
* and another for sending ccbs to the controller.
*/
struct cam_ed_qinfo {
cam_pinfo pinfo;
struct cam_ed *device;
};
/*
* The CAM EDT (Existing Device Table) contains the device information for
* all devices for all busses in the system. The table contains a
* cam_ed structure for each device on the bus.
*/
struct cam_ed {
cam_pinfo devq_entry;
TAILQ_ENTRY(cam_ed) links;
struct cam_ed_qinfo devq_entry;
struct cam_et *target;
struct cam_sim *sim;
lun_id_t lun_id;
struct camq drvq; /*
* Queue of type drivers wanting to do
* work on this device.
*/
struct cam_ccbq ccbq; /* Queue of pending ccbs */
struct async_list asyncs; /* Async callback info for this B/T/L */
struct periph_list periphs; /* All attached devices */
@ -125,6 +113,8 @@ struct cam_ed {
u_int32_t refcount;
struct callout callout;
STAILQ_ENTRY(cam_ed) highpowerq_entry;
struct mtx device_mtx;
struct task device_destroy_task;
};
/*
@ -143,6 +133,7 @@ struct cam_et {
struct timeval last_reset;
u_int rpl_size;
struct scsi_report_luns_data *luns;
struct mtx luns_mtx; /* Protection for luns field. */
};
/*
@ -162,6 +153,7 @@ struct cam_eb {
u_int generation;
device_t parent_dev;
struct xpt_xport *xport;
struct mtx eb_mtx; /* Bus topology mutex. */
};
struct cam_path {
@ -179,8 +171,6 @@ struct cam_ed * xpt_alloc_device(struct cam_eb *bus,
lun_id_t lun_id);
void xpt_acquire_device(struct cam_ed *device);
void xpt_release_device(struct cam_ed *device);
int xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
u_int32_t new_priority);
u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
void xpt_start_tags(struct cam_path *path);
void xpt_stop_tags(struct cam_path *path);


@ -46,8 +46,7 @@ u_int32_t xpt_freeze_devq(struct cam_path *path, u_int count);
void xpt_release_devq(struct cam_path *path,
u_int count, int run_queue);
void xpt_done(union ccb *done_ccb);
void xpt_batch_start(struct cam_sim *sim);
void xpt_batch_done(struct cam_sim *sim);
void xpt_done_direct(union ccb *done_ccb);
#endif
#endif /* _CAM_CAM_XPT_SIM_H */


@ -504,14 +504,9 @@ static void
cfcs_done(union ctl_io *io)
{
union ccb *ccb;
struct cfcs_softc *softc;
struct cam_sim *sim;
ccb = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
sim = xpt_path_sim(ccb->ccb_h.path);
softc = (struct cfcs_softc *)cam_sim_softc(sim);
/*
* At this point we should have status. If we don't, that's a bug.
*/
@ -550,10 +545,7 @@ cfcs_done(union ctl_io *io)
break;
}
mtx_lock(sim->mtx);
xpt_done(ccb);
mtx_unlock(sim->mtx);
ctl_free_io(io);
}


@ -73,8 +73,7 @@ __FBSDID("$FreeBSD$");
#include <cam/ctl/ctl_error.h>
typedef enum {
CTLFE_CCB_DEFAULT = 0x00,
CTLFE_CCB_WAITING = 0x01
CTLFE_CCB_DEFAULT = 0x00
} ctlfe_ccb_types;
struct ctlfe_softc {
@ -82,6 +81,7 @@ struct ctlfe_softc {
path_id_t path_id;
struct cam_sim *sim;
char port_name[DEV_IDLEN];
struct mtx lun_softc_mtx;
STAILQ_HEAD(, ctlfe_lun_softc) lun_softc_list;
STAILQ_ENTRY(ctlfe_softc) links;
};
@ -320,7 +320,6 @@ ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
#ifdef CTLFE_INIT_ENABLE
if (ctlfe_num_targets >= ctlfe_max_targets) {
union ccb *ccb;
struct cam_sim *sim;
ccb = (union ccb *)malloc(sizeof(*ccb), M_TEMP,
M_NOWAIT | M_ZERO);
@ -328,18 +327,12 @@ ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
printf("%s: unable to malloc CCB!\n", __func__);
return;
}
xpt_setup_ccb(&ccb->ccb_h, cpi->ccb_h.path,
CAM_PRIORITY_NONE);
sim = xpt_path_sim(cpi->ccb_h.path);
xpt_setup_ccb(&ccb->ccb_h, path, CAM_PRIORITY_NONE);
ccb->ccb_h.func_code = XPT_SET_SIM_KNOB;
ccb->knob.xport_specific.valid = KNOB_VALID_ROLE;
ccb->knob.xport_specific.fc.role = KNOB_ROLE_INITIATOR;
/* We should hold the SIM lock here */
mtx_assert(sim->mtx, MA_OWNED);
xpt_action(ccb);
if ((ccb->ccb_h.status & CAM_STATUS_MASK) !=
@ -381,7 +374,9 @@ ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
}
bus_softc->path_id = cpi->ccb_h.path_id;
bus_softc->sim = xpt_path_sim(cpi->ccb_h.path);
bus_softc->sim = xpt_path_sim(path);
mtx_init(&bus_softc->lun_softc_mtx, "LUN softc mtx", NULL,
MTX_DEF);
STAILQ_INIT(&bus_softc->lun_softc_list);
fe = &bus_softc->fe;
@ -435,6 +430,7 @@ ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
if (retval != 0) {
printf("%s: ctl_frontend_register() failed with "
"error %d!\n", __func__, retval);
mtx_destroy(&bus_softc->lun_softc_mtx);
free(bus_softc, M_CTLFE);
break;
} else {
@ -464,6 +460,7 @@ ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
* are no outstanding commands for this frontend?
*/
ctl_frontend_deregister(&softc->fe);
mtx_destroy(&softc->lun_softc_mtx);
free(softc, M_CTLFE);
}
break;
@ -538,19 +535,18 @@ ctlferegister(struct cam_periph *periph, void *arg)
{
struct ctlfe_softc *bus_softc;
struct ctlfe_lun_softc *softc;
struct cam_sim *sim;
union ccb en_lun_ccb;
cam_status status;
int i;
softc = (struct ctlfe_lun_softc *)arg;
bus_softc = softc->parent_softc;
sim = xpt_path_sim(periph->path);
TAILQ_INIT(&softc->work_queue);
softc->periph = periph;
callout_init_mtx(&softc->dma_callout, sim->mtx, /*flags*/ 0);
callout_init_mtx(&softc->dma_callout, xpt_path_mtx(periph->path),
/*flags*/ 0);
periph->softc = softc;
xpt_setup_ccb(&en_lun_ccb.ccb_h, periph->path, CAM_PRIORITY_NONE);
@ -580,6 +576,7 @@ ctlferegister(struct cam_periph *periph, void *arg)
xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1);
new_ccb->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
new_ccb->ccb_h.cbfcnp = ctlfedone;
new_ccb->ccb_h.flags |= CAM_UNLOCKED;
xpt_action(new_ccb);
softc->atios_sent++;
status = new_ccb->ccb_h.status;
@ -615,6 +612,7 @@ ctlferegister(struct cam_periph *periph, void *arg)
xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1);
new_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY;
new_ccb->ccb_h.cbfcnp = ctlfedone;
new_ccb->ccb_h.flags |= CAM_UNLOCKED;
xpt_action(new_ccb);
softc->inots_sent++;
status = new_ccb->ccb_h.status;
@ -646,6 +644,7 @@ ctlfeoninvalidate(struct cam_periph *periph)
{
union ccb en_lun_ccb;
cam_status status;
struct ctlfe_softc *bus_softc;
struct ctlfe_lun_softc *softc;
softc = (struct ctlfe_lun_softc *)periph->softc;
@ -668,21 +667,22 @@ ctlfeoninvalidate(struct cam_periph *periph)
"INOTs outstanding, %d refs\n", softc->atios_sent -
softc->atios_returned, softc->inots_sent -
softc->inots_returned, periph->refcount);
bus_softc = softc->parent_softc;
mtx_lock(&bus_softc->lun_softc_mtx);
STAILQ_REMOVE(&bus_softc->lun_softc_list, softc, ctlfe_lun_softc, links);
mtx_unlock(&bus_softc->lun_softc_mtx);
}
static void
ctlfecleanup(struct cam_periph *periph)
{
struct ctlfe_lun_softc *softc;
struct ctlfe_softc *bus_softc;
xpt_print(periph->path, "%s: Called\n", __func__);
softc = (struct ctlfe_lun_softc *)periph->softc;
bus_softc = softc->parent_softc;
STAILQ_REMOVE(&bus_softc->lun_softc_list, softc, ctlfe_lun_softc, links);
/*
* XXX KDM is there anything else that needs to be done here?
*/
@ -705,14 +705,7 @@ ctlfestart(struct cam_periph *periph, union ccb *start_ccb)
start_ccb->ccb_h.ccb_type = CTLFE_CCB_DEFAULT;
ccb_h = TAILQ_FIRST(&softc->work_queue);
if (periph->immediate_priority <= periph->pinfo.priority) {
panic("shouldn't get to the CCB waiting case!");
start_ccb->ccb_h.ccb_type = CTLFE_CCB_WAITING;
SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
periph_links.sle);
periph->immediate_priority = CAM_PRIORITY_NONE;
wakeup(&periph->ccb_list);
} else if (ccb_h == NULL) {
if (ccb_h == NULL) {
softc->ccbs_freed++;
xpt_release_ccb(start_ccb);
} else {
@ -791,7 +784,6 @@ ctlfestart(struct cam_periph *periph, union ccb *start_ccb)
}
start_ccb->ccb_h.func_code = XPT_ABORT;
start_ccb->cab.abort_ccb = (union ccb *)atio;
start_ccb->ccb_h.cbfcnp = ctlfedone;
/* Tell the SIM that we've aborted this ATIO */
xpt_action(start_ccb);
@ -1004,6 +996,7 @@ ctlfestart(struct cam_periph *periph, union ccb *start_ccb)
/*data_ptr*/ data_ptr,
/*dxfer_len*/ dxfer_len,
/*timeout*/ 5 * 1000);
start_ccb->ccb_h.flags |= CAM_UNLOCKED;
start_ccb->ccb_h.ccb_atio = atio;
if (((flags & CAM_SEND_STATUS) == 0)
&& (io != NULL))
@ -1011,7 +1004,9 @@ ctlfestart(struct cam_periph *periph, union ccb *start_ccb)
softc->ctios_sent++;
cam_periph_unlock(periph);
xpt_action(start_ccb);
cam_periph_lock(periph);
if ((atio->ccb_h.status & CAM_DEV_QFRZN) != 0) {
cam_release_devq(periph->path,
@ -1148,7 +1143,10 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
struct ctlfe_softc *bus_softc;
struct ccb_accept_tio *atio = NULL;
union ctl_io *io = NULL;
struct mtx *mtx;
KASSERT((done_ccb->ccb_h.flags & CAM_UNLOCKED) != 0,
("CCB in ctlfedone() without CAM_UNLOCKED flag"));
#ifdef CTLFE_DEBUG
printf("%s: entered, func_code = %#x, type = %#lx\n", __func__,
done_ccb->ccb_h.func_code, done_ccb->ccb_h.ccb_type);
@ -1156,12 +1154,8 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
softc = (struct ctlfe_lun_softc *)periph->softc;
bus_softc = softc->parent_softc;
if (done_ccb->ccb_h.ccb_type == CTLFE_CCB_WAITING) {
panic("shouldn't get to the CCB waiting case!");
wakeup(&done_ccb->ccb_h.cbfcnp);
return;
}
mtx = cam_periph_mtx(periph);
mtx_lock(mtx);
/*
* If the peripheral is invalid, ATIOs and immediate notify CCBs
@ -1177,7 +1171,7 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
case XPT_IMMEDIATE_NOTIFY:
case XPT_NOTIFY_ACKNOWLEDGE:
ctlfe_free_ccb(periph, done_ccb);
return;
goto out;
default:
break;
}
@ -1215,6 +1209,7 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
xpt_schedule(periph, /*priority*/ 1);
break;
}
mtx_unlock(mtx);
ctl_zero_io(io);
/* Save pointers on both sides */
@ -1271,7 +1266,7 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
#endif
ctl_queue(io);
break;
return;
}
case XPT_CONT_TARGET_IO: {
int srr = 0;
@ -1333,7 +1328,7 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
TAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h,
periph_links.tqe);
xpt_schedule(periph, /*priority*/ 1);
return;
break;
}
/*
@ -1359,10 +1354,11 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
}
if (periph->flags & CAM_PERIPH_INVALID) {
ctlfe_free_ccb(periph, (union ccb *)atio);
return;
} else {
xpt_action((union ccb *)atio);
softc->atios_sent++;
mtx_unlock(mtx);
xpt_action((union ccb *)atio);
return;
}
} else {
struct ctlfe_lun_cmd_info *cmd_info;
@ -1478,10 +1474,12 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
/*dxfer_len*/ dxfer_len,
/*timeout*/ 5 * 1000);
csio->ccb_h.flags |= CAM_UNLOCKED;
csio->resid = 0;
csio->ccb_h.ccb_atio = atio;
io->io_hdr.flags |= CTL_FLAG_DMA_INPROG;
softc->ctios_sent++;
mtx_unlock(mtx);
xpt_action((union ccb *)csio);
} else {
/*
@ -1490,10 +1488,12 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
*/
softc->ccbs_freed++;
xpt_release_ccb(done_ccb);
mtx_unlock(mtx);
/* Call the backend move done callback */
io->scsiio.be_move_done(io);
}
return;
}
break;
}
@ -1614,7 +1614,7 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
ctl_free_io(io);
ctlfe_free_ccb(periph, done_ccb);
return;
goto out;
}
if (send_ctl_io != 0) {
ctl_queue(io);
@ -1651,12 +1651,6 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
xpt_action(done_ccb);
softc->inots_sent++;
break;
case XPT_ABORT:
/*
* XPT_ABORT is an immediate CCB, we shouldn't get here.
*/
panic("%s: XPT_ABORT CCB returned!", __func__);
break;
case XPT_SET_SIM_KNOB:
case XPT_GET_SIM_KNOB:
break;
@ -1665,6 +1659,9 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
done_ccb->ccb_h.func_code);
break;
}
out:
mtx_unlock(mtx);
}
static void
@ -1674,17 +1671,12 @@ ctlfe_onoffline(void *arg, int online)
union ccb *ccb;
cam_status status;
struct cam_path *path;
struct cam_sim *sim;
int set_wwnn;
bus_softc = (struct ctlfe_softc *)arg;
set_wwnn = 0;
sim = bus_softc->sim;
mtx_assert(sim->mtx, MA_OWNED);
status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id,
CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
if (status != CAM_REQ_CMP) {
@ -1844,12 +1836,8 @@ ctlfe_online(void *arg)
struct cam_path *path;
cam_status status;
struct ctlfe_lun_softc *lun_softc;
struct cam_sim *sim;
bus_softc = (struct ctlfe_softc *)arg;
sim = bus_softc->sim;
CAM_SIM_LOCK(sim);
/*
* Create the wildcard LUN before bringing the port online.
@ -1860,7 +1848,6 @@ ctlfe_online(void *arg)
if (status != CAM_REQ_CMP) {
printf("%s: unable to create path for wildcard periph\n",
__func__);
CAM_SIM_UNLOCK(sim);
return;
}
@ -1870,15 +1857,16 @@ ctlfe_online(void *arg)
xpt_print(path, "%s: unable to allocate softc for "
"wildcard periph\n", __func__);
xpt_free_path(path);
CAM_SIM_UNLOCK(sim);
return;
}
xpt_path_lock(path);
lun_softc->parent_softc = bus_softc;
lun_softc->flags |= CTLFE_LUN_WILDCARD;
mtx_lock(&bus_softc->lun_softc_mtx);
STAILQ_INSERT_TAIL(&bus_softc->lun_softc_list, lun_softc, links);
mtx_unlock(&bus_softc->lun_softc_mtx);
status = cam_periph_alloc(ctlferegister,
ctlfeoninvalidate,
@ -1901,11 +1889,10 @@ ctlfe_online(void *arg)
entry->status_text : "Unknown", status);
}
xpt_free_path(path);
ctlfe_onoffline(arg, /*online*/ 1);
CAM_SIM_UNLOCK(sim);
xpt_path_unlock(path);
xpt_free_path(path);
}
static void
@ -1915,14 +1902,8 @@ ctlfe_offline(void *arg)
struct cam_path *path;
cam_status status;
struct cam_periph *periph;
struct cam_sim *sim;
bus_softc = (struct ctlfe_softc *)arg;
sim = bus_softc->sim;
CAM_SIM_LOCK(sim);
ctlfe_onoffline(arg, /*online*/ 0);
/*
* Disable the wildcard LUN for this port now that we have taken
@ -1932,19 +1913,20 @@ ctlfe_offline(void *arg)
bus_softc->path_id, CAM_TARGET_WILDCARD,
CAM_LUN_WILDCARD);
if (status != CAM_REQ_CMP) {
CAM_SIM_UNLOCK(sim);
printf("%s: unable to create path for wildcard periph\n",
__func__);
return;
}
xpt_path_lock(path);
ctlfe_onoffline(arg, /*online*/ 0);
if ((periph = cam_periph_find(path, "ctl")) != NULL)
cam_periph_invalidate(periph);
xpt_path_unlock(path);
xpt_free_path(path);
CAM_SIM_UNLOCK(sim);
}
static int
@ -1970,15 +1952,13 @@ ctlfe_lun_enable(void *arg, struct ctl_id targ_id, int lun_id)
struct ctlfe_lun_softc *softc;
struct cam_path *path;
struct cam_periph *periph;
struct cam_sim *sim;
cam_status status;
bus_softc = (struct ctlfe_softc *)arg;
sim = bus_softc->sim;
status = xpt_create_path_unlocked(&path, /*periph*/ NULL,
bus_softc->path_id,
targ_id.id, lun_id);
status = xpt_create_path(&path, /*periph*/ NULL,
bus_softc->path_id,
targ_id.id, lun_id);
/* XXX KDM need some way to return status to CTL here? */
if (status != CAM_REQ_CMP) {
printf("%s: could not create path, status %#x\n", __func__,
@ -1987,18 +1967,20 @@ ctlfe_lun_enable(void *arg, struct ctl_id targ_id, int lun_id)
}
softc = malloc(sizeof(*softc), M_CTLFE, M_WAITOK | M_ZERO);
CAM_SIM_LOCK(sim);
xpt_path_lock(path);
periph = cam_periph_find(path, "ctl");
if (periph != NULL) {
/* We've already got a periph, no need to alloc a new one. */
xpt_path_unlock(path);
xpt_free_path(path);
free(softc, M_CTLFE);
CAM_SIM_UNLOCK(sim);
return (0);
}
softc->parent_softc = bus_softc;
mtx_lock(&bus_softc->lun_softc_mtx);
STAILQ_INSERT_TAIL(&bus_softc->lun_softc_list, softc, links);
mtx_unlock(&bus_softc->lun_softc_mtx);
status = cam_periph_alloc(ctlferegister,
ctlfeoninvalidate,
@ -2011,10 +1993,8 @@ ctlfe_lun_enable(void *arg, struct ctl_id targ_id, int lun_id)
0,
softc);
xpt_path_unlock(path);
xpt_free_path(path);
CAM_SIM_UNLOCK(sim);
return (0);
}
@ -2027,12 +2007,10 @@ ctlfe_lun_disable(void *arg, struct ctl_id targ_id, int lun_id)
{
struct ctlfe_softc *softc;
struct ctlfe_lun_softc *lun_softc;
struct cam_sim *sim;
softc = (struct ctlfe_softc *)arg;
sim = softc->sim;
CAM_SIM_LOCK(sim);
mtx_lock(&softc->lun_softc_mtx);
STAILQ_FOREACH(lun_softc, &softc->lun_softc_list, links) {
struct cam_path *path;
@ -2044,16 +2022,18 @@ ctlfe_lun_disable(void *arg, struct ctl_id targ_id, int lun_id)
}
}
if (lun_softc == NULL) {
CAM_SIM_UNLOCK(sim);
mtx_unlock(&softc->lun_softc_mtx);
printf("%s: can't find target %d lun %d\n", __func__,
targ_id.id, lun_id);
return (1);
}
cam_periph_acquire(lun_softc->periph);
mtx_unlock(&softc->lun_softc_mtx);
cam_periph_lock(lun_softc->periph);
cam_periph_invalidate(lun_softc->periph);
CAM_SIM_UNLOCK(sim);
cam_periph_unlock(lun_softc->periph);
cam_periph_release(lun_softc->periph);
return (0);
}
@ -2064,12 +2044,6 @@ ctlfe_dump_sim(struct cam_sim *sim)
printf("%s%d: max tagged openings: %d, max dev openings: %d\n",
sim->sim_name, sim->unit_number,
sim->max_tagged_dev_openings, sim->max_dev_openings);
printf("%s%d: max_ccbs: %u, ccb_count: %u\n",
sim->sim_name, sim->unit_number,
sim->max_ccbs, sim->ccb_count);
printf("%s%d: ccb_freeq is %sempty\n",
sim->sim_name, sim->unit_number,
(SLIST_FIRST(&sim->ccb_freeq) == NULL) ? "" : "NOT ");
printf("\n");
}
@ -2200,17 +2174,13 @@ static void
ctlfe_datamove_done(union ctl_io *io)
{
union ccb *ccb;
struct cam_sim *sim;
struct cam_periph *periph;
struct ctlfe_lun_softc *softc;
ccb = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
sim = xpt_path_sim(ccb->ccb_h.path);
CAM_SIM_LOCK(sim);
periph = xpt_path_periph(ccb->ccb_h.path);
cam_periph_lock(periph);
softc = (struct ctlfe_lun_softc *)periph->softc;
@ -2255,7 +2225,7 @@ ctlfe_datamove_done(union ctl_io *io)
xpt_schedule(periph, /*priority*/ 1);
}
CAM_SIM_UNLOCK(sim);
cam_periph_unlock(periph);
}
static void


@ -118,7 +118,6 @@ typedef enum {
typedef enum {
CD_CCB_PROBE = 0x01,
CD_CCB_BUFFER_IO = 0x02,
CD_CCB_WAITING = 0x03,
CD_CCB_TUR = 0x04,
CD_CCB_TYPE_MASK = 0x0F,
CD_CCB_RETRY_UA = 0x10
@ -549,7 +548,7 @@ cdasync(void *callback_arg, u_int32_t code,
status = cam_periph_alloc(cdregister, cdoninvalidate,
cdcleanup, cdstart,
"cd", CAM_PERIPH_BIO,
cgd->ccb_h.path, cdasync,
path, cdasync,
AC_FOUND_DEVICE, cgd);
if (status != CAM_REQ_CMP
@ -980,9 +979,9 @@ cdregister(struct cam_periph *periph, void *arg)
STAILQ_INIT(&nchanger->chluns);
callout_init_mtx(&nchanger->long_handle,
periph->sim->mtx, 0);
cam_periph_mtx(periph), 0);
callout_init_mtx(&nchanger->short_handle,
periph->sim->mtx, 0);
cam_periph_mtx(periph), 0);
mtx_lock(&changerq_mtx);
num_changers++;
@ -1051,7 +1050,7 @@ cdregister(struct cam_periph *periph, void *arg)
/*
* Schedule a periodic media polling events.
*/
callout_init_mtx(&softc->mediapoll_c, periph->sim->mtx, 0);
callout_init_mtx(&softc->mediapoll_c, cam_periph_mtx(periph), 0);
if ((softc->flags & CD_FLAG_DISC_REMOVABLE) &&
(softc->flags & CD_FLAG_CHANGER) == 0 &&
(cgd->inq_flags & SID_AEN) == 0 &&
@ -1535,14 +1534,7 @@ cdstart(struct cam_periph *periph, union ccb *start_ccb)
case CD_STATE_NORMAL:
{
bp = bioq_first(&softc->bio_queue);
if (periph->immediate_priority <= periph->pinfo.priority) {
start_ccb->ccb_h.ccb_state = CD_CCB_WAITING;
SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
periph_links.sle);
periph->immediate_priority = CAM_PRIORITY_NONE;
wakeup(&periph->ccb_list);
} else if (bp == NULL) {
if (bp == NULL) {
if (softc->tur) {
softc->tur = 0;
csio = &start_ccb->csio;
@ -1606,11 +1598,9 @@ cdstart(struct cam_periph *periph, union ccb *start_ccb)
xpt_action(start_ccb);
}
if (bp != NULL || softc->tur ||
periph->immediate_priority != CAM_PRIORITY_NONE) {
if (bp != NULL || softc->tur) {
/* Have more work to do, so ensure we stay scheduled */
xpt_schedule(periph, min(CAM_PRIORITY_NORMAL,
periph->immediate_priority));
xpt_schedule(periph, CAM_PRIORITY_NORMAL);
}
break;
}
@ -1895,15 +1885,6 @@ cddone(struct cam_periph *periph, union ccb *done_ccb)
cam_periph_unhold(periph);
return;
}
case CD_CCB_WAITING:
{
/* Caller will release the CCB */
CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
("trying to wakeup ccbwait\n"));
wakeup(&done_ccb->ccb_h.cbfcnp);
return;
}
case CD_CCB_TUR:
{
if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {


@ -116,8 +116,7 @@ typedef enum {
} ch_state;
typedef enum {
CH_CCB_PROBE,
CH_CCB_WAITING
CH_CCB_PROBE
} ch_ccb_types;
typedef enum {
@ -248,20 +247,19 @@ chinit(void)
static void
chdevgonecb(void *arg)
{
struct cam_sim *sim;
struct ch_softc *softc;
struct cam_periph *periph;
struct mtx *mtx;
int i;
periph = (struct cam_periph *)arg;
sim = periph->sim;
softc = (struct ch_softc *)periph->softc;
mtx = cam_periph_mtx(periph);
mtx_lock(mtx);
softc = (struct ch_softc *)periph->softc;
KASSERT(softc->open_count >= 0, ("Negative open count %d",
softc->open_count));
mtx_lock(sim->mtx);
/*
* When we get this callback, we will get no more close calls from
* devfs. So if we have any dangling opens, we need to release the
@ -278,13 +276,13 @@ chdevgonecb(void *arg)
cam_periph_release_locked(periph);
/*
* We reference the SIM lock directly here, instead of using
* We reference the lock directly here, instead of using
* cam_periph_unlock(). The reason is that the final call to
* cam_periph_release_locked() above could result in the periph
* getting freed. If that is the case, dereferencing the periph
* with a cam_periph_unlock() call would cause a page fault.
*/
mtx_unlock(sim->mtx);
mtx_unlock(mtx);
}
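chdevgonecb() above caches cam_periph_mtx(periph) in a local struct mtx * before dropping its last reference, because the final cam_periph_release_locked() may free the periph and the unlock must not reach back into freed memory. The sketch below shows the same idiom in userland under the assumption that the mutex is owned by a longer-lived parent object; all names are invented and pthreads replace mtx(9).

/*
 * "Save the lock pointer first" idiom: the mutex lives in a parent that
 * outlives the refcounted child, so the teardown path unlocks through a
 * saved pointer instead of touching the (possibly freed) child.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct parent {
    pthread_mutex_t lock;       /* outlives every child */
};

struct child {
    struct parent *parent;
    int            refs;        /* protected by parent->lock */
};

/* Drop one reference; the child is freed when the last one goes. */
static void
child_release_locked(struct child *c)
{
    if (--c->refs == 0) {
        printf("last reference dropped, freeing child\n");
        free(c);                /* c must not be touched after this */
    }
}

static void
child_gone(struct child *c)
{
    pthread_mutex_t *mtx;

    mtx = &c->parent->lock;     /* grab a stable pointer first */
    pthread_mutex_lock(mtx);
    child_release_locked(c);    /* may free c */
    pthread_mutex_unlock(mtx);  /* safe: the lock lives in the parent */
}

int
main(void)
{
    struct parent p;
    struct child *c;

    pthread_mutex_init(&p.lock, NULL);
    if ((c = malloc(sizeof(*c))) == NULL)
        return (1);
    c->parent = &p;
    c->refs = 1;
    child_gone(c);
    return (0);
}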
static void
@ -350,7 +348,7 @@ chasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
*/
status = cam_periph_alloc(chregister, choninvalidate,
chcleanup, chstart, "ch",
CAM_PERIPH_BIO, cgd->ccb_h.path,
CAM_PERIPH_BIO, path,
chasync, AC_FOUND_DEVICE, cgd);
if (status != CAM_REQ_CMP
@ -503,25 +501,23 @@ chopen(struct cdev *dev, int flags, int fmt, struct thread *td)
static int
chclose(struct cdev *dev, int flag, int fmt, struct thread *td)
{
struct cam_sim *sim;
struct cam_periph *periph;
struct ch_softc *softc;
struct mtx *mtx;
periph = (struct cam_periph *)dev->si_drv1;
if (periph == NULL)
return(ENXIO);
mtx = cam_periph_mtx(periph);
mtx_lock(mtx);
sim = periph->sim;
softc = (struct ch_softc *)periph->softc;
mtx_lock(sim->mtx);
softc->open_count--;
cam_periph_release_locked(periph);
/*
* We reference the SIM lock directly here, instead of using
* We reference the lock directly here, instead of using
* cam_periph_unlock(). The reason is that the call to
* cam_periph_release_locked() above could result in the periph
* getting freed. If that is the case, dereferencing the periph
@ -532,7 +528,7 @@ chclose(struct cdev *dev, int flag, int fmt, struct thread *td)
* protect the open count and avoid another lock acquisition and
* release.
*/
mtx_unlock(sim->mtx);
mtx_unlock(mtx);
return(0);
}
@ -547,14 +543,7 @@ chstart(struct cam_periph *periph, union ccb *start_ccb)
switch (softc->state) {
case CH_STATE_NORMAL:
{
if (periph->immediate_priority <= periph->pinfo.priority){
start_ccb->ccb_h.ccb_state = CH_CCB_WAITING;
SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
periph_links.sle);
periph->immediate_priority = CAM_PRIORITY_NONE;
wakeup(&periph->ccb_list);
}
xpt_release_ccb(start_ccb);
break;
}
case CH_STATE_PROBE:
@ -734,12 +723,6 @@ chdone(struct cam_periph *periph, union ccb *done_ccb)
cam_periph_unhold(periph);
return;
}
case CH_CCB_WAITING:
{
/* Caller will release the CCB */
wakeup(&done_ccb->ccb_h.cbfcnp);
return;
}
default:
break;
}
@ -1724,10 +1707,8 @@ chscsiversion(struct cam_periph *periph)
struct scsi_inquiry_data *inq_data;
struct ccb_getdev *cgd;
int dev_scsi_version;
struct cam_sim *sim;
sim = xpt_path_sim(periph->path);
mtx_assert(sim->mtx, MA_OWNED);
cam_periph_assert(periph, MA_OWNED);
if ((cgd = (struct ccb_getdev *)xpt_alloc_ccb_nowait()) == NULL)
return (-1);
/*


@ -84,7 +84,7 @@ typedef enum {
DA_FLAG_PACK_LOCKED = 0x004,
DA_FLAG_PACK_REMOVABLE = 0x008,
DA_FLAG_NEED_OTAG = 0x020,
DA_FLAG_WENT_IDLE = 0x040,
DA_FLAG_WAS_OTAG = 0x040,
DA_FLAG_RETRY_UA = 0x080,
DA_FLAG_OPEN = 0x100,
DA_FLAG_SCTX_INIT = 0x200,
@ -118,7 +118,6 @@ typedef enum {
DA_CCB_PROBE_BDC = 0x05,
DA_CCB_PROBE_ATA = 0x06,
DA_CCB_BUFFER_IO = 0x07,
DA_CCB_WAITING = 0x08,
DA_CCB_DUMP = 0x0A,
DA_CCB_DELETE = 0x0B,
DA_CCB_TUR = 0x0C,
@ -199,19 +198,17 @@ struct da_softc {
struct bio_queue_head bio_queue;
struct bio_queue_head delete_queue;
struct bio_queue_head delete_run_queue;
SLIST_ENTRY(da_softc) links;
LIST_HEAD(, ccb_hdr) pending_ccbs;
int tur; /* TEST UNIT READY should be sent */
int refcount; /* Active xpt_action() calls */
da_state state;
da_flags flags;
da_quirks quirks;
int sort_io_queue;
int minimum_cmd_size;
int error_inject;
int ordered_tag_count;
int outstanding_cmds;
int trim_max_ranges;
int delete_running;
int tur;
int delete_available; /* Delete methods possibly available */
uint32_t unmap_max_ranges;
uint32_t unmap_max_lba;
@ -1269,86 +1266,72 @@ daclose(struct disk *dp)
{
struct cam_periph *periph;
struct da_softc *softc;
union ccb *ccb;
int error;
periph = (struct cam_periph *)dp->d_drv1;
cam_periph_lock(periph);
if (cam_periph_hold(periph, PRIBIO) != 0) {
cam_periph_unlock(periph);
cam_periph_release(periph);
return (0);
}
softc = (struct da_softc *)periph->softc;
cam_periph_lock(periph);
CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
("daclose\n"));
if ((softc->flags & DA_FLAG_DIRTY) != 0 &&
(softc->quirks & DA_Q_NO_SYNC_CACHE) == 0 &&
(softc->flags & DA_FLAG_PACK_INVALID) == 0) {
union ccb *ccb;
if (cam_periph_hold(periph, PRIBIO) == 0) {
ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
/* Flush disk cache. */
if ((softc->flags & DA_FLAG_DIRTY) != 0 &&
(softc->quirks & DA_Q_NO_SYNC_CACHE) == 0 &&
(softc->flags & DA_FLAG_PACK_INVALID) == 0) {
ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
scsi_synchronize_cache(&ccb->csio, /*retries*/1,
/*cbfcnp*/dadone, MSG_SIMPLE_Q_TAG,
/*begin_lba*/0, /*lb_count*/0, SSD_FULL_SIZE,
5 * 60 * 1000);
error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
/*sense_flags*/SF_RETRY_UA | SF_QUIET_IR,
softc->disk->d_devstat);
if (error == 0)
softc->flags &= ~DA_FLAG_DIRTY;
xpt_release_ccb(ccb);
}
scsi_synchronize_cache(&ccb->csio,
/*retries*/1,
/*cbfcnp*/dadone,
MSG_SIMPLE_Q_TAG,
/*begin_lba*/0,/* Cover the whole disk */
/*lb_count*/0,
SSD_FULL_SIZE,
5 * 60 * 1000);
error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
/*sense_flags*/SF_RETRY_UA | SF_QUIET_IR,
softc->disk->d_devstat);
if (error == 0)
softc->flags &= ~DA_FLAG_DIRTY;
xpt_release_ccb(ccb);
}
if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) {
if ((softc->quirks & DA_Q_NO_PREVENT) == 0)
/* Allow medium removal. */
if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 &&
(softc->quirks & DA_Q_NO_PREVENT) == 0)
daprevent(periph, PR_ALLOW);
/*
* If we've got removeable media, mark the blocksize as
* unavailable, since it could change when new media is
* inserted.
*/
softc->disk->d_devstat->flags |= DEVSTAT_BS_UNAVAILABLE;
cam_periph_unhold(periph);
}
/*
* If we've got removeable media, mark the blocksize as
* unavailable, since it could change when new media is
* inserted.
*/
if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0)
softc->disk->d_devstat->flags |= DEVSTAT_BS_UNAVAILABLE;
softc->flags &= ~DA_FLAG_OPEN;
cam_periph_unhold(periph);
while (softc->refcount != 0)
cam_periph_sleep(periph, &softc->refcount, PRIBIO, "daclose", 1);
cam_periph_unlock(periph);
cam_periph_release(periph);
return (0);
return (0);
}
static void
daschedule(struct cam_periph *periph)
{
struct da_softc *softc = (struct da_softc *)periph->softc;
uint32_t prio;
if (softc->state != DA_STATE_NORMAL)
return;
/* Check if cam_periph_getccb() was called. */
prio = periph->immediate_priority;
/* Check if we have more work to do. */
if (bioq_first(&softc->bio_queue) ||
(!softc->delete_running && bioq_first(&softc->delete_queue)) ||
softc->tur) {
prio = CAM_PRIORITY_NORMAL;
xpt_schedule(periph, CAM_PRIORITY_NORMAL);
}
/* Schedule CCB if any of above is true. */
if (prio != CAM_PRIORITY_NONE)
xpt_schedule(periph, prio);
}
/*
@ -1382,9 +1365,7 @@ dastrategy(struct bio *bp)
* Place it in the queue of disk activities for this disk
*/
if (bp->bio_cmd == BIO_DELETE) {
if (bp->bio_bcount == 0)
biodone(bp);
else if (DA_SIO)
if (DA_SIO)
bioq_disksort(&softc->delete_queue, bp);
else
bioq_insert_tail(&softc->delete_queue, bp);
@ -1621,7 +1602,7 @@ daasync(void *callback_arg, u_int32_t code,
status = cam_periph_alloc(daregister, daoninvalidate,
dacleanup, dastart,
"da", CAM_PERIPH_BIO,
cgd->ccb_h.path, daasync,
path, daasync,
AC_FOUND_DEVICE, cgd);
if (status != CAM_REQ_CMP
@ -2066,7 +2047,7 @@ daregister(struct cam_periph *periph, void *arg)
* Schedule a periodic event to occasionally send an
* ordered tag to a device.
*/
callout_init_mtx(&softc->sendordered_c, periph->sim->mtx, 0);
callout_init_mtx(&softc->sendordered_c, cam_periph_mtx(periph), 0);
callout_reset(&softc->sendordered_c,
(da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL,
dasendorderedtag, softc);
@ -2186,7 +2167,7 @@ daregister(struct cam_periph *periph, void *arg)
/*
* Schedule a periodic media polling events.
*/
callout_init_mtx(&softc->mediapoll_c, periph->sim->mtx, 0);
callout_init_mtx(&softc->mediapoll_c, cam_periph_mtx(periph), 0);
if ((softc->flags & DA_FLAG_PACK_REMOVABLE) &&
(cgd->inq_flags & SID_AEN) == 0 &&
da_poll_period != 0)
@ -2214,20 +2195,6 @@ skipstate:
struct bio *bp;
uint8_t tag_code;
/* Execute immediate CCB if waiting. */
if (periph->immediate_priority <= periph->pinfo.priority) {
CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
("queuing for immediate ccb\n"));
start_ccb->ccb_h.ccb_state = DA_CCB_WAITING;
SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
periph_links.sle);
periph->immediate_priority = CAM_PRIORITY_NONE;
wakeup(&periph->ccb_list);
/* May have more work to do, so ensure we stay scheduled */
daschedule(periph);
break;
}
/* Run BIO_DELETE if not running yet. */
if (!softc->delete_running &&
(bp = bioq_first(&softc->delete_queue)) != NULL) {
@ -2266,7 +2233,7 @@ skipstate:
if ((bp->bio_flags & BIO_ORDERED) != 0 ||
(softc->flags & DA_FLAG_NEED_OTAG) != 0) {
softc->flags &= ~DA_FLAG_NEED_OTAG;
softc->ordered_tag_count++;
softc->flags |= DA_FLAG_WAS_OTAG;
tag_code = MSG_ORDERED_Q_TAG;
} else {
tag_code = MSG_SIMPLE_Q_TAG;
@ -2316,15 +2283,11 @@ skipstate:
break;
}
start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO;
start_ccb->ccb_h.flags |= CAM_UNLOCKED;
out:
/*
* Block out any asynchronous callbacks
* while we touch the pending ccb list.
*/
LIST_INSERT_HEAD(&softc->pending_ccbs,
&start_ccb->ccb_h, periph_links.le);
softc->outstanding_cmds++;
/* We expect a unit attention from this device */
if ((softc->flags & DA_FLAG_RETRY_UA) != 0) {
@ -2333,7 +2296,11 @@ out:
}
start_ccb->ccb_h.ccb_bp = bp;
softc->refcount++;
cam_periph_unlock(periph);
xpt_action(start_ccb);
cam_periph_lock(periph);
softc->refcount--;
/* May have more work to do, so ensure we stay scheduled */
daschedule(periph);
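In the hunks above, dastart() now bumps softc->refcount, drops the periph lock across xpt_action(), and retakes it, while daclose() waits for the count to drain before tearing down. Below is a hedged userland sketch of that shape; a condition variable replaces the timed cam_periph_sleep() loop the driver uses, dispatch_slow() is a stand-in for xpt_action(), and the real driver additionally stops queueing new work before it drains.

/*
 * Refcount around an unlocked dispatch: the counter pins the shared
 * state while the lock is dropped for the slow call, and the teardown
 * path sleeps until the counter reaches zero.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  drained = PTHREAD_COND_INITIALIZER;
static int refcount;            /* dispatches running without the lock */

static void
dispatch_slow(int id)
{
    usleep(1000);               /* pretend this is the unlocked dispatch */
    printf("dispatched request %d\n", id);
}

static void *
start_routine(void *arg)
{
    (void)arg;
    for (int i = 0; i < 10; i++) {
        pthread_mutex_lock(&lock);
        refcount++;             /* pin the shared state */
        pthread_mutex_unlock(&lock);

        dispatch_slow(i);       /* lock is not held here */

        pthread_mutex_lock(&lock);
        if (--refcount == 0)
            pthread_cond_broadcast(&drained);
        pthread_mutex_unlock(&lock);
    }
    return (NULL);
}

int
main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, start_routine, NULL);
    usleep(5000);

    /*
     * Close-style drain: wait until no unlocked dispatch is in flight.
     * A real driver also stops accepting new work before this point.
     */
    pthread_mutex_lock(&lock);
    while (refcount != 0)
        pthread_cond_wait(&drained, &lock);
    pthread_mutex_unlock(&lock);
    printf("drained, safe to tear down\n");

    pthread_join(t, NULL);
    return (0);
}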
@ -2628,6 +2595,7 @@ da_delete_unmap(struct cam_periph *periph, union ccb *ccb, struct bio *bp)
/*sense_len*/SSD_FULL_SIZE,
da_default_timeout * 1000);
ccb->ccb_h.ccb_state = DA_CCB_DELETE;
ccb->ccb_h.flags |= CAM_UNLOCKED;
}
static void
@ -2708,6 +2676,7 @@ da_delete_trim(struct cam_periph *periph, union ccb *ccb, struct bio *bp)
/*sense_len*/SSD_FULL_SIZE,
da_default_timeout * 1000);
ccb->ccb_h.ccb_state = DA_CCB_DELETE;
ccb->ccb_h.flags |= CAM_UNLOCKED;
}
/*
@ -2764,6 +2733,7 @@ da_delete_ws(struct cam_periph *periph, union ccb *ccb, struct bio *bp)
/*sense_len*/SSD_FULL_SIZE,
da_default_timeout * 1000);
ccb->ccb_h.ccb_state = DA_CCB_DELETE;
ccb->ccb_h.flags |= CAM_UNLOCKED;
}
static int
@ -2898,6 +2868,7 @@ dadone(struct cam_periph *periph, union ccb *done_ccb)
{
struct bio *bp, *bp1;
cam_periph_lock(periph);
bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
int error;
@ -2914,6 +2885,7 @@ dadone(struct cam_periph *periph, union ccb *done_ccb)
* A retry was scheduled, so
* just return.
*/
cam_periph_unlock(periph);
return;
}
bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
@ -2981,18 +2953,22 @@ dadone(struct cam_periph *periph, union ccb *done_ccb)
}
}
/*
* Block out any asynchronous callbacks
* while we touch the pending ccb list.
*/
LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
softc->outstanding_cmds--;
if (softc->outstanding_cmds == 0)
softc->flags |= DA_FLAG_WENT_IDLE;
if (LIST_EMPTY(&softc->pending_ccbs))
softc->flags |= DA_FLAG_WAS_OTAG;
xpt_release_ccb(done_ccb);
if (state == DA_CCB_DELETE) {
while ((bp1 = bioq_takefirst(&softc->delete_run_queue))
!= NULL) {
TAILQ_HEAD(, bio) queue;
TAILQ_INIT(&queue);
TAILQ_CONCAT(&queue, &softc->delete_run_queue.queue, bio_queue);
softc->delete_run_queue.insert_point = NULL;
softc->delete_running = 0;
daschedule(periph);
cam_periph_unlock(periph);
while ((bp1 = TAILQ_FIRST(&queue)) != NULL) {
TAILQ_REMOVE(&queue, bp1, bio_queue);
bp1->bio_error = bp->bio_error;
if (bp->bio_flags & BIO_ERROR) {
bp1->bio_flags |= BIO_ERROR;
@ -3001,13 +2977,11 @@ dadone(struct cam_periph *periph, union ccb *done_ccb)
bp1->bio_resid = 0;
biodone(bp1);
}
softc->delete_running = 0;
if (bp != NULL)
biodone(bp);
daschedule(periph);
} else if (bp != NULL)
} else
cam_periph_unlock(periph);
if (bp != NULL)
biodone(bp);
break;
return;
}
case DA_CCB_PROBE_RC:
case DA_CCB_PROBE_RC16:
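dadone() above now splices the whole delete_run_queue onto a local TAILQ while the periph lock is held, drops the lock, and only then calls biodone() on each bio. Here is a small sketch of that drain idiom, using <sys/queue.h> as shipped with FreeBSD and invented names; TAILQ_CONCAT() moves every element in constant time and leaves the source queue empty, so the lock covers only the splice.

/*
 * "Steal the whole queue, complete outside the lock": the per-item work
 * runs with the lock dropped, which keeps completion callbacks from
 * nesting under the queue lock.
 */
#include <sys/queue.h>
#include <pthread.h>
#include <stdio.h>

struct item {
    int               id;
    TAILQ_ENTRY(item) link;
};
TAILQ_HEAD(itemq, item);

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct itemq pending = TAILQ_HEAD_INITIALIZER(pending);

static void
complete_pending(void)
{
    struct itemq queue;
    struct item *it;

    TAILQ_INIT(&queue);

    pthread_mutex_lock(&lock);
    TAILQ_CONCAT(&queue, &pending, link);  /* splice; pending is now empty */
    pthread_mutex_unlock(&lock);

    /* Per-item completion work runs with the lock dropped. */
    while ((it = TAILQ_FIRST(&queue)) != NULL) {
        TAILQ_REMOVE(&queue, it, link);
        printf("completed item %d\n", it->id);
    }
}

int
main(void)
{
    struct item a = { .id = 1 }, b = { .id = 2 };

    pthread_mutex_lock(&lock);
    TAILQ_INSERT_TAIL(&pending, &a, link);
    TAILQ_INSERT_TAIL(&pending, &b, link);
    pthread_mutex_unlock(&lock);

    complete_pending();
    return (0);
}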
@ -3457,12 +3431,6 @@ dadone(struct cam_periph *periph, union ccb *done_ccb)
daprobedone(periph, done_ccb);
return;
}
case DA_CCB_WAITING:
{
/* Caller will release the CCB */
wakeup(&done_ccb->ccb_h.cbfcnp);
return;
}
case DA_CCB_DUMP:
/* No-op. We're polling */
return;
@ -3573,7 +3541,7 @@ damediapoll(void *arg)
struct cam_periph *periph = arg;
struct da_softc *softc = periph->softc;
if (!softc->tur && softc->outstanding_cmds == 0) {
if (!softc->tur && LIST_EMPTY(&softc->pending_ccbs)) {
if (cam_periph_acquire(periph) == CAM_REQ_CMP) {
softc->tur = 1;
daschedule(periph);
@ -3745,14 +3713,11 @@ dasendorderedtag(void *arg)
struct da_softc *softc = arg;
if (da_send_ordered) {
if ((softc->ordered_tag_count == 0)
&& ((softc->flags & DA_FLAG_WENT_IDLE) == 0)) {
softc->flags |= DA_FLAG_NEED_OTAG;
if (!LIST_EMPTY(&softc->pending_ccbs)) {
if ((softc->flags & DA_FLAG_WAS_OTAG) == 0)
softc->flags |= DA_FLAG_NEED_OTAG;
softc->flags &= ~DA_FLAG_WAS_OTAG;
}
if (softc->outstanding_cmds > 0)
softc->flags &= ~DA_FLAG_WENT_IDLE;
softc->ordered_tag_count = 0;
}
/* Queue us up again */
callout_reset(&softc->sendordered_c,

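dasendorderedtag() above replaces the ordered_tag_count/DA_FLAG_WENT_IDLE counters with two flags: once per interval, if commands are outstanding and no ordered tag went out since the last tick, DA_FLAG_NEED_OTAG is set and DA_FLAG_WAS_OTAG is cleared; issuing an ordered command does the opposite. The sketch below models only that flag interplay with plain booleans and leaves out the pending-list bookkeeping dadone() adds, so treat it as an approximation.

/*
 * Flag-based ordered-tag heuristic: request a barrier only when a whole
 * interval passed with outstanding work and no ordered tag issued.
 */
#include <stdbool.h>
#include <stdio.h>

static bool need_otag, was_otag;

static void
issue_command(int id, bool bio_ordered)
{
    bool ordered = bio_ordered || need_otag;

    if (ordered) {
        need_otag = false;
        was_otag = true;        /* remember we sent one this interval */
    }
    printf("cmd %d: %s tag\n", id, ordered ? "ordered" : "simple");
}

static void
sendordered_tick(bool have_outstanding)
{
    if (have_outstanding) {
        if (!was_otag)          /* a whole interval with no barrier */
            need_otag = true;
        was_otag = false;       /* start watching the next interval */
    }
}

int
main(void)
{
    issue_command(0, false);    /* simple */
    sendordered_tick(true);     /* no ordered tag seen, request one */
    issue_command(1, false);    /* becomes ordered */
    sendordered_tick(true);     /* barrier was seen, nothing to do */
    issue_command(2, false);    /* simple again */
    return (0);
}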

@ -69,7 +69,6 @@ static periph_init_t enc_init;
static periph_ctor_t enc_ctor;
static periph_oninv_t enc_oninvalidate;
static periph_dtor_t enc_dtor;
static periph_start_t enc_start;
static void enc_async(void *, uint32_t, struct cam_path *, void *);
static enctyp enc_type(struct ccb_getdev *);
@ -113,17 +112,16 @@ enc_init(void)
static void
enc_devgonecb(void *arg)
{
struct cam_sim *sim;
struct cam_periph *periph;
struct enc_softc *enc;
struct mtx *mtx;
int i;
periph = (struct cam_periph *)arg;
sim = periph->sim;
mtx = cam_periph_mtx(periph);
mtx_lock(mtx);
enc = (struct enc_softc *)periph->softc;
mtx_lock(sim->mtx);
/*
* When we get this callback, we will get no more close calls from
* devfs. So if we have any dangling opens, we need to release the
@ -140,13 +138,13 @@ enc_devgonecb(void *arg)
cam_periph_release_locked(periph);
/*
* We reference the SIM lock directly here, instead of using
* We reference the lock directly here, instead of using
* cam_periph_unlock(). The reason is that the final call to
* cam_periph_release_locked() above could result in the periph
* getting freed. If that is the case, dereferencing the periph
* with a cam_periph_unlock() call would cause a page fault.
*/
mtx_unlock(sim->mtx);
mtx_unlock(mtx);
}
static void
@ -243,8 +241,8 @@ enc_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
}
status = cam_periph_alloc(enc_ctor, enc_oninvalidate,
enc_dtor, enc_start, "ses", CAM_PERIPH_BIO,
cgd->ccb_h.path, enc_async, AC_FOUND_DEVICE, cgd);
enc_dtor, NULL, "ses", CAM_PERIPH_BIO,
path, enc_async, AC_FOUND_DEVICE, cgd);
if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) {
printf("enc_async: Unable to probe new device due to "
@ -299,25 +297,23 @@ out:
static int
enc_close(struct cdev *dev, int flag, int fmt, struct thread *td)
{
struct cam_sim *sim;
struct cam_periph *periph;
struct enc_softc *enc;
struct mtx *mtx;
periph = (struct cam_periph *)dev->si_drv1;
if (periph == NULL)
return (ENXIO);
mtx = cam_periph_mtx(periph);
mtx_lock(mtx);
sim = periph->sim;
enc = periph->softc;
mtx_lock(sim->mtx);
enc->open_count--;
cam_periph_release_locked(periph);
/*
* We reference the SIM lock directly here, instead of using
* We reference the lock directly here, instead of using
* cam_periph_unlock(). The reason is that the call to
* cam_periph_release_locked() above could result in the periph
* getting freed. If that is the case, dereferencing the periph
@ -328,34 +324,11 @@ enc_close(struct cdev *dev, int flag, int fmt, struct thread *td)
* protect the open count and avoid another lock acquisition and
* release.
*/
mtx_unlock(sim->mtx);
mtx_unlock(mtx);
return (0);
}
static void
enc_start(struct cam_periph *p, union ccb *sccb)
{
struct enc_softc *enc;
enc = p->softc;
ENC_DLOG(enc, "%s enter imm=%d prio=%d\n",
__func__, p->immediate_priority, p->pinfo.priority);
if (p->immediate_priority <= p->pinfo.priority) {
SLIST_INSERT_HEAD(&p->ccb_list, &sccb->ccb_h, periph_links.sle);
p->immediate_priority = CAM_PRIORITY_NONE;
wakeup(&p->ccb_list);
} else
xpt_release_ccb(sccb);
ENC_DLOG(enc, "%s exit\n", __func__);
}
void
enc_done(struct cam_periph *periph, union ccb *dccb)
{
wakeup(&dccb->ccb_h.cbfcnp);
}
int
enc_error(union ccb *ccb, uint32_t cflags, uint32_t sflags)
{
@ -614,7 +587,7 @@ enc_runcmd(struct enc_softc *enc, char *cdb, int cdbl, char *dptr, int *dlenp)
if (enc->enc_type == ENC_SEMB_SES || enc->enc_type == ENC_SEMB_SAFT) {
tdlen = min(dlen, 1020);
tdlen = (tdlen + 3) & ~3;
cam_fill_ataio(&ccb->ataio, 0, enc_done, ddf, 0, dptr, tdlen,
cam_fill_ataio(&ccb->ataio, 0, NULL, ddf, 0, dptr, tdlen,
30 * 1000);
if (cdb[0] == RECEIVE_DIAGNOSTIC)
ata_28bit_cmd(&ccb->ataio,
@ -632,7 +605,7 @@ enc_runcmd(struct enc_softc *enc, char *cdb, int cdbl, char *dptr, int *dlenp)
0x80, tdlen / 4);
} else {
tdlen = dlen;
cam_fill_csio(&ccb->csio, 0, enc_done, ddf, MSG_SIMPLE_Q_TAG,
cam_fill_csio(&ccb->csio, 0, NULL, ddf, MSG_SIMPLE_Q_TAG,
dptr, dlen, sizeof (struct scsi_sense_data), cdbl,
60 * 1000);
bcopy(cdb, ccb->csio.cdb_io.cdb_bytes, cdbl);
@ -886,7 +859,7 @@ enc_kproc_init(enc_softc_t *enc)
{
int result;
callout_init_mtx(&enc->status_updater, enc->periph->sim->mtx, 0);
callout_init_mtx(&enc->status_updater, cam_periph_mtx(enc->periph), 0);
if (cam_periph_acquire(enc->periph) != CAM_REQ_CMP)
return (ENXIO);


@ -192,7 +192,6 @@ struct ses_mgmt_mode_page {
/* Enclosure core interface for sub-drivers */
int enc_runcmd(struct enc_softc *, char *, int, char *, int *);
void enc_log(struct enc_softc *, const char *, ...);
void enc_done(struct cam_periph *, union ccb *);
int enc_error(union ccb *, uint32_t, uint32_t);
void enc_update_request(enc_softc_t *, uint32_t);


@ -243,12 +243,12 @@ safte_fill_read_buf_io(enc_softc_t *enc, struct enc_fsm_state *state,
if (enc->enc_type == ENC_SEMB_SAFT) {
semb_read_buffer(&ccb->ataio, /*retries*/5,
enc_done, MSG_SIMPLE_Q_TAG,
NULL, MSG_SIMPLE_Q_TAG,
state->page_code, buf, state->buf_size,
state->timeout);
} else {
scsi_read_buffer(&ccb->csio, /*retries*/5,
enc_done, MSG_SIMPLE_Q_TAG, 1,
NULL, MSG_SIMPLE_Q_TAG, 1,
state->page_code, 0, buf, state->buf_size,
SSD_FULL_SIZE, state->timeout);
}
@ -942,11 +942,11 @@ safte_fill_control_request(enc_softc_t *enc, struct enc_fsm_state *state,
if (enc->enc_type == ENC_SEMB_SAFT) {
semb_write_buffer(&ccb->ataio, /*retries*/5,
enc_done, MSG_SIMPLE_Q_TAG,
NULL, MSG_SIMPLE_Q_TAG,
buf, xfer_len, state->timeout);
} else {
scsi_write_buffer(&ccb->csio, /*retries*/5,
enc_done, MSG_SIMPLE_Q_TAG, 1,
NULL, MSG_SIMPLE_Q_TAG, 1,
0, 0, buf, xfer_len,
SSD_FULL_SIZE, state->timeout);
}


@ -888,7 +888,6 @@ ses_path_iter_devid_callback(enc_softc_t *enc, enc_element_t *elem,
struct device_match_result *device_match;
struct device_match_pattern *device_pattern;
ses_path_iter_args_t *args;
struct cam_sim *sim;
args = (ses_path_iter_args_t *)arg;
match_pattern.type = DEV_MATCH_DEVICE;
@ -901,10 +900,10 @@ ses_path_iter_devid_callback(enc_softc_t *enc, enc_element_t *elem,
device_pattern->data.devid_pat.id_len);
memset(&cdm, 0, sizeof(cdm));
if (xpt_create_path_unlocked(&cdm.ccb_h.path, /*periph*/NULL,
CAM_XPT_PATH_ID,
CAM_TARGET_WILDCARD,
CAM_LUN_WILDCARD) != CAM_REQ_CMP)
if (xpt_create_path(&cdm.ccb_h.path, /*periph*/NULL,
CAM_XPT_PATH_ID,
CAM_TARGET_WILDCARD,
CAM_LUN_WILDCARD) != CAM_REQ_CMP)
return;
cdm.ccb_h.func_code = XPT_DEV_MATCH;
@ -914,11 +913,8 @@ ses_path_iter_devid_callback(enc_softc_t *enc, enc_element_t *elem,
cdm.match_buf_len = sizeof(match_result);
cdm.matches = &match_result;
sim = xpt_path_sim(cdm.ccb_h.path);
CAM_SIM_LOCK(sim);
xpt_action((union ccb *)&cdm);
xpt_free_path(cdm.ccb_h.path);
CAM_SIM_UNLOCK(sim);
if ((cdm.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP
|| (cdm.status != CAM_DEV_MATCH_LAST
@ -927,18 +923,15 @@ ses_path_iter_devid_callback(enc_softc_t *enc, enc_element_t *elem,
return;
device_match = &match_result.result.device_result;
if (xpt_create_path_unlocked(&cdm.ccb_h.path, /*periph*/NULL,
device_match->path_id,
device_match->target_id,
device_match->target_lun) != CAM_REQ_CMP)
if (xpt_create_path(&cdm.ccb_h.path, /*periph*/NULL,
device_match->path_id,
device_match->target_id,
device_match->target_lun) != CAM_REQ_CMP)
return;
args->callback(enc, elem, cdm.ccb_h.path, args->callback_arg);
sim = xpt_path_sim(cdm.ccb_h.path);
CAM_SIM_LOCK(sim);
xpt_free_path(cdm.ccb_h.path);
CAM_SIM_UNLOCK(sim);
}
/**
@ -1186,7 +1179,7 @@ ses_set_timed_completion(enc_softc_t *enc, uint8_t tc_en)
if (mode_buf == NULL)
goto out;
scsi_mode_sense(&ccb->csio, /*retries*/4, enc_done, MSG_SIMPLE_Q_TAG,
scsi_mode_sense(&ccb->csio, /*retries*/4, NULL, MSG_SIMPLE_Q_TAG,
/*dbd*/FALSE, SMS_PAGE_CTRL_CURRENT, SES_MGMT_MODE_PAGE_CODE,
mode_buf, mode_buf_len, SSD_FULL_SIZE, /*timeout*/60 * 1000);
@ -1214,7 +1207,7 @@ ses_set_timed_completion(enc_softc_t *enc, uint8_t tc_en)
/* SES2r20: a completion time of zero means as long as possible */
bzero(&mgmt->max_comp_time, sizeof(mgmt->max_comp_time));
scsi_mode_select(&ccb->csio, 5, enc_done, MSG_SIMPLE_Q_TAG,
scsi_mode_select(&ccb->csio, 5, NULL, MSG_SIMPLE_Q_TAG,
/*page_fmt*/FALSE, /*save_pages*/TRUE, mode_buf, mode_buf_len,
SSD_FULL_SIZE, /*timeout*/60 * 1000);
@ -2030,12 +2023,12 @@ ses_fill_rcv_diag_io(enc_softc_t *enc, struct enc_fsm_state *state,
if (enc->enc_type == ENC_SEMB_SES) {
semb_receive_diagnostic_results(&ccb->ataio, /*retries*/5,
enc_done, MSG_SIMPLE_Q_TAG, /*pcv*/1,
NULL, MSG_SIMPLE_Q_TAG, /*pcv*/1,
state->page_code, buf, state->buf_size,
state->timeout);
} else {
scsi_receive_diagnostic_results(&ccb->csio, /*retries*/5,
enc_done, MSG_SIMPLE_Q_TAG, /*pcv*/1,
NULL, MSG_SIMPLE_Q_TAG, /*pcv*/1,
state->page_code, buf, state->buf_size,
SSD_FULL_SIZE, state->timeout);
}
@ -2153,12 +2146,12 @@ ses_fill_control_request(enc_softc_t *enc, struct enc_fsm_state *state,
/* Fill out the ccb */
if (enc->enc_type == ENC_SEMB_SES) {
semb_send_diagnostic(&ccb->ataio, /*retries*/5, enc_done,
semb_send_diagnostic(&ccb->ataio, /*retries*/5, NULL,
MSG_SIMPLE_Q_TAG,
buf, ses_page_length(&ses_cache->status_page->hdr),
state->timeout);
} else {
scsi_send_diagnostic(&ccb->csio, /*retries*/5, enc_done,
scsi_send_diagnostic(&ccb->csio, /*retries*/5, NULL,
MSG_SIMPLE_Q_TAG, /*unit_offline*/0,
/*device_offline*/0, /*self_test*/0,
/*page_format*/1, /*self_test_code*/0,


@ -65,8 +65,7 @@ typedef enum {
} pass_state;
typedef enum {
PASS_CCB_BUFFER_IO,
PASS_CCB_WAITING
PASS_CCB_BUFFER_IO
} pass_ccb_types;
#define ccb_type ppriv_field0
@ -94,12 +93,9 @@ static periph_init_t passinit;
static periph_ctor_t passregister;
static periph_oninv_t passoninvalidate;
static periph_dtor_t passcleanup;
static periph_start_t passstart;
static void pass_add_physpath(void *context, int pending);
static void passasync(void *callback_arg, u_int32_t code,
struct cam_path *path, void *arg);
static void passdone(struct cam_periph *periph,
union ccb *done_ccb);
static int passerror(union ccb *ccb, u_int32_t cam_flags,
u_int32_t sense_flags);
static int passsendccb(struct cam_periph *periph, union ccb *ccb,
@ -143,20 +139,19 @@ passinit(void)
static void
passdevgonecb(void *arg)
{
struct cam_sim *sim;
struct cam_periph *periph;
struct mtx *mtx;
struct pass_softc *softc;
int i;
periph = (struct cam_periph *)arg;
sim = periph->sim;
softc = (struct pass_softc *)periph->softc;
mtx = cam_periph_mtx(periph);
mtx_lock(mtx);
softc = (struct pass_softc *)periph->softc;
KASSERT(softc->open_count >= 0, ("Negative open count %d",
softc->open_count));
mtx_lock(sim->mtx);
/*
* When we get this callback, we will get no more close calls from
* devfs. So if we have any dangling opens, we need to release the
@ -173,13 +168,13 @@ passdevgonecb(void *arg)
cam_periph_release_locked(periph);
/*
* We reference the SIM lock directly here, instead of using
* We reference the lock directly here, instead of using
* cam_periph_unlock(). The reason is that the final call to
* cam_periph_release_locked() above could result in the periph
* getting freed. If that is the case, dereferencing the periph
* with a cam_periph_unlock() call would cause a page fault.
*/
mtx_unlock(sim->mtx);
mtx_unlock(mtx);
}
static void
@ -295,8 +290,8 @@ passasync(void *callback_arg, u_int32_t code,
* process.
*/
status = cam_periph_alloc(passregister, passoninvalidate,
passcleanup, passstart, "pass",
CAM_PERIPH_BIO, cgd->ccb_h.path,
passcleanup, NULL, "pass",
CAM_PERIPH_BIO, path,
passasync, AC_FOUND_DEVICE, cgd);
if (status != CAM_REQ_CMP
@ -498,25 +493,23 @@ passopen(struct cdev *dev, int flags, int fmt, struct thread *td)
static int
passclose(struct cdev *dev, int flag, int fmt, struct thread *td)
{
struct cam_sim *sim;
struct cam_periph *periph;
struct pass_softc *softc;
struct mtx *mtx;
periph = (struct cam_periph *)dev->si_drv1;
if (periph == NULL)
return (ENXIO);
mtx = cam_periph_mtx(periph);
mtx_lock(mtx);
sim = periph->sim;
softc = periph->softc;
mtx_lock(sim->mtx);
softc->open_count--;
cam_periph_release_locked(periph);
/*
* We reference the SIM lock directly here, instead of using
* We reference the lock directly here, instead of using
* cam_periph_unlock(). The reason is that the call to
* cam_periph_release_locked() above could result in the periph
* getting freed. If that is the case, dereferencing the periph
@ -527,46 +520,11 @@ passclose(struct cdev *dev, int flag, int fmt, struct thread *td)
* protect the open count and avoid another lock acquisition and
* release.
*/
mtx_unlock(sim->mtx);
mtx_unlock(mtx);
return (0);
}
static void
passstart(struct cam_periph *periph, union ccb *start_ccb)
{
struct pass_softc *softc;
softc = (struct pass_softc *)periph->softc;
switch (softc->state) {
case PASS_STATE_NORMAL:
start_ccb->ccb_h.ccb_type = PASS_CCB_WAITING;
SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
periph_links.sle);
periph->immediate_priority = CAM_PRIORITY_NONE;
wakeup(&periph->ccb_list);
break;
}
}
static void
passdone(struct cam_periph *periph, union ccb *done_ccb)
{
struct pass_softc *softc;
struct ccb_scsiio *csio;
softc = (struct pass_softc *)periph->softc;
csio = &done_ccb->csio;
switch (csio->ccb_h.ccb_type) {
case PASS_CCB_WAITING:
/* Caller will release the CCB */
wakeup(&done_ccb->ccb_h.cbfcnp);
return;
}
xpt_release_ccb(done_ccb);
}
static int
passioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
@ -685,12 +643,6 @@ passsendccb(struct cam_periph *periph, union ccb *ccb, union ccb *inccb)
*/
xpt_merge_ccb(ccb, inccb);
/*
* There's no way for the user to have a completion
* function, so we put our own completion function in here.
*/
ccb->ccb_h.cbfcnp = passdone;
/*
* Let cam_periph_mapmem do a sanity check on the data pointer format.
* Even if no data transfer is needed, it's a cheap check and it


@ -66,7 +66,6 @@ typedef enum {
typedef enum {
PT_CCB_BUFFER_IO = 0x01,
PT_CCB_WAITING = 0x02,
PT_CCB_RETRY_UA = 0x04,
PT_CCB_BUFFER_IO_UA = PT_CCB_BUFFER_IO|PT_CCB_RETRY_UA
} pt_ccb_state;
@ -378,7 +377,7 @@ ptasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
*/
status = cam_periph_alloc(ptctor, ptoninvalidate, ptdtor,
ptstart, "pt", CAM_PERIPH_BIO,
cgd->ccb_h.path, ptasync,
path, ptasync,
AC_FOUND_DEVICE, cgd);
if (status != CAM_REQ_CMP
@ -423,15 +422,7 @@ ptstart(struct cam_periph *periph, union ccb *start_ccb)
* See if there is a buf with work for us to do..
*/
bp = bioq_first(&softc->bio_queue);
if (periph->immediate_priority <= periph->pinfo.priority) {
CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
("queuing for immediate ccb\n"));
start_ccb->ccb_h.ccb_state = PT_CCB_WAITING;
SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
periph_links.sle);
periph->immediate_priority = CAM_PRIORITY_NONE;
wakeup(&periph->ccb_list);
} else if (bp == NULL) {
if (bp == NULL) {
xpt_release_ccb(start_ccb);
} else {
bioq_remove(&softc->bio_queue, bp);
@ -554,10 +545,6 @@ ptdone(struct cam_periph *periph, union ccb *done_ccb)
biofinish(bp, softc->device_stats, 0);
break;
}
case PT_CCB_WAITING:
/* Caller will release the CCB */
wakeup(&done_ccb->ccb_h.cbfcnp);
return;
}
xpt_release_ccb(done_ccb);
}


@ -114,7 +114,6 @@ typedef enum {
#define ccb_bp ppriv_ptr1
#define SA_CCB_BUFFER_IO 0x0
#define SA_CCB_WAITING 0x1
#define SA_CCB_TYPEMASK 0x1
#define SA_POSITION_UPDATED 0x2
@ -1453,7 +1452,7 @@ saasync(void *callback_arg, u_int32_t code,
*/
status = cam_periph_alloc(saregister, saoninvalidate,
sacleanup, sastart,
"sa", CAM_PERIPH_BIO, cgd->ccb_h.path,
"sa", CAM_PERIPH_BIO, path,
saasync, AC_FOUND_DEVICE, cgd);
if (status != CAM_REQ_CMP
@ -1722,15 +1721,7 @@ sastart(struct cam_periph *periph, union ccb *start_ccb)
* See if there is a buf with work for us to do..
*/
bp = bioq_first(&softc->bio_queue);
if (periph->immediate_priority <= periph->pinfo.priority) {
CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
("queuing for immediate ccb\n"));
Set_CCB_Type(start_ccb, SA_CCB_WAITING);
SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
periph_links.sle);
periph->immediate_priority = CAM_PRIORITY_NONE;
wakeup(&periph->ccb_list);
} else if (bp == NULL) {
if (bp == NULL) {
xpt_release_ccb(start_ccb);
} else if ((softc->flags & SA_FLAG_ERR_PENDING) != 0) {
struct bio *done_bp;
@ -1953,12 +1944,6 @@ sadone(struct cam_periph *periph, union ccb *done_ccb)
biofinish(bp, softc->device_stats, 0);
break;
}
case SA_CCB_WAITING:
{
/* Caller will release the CCB */
wakeup(&done_ccb->ccb_h.cbfcnp);
return;
}
}
xpt_release_ccb(done_ccb);
}
@ -2545,7 +2530,8 @@ saerror(union ccb *ccb, u_int32_t cflgs, u_int32_t sflgs)
/*
* If a read/write command, we handle it here.
*/
if (CCB_Type(csio) != SA_CCB_WAITING) {
if (csio->cdb_io.cdb_bytes[0] == SA_READ ||
csio->cdb_io.cdb_bytes[0] == SA_WRITE) {
break;
}
/*


@ -76,8 +76,7 @@ typedef enum {
} sg_rdwr_state;
typedef enum {
SG_CCB_RDWR_IO,
SG_CCB_WAITING
SG_CCB_RDWR_IO
} sg_ccb_types;
#define ccb_type ppriv_field0
@ -119,7 +118,6 @@ static periph_init_t sginit;
static periph_ctor_t sgregister;
static periph_oninv_t sgoninvalidate;
static periph_dtor_t sgcleanup;
static periph_start_t sgstart;
static void sgasync(void *callback_arg, uint32_t code,
struct cam_path *path, void *arg);
static void sgdone(struct cam_periph *periph, union ccb *done_ccb);
@ -172,20 +170,19 @@ sginit(void)
static void
sgdevgonecb(void *arg)
{
struct cam_sim *sim;
struct cam_periph *periph;
struct sg_softc *softc;
struct mtx *mtx;
int i;
periph = (struct cam_periph *)arg;
sim = periph->sim;
softc = (struct sg_softc *)periph->softc;
mtx = cam_periph_mtx(periph);
mtx_lock(mtx);
softc = (struct sg_softc *)periph->softc;
KASSERT(softc->open_count >= 0, ("Negative open count %d",
softc->open_count));
mtx_lock(sim->mtx);
/*
* When we get this callback, we will get no more close calls from
* devfs. So if we have any dangling opens, we need to release the
@ -202,13 +199,13 @@ sgdevgonecb(void *arg)
cam_periph_release_locked(periph);
/*
* We reference the SIM lock directly here, instead of using
* We reference the lock directly here, instead of using
* cam_periph_unlock(). The reason is that the final call to
* cam_periph_release_locked() above could result in the periph
* getting freed. If that is the case, dereferencing the periph
* with a cam_periph_unlock() call would cause a page fault.
*/
mtx_unlock(sim->mtx);
mtx_unlock(mtx);
}
@ -277,8 +274,8 @@ sgasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
* start the probe process.
*/
status = cam_periph_alloc(sgregister, sgoninvalidate,
sgcleanup, sgstart, "sg",
CAM_PERIPH_BIO, cgd->ccb_h.path,
sgcleanup, NULL, "sg",
CAM_PERIPH_BIO, path,
sgasync, AC_FOUND_DEVICE, cgd);
if ((status != CAM_REQ_CMP) && (status != CAM_REQ_INPROG)) {
const struct cam_status_entry *entry;
@ -382,24 +379,6 @@ sgregister(struct cam_periph *periph, void *arg)
return (CAM_REQ_CMP);
}
static void
sgstart(struct cam_periph *periph, union ccb *start_ccb)
{
struct sg_softc *softc;
softc = (struct sg_softc *)periph->softc;
switch (softc->state) {
case SG_STATE_NORMAL:
start_ccb->ccb_h.ccb_type = SG_CCB_WAITING;
SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
periph_links.sle);
periph->immediate_priority = CAM_PRIORITY_NONE;
wakeup(&periph->ccb_list);
break;
}
}
static void
sgdone(struct cam_periph *periph, union ccb *done_ccb)
{
@ -409,10 +388,6 @@ sgdone(struct cam_periph *periph, union ccb *done_ccb)
softc = (struct sg_softc *)periph->softc;
csio = &done_ccb->csio;
switch (csio->ccb_h.ccb_type) {
case SG_CCB_WAITING:
/* Caller will release the CCB */
wakeup(&done_ccb->ccb_h.cbfcnp);
return;
case SG_CCB_RDWR_IO:
{
struct sg_rdwr *rdwr;
@ -480,25 +455,23 @@ sgopen(struct cdev *dev, int flags, int fmt, struct thread *td)
static int
sgclose(struct cdev *dev, int flag, int fmt, struct thread *td)
{
struct cam_sim *sim;
struct cam_periph *periph;
struct sg_softc *softc;
struct mtx *mtx;
periph = (struct cam_periph *)dev->si_drv1;
if (periph == NULL)
return (ENXIO);
mtx = cam_periph_mtx(periph);
mtx_lock(mtx);
sim = periph->sim;
softc = periph->softc;
mtx_lock(sim->mtx);
softc->open_count--;
cam_periph_release_locked(periph);
/*
* We reference the SIM lock directly here, instead of using
* We reference the lock directly here, instead of using
* cam_periph_unlock(). The reason is that the call to
* cam_periph_release_locked() above could result in the periph
* getting freed. If that is the case, dereferencing the periph
@ -509,7 +482,7 @@ sgclose(struct cdev *dev, int flag, int fmt, struct thread *td)
* protect the open count and avoid another lock acquisition and
* release.
*/
mtx_unlock(sim->mtx);
mtx_unlock(mtx);
return (0);
}
@ -879,7 +852,7 @@ search:
break;
}
if ((rdwr == NULL) || (rdwr->state != SG_RDWR_DONE)) {
if (msleep(rdwr, periph->sim->mtx, PCATCH, "sgread", 0) == ERESTART)
if (cam_periph_sleep(periph, rdwr, PCATCH, "sgread", 0) == ERESTART)
return (EAGAIN);
goto search;
}


@ -65,8 +65,7 @@ typedef enum {
} targbh_flags;
typedef enum {
TARGBH_CCB_WORKQ,
TARGBH_CCB_WAITING
TARGBH_CCB_WORKQ
} targbh_ccb_types;
#define MAX_ACCEPT 8
@ -431,7 +430,7 @@ targbhdtor(struct cam_periph *periph)
/* FALLTHROUGH */
default:
/* XXX Wait for callback of targbhdislun() */
msleep(softc, periph->sim->mtx, PRIBIO, "targbh", hz/2);
cam_periph_sleep(periph, softc, PRIBIO, "targbh", hz/2);
free(softc, M_SCSIBH);
break;
}
@ -450,13 +449,7 @@ targbhstart(struct cam_periph *periph, union ccb *start_ccb)
softc = (struct targbh_softc *)periph->softc;
ccbh = TAILQ_FIRST(&softc->work_queue);
if (periph->immediate_priority <= periph->pinfo.priority) {
start_ccb->ccb_h.ccb_type = TARGBH_CCB_WAITING;
SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
periph_links.sle);
periph->immediate_priority = CAM_PRIORITY_NONE;
wakeup(&periph->ccb_list);
} else if (ccbh == NULL) {
if (ccbh == NULL) {
xpt_release_ccb(start_ccb);
} else {
TAILQ_REMOVE(&softc->work_queue, ccbh, periph_links.tqe);
@ -535,12 +528,6 @@ targbhdone(struct cam_periph *periph, union ccb *done_ccb)
softc = (struct targbh_softc *)periph->softc;
if (done_ccb->ccb_h.ccb_type == TARGBH_CCB_WAITING) {
/* Caller will release the CCB */
wakeup(&done_ccb->ccb_h.cbfcnp);
return;
}
switch (done_ccb->ccb_h.func_code) {
case XPT_ACCEPT_TARGET_IO:
{


@ -236,23 +236,21 @@ targioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *t
{
struct ioc_enable_lun *new_lun;
struct cam_path *path;
struct cam_sim *sim;
new_lun = (struct ioc_enable_lun *)addr;
status = xpt_create_path_unlocked(&path, /*periph*/NULL,
new_lun->path_id,
new_lun->target_id,
new_lun->lun_id);
status = xpt_create_path(&path, /*periph*/NULL,
new_lun->path_id,
new_lun->target_id,
new_lun->lun_id);
if (status != CAM_REQ_CMP) {
printf("Couldn't create path, status %#x\n", status);
break;
}
sim = xpt_path_sim(path);
mtx_lock(sim->mtx);
xpt_path_lock(path);
status = targenable(softc, path, new_lun->grp6_len,
new_lun->grp7_len);
xpt_path_unlock(path);
xpt_free_path(path);
mtx_unlock(sim->mtx);
break;
}
case TARGIOCDISABLE:
@ -278,13 +276,10 @@ targioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *t
cdbg.flags = CAM_DEBUG_PERIPH;
else
cdbg.flags = CAM_DEBUG_NONE;
cam_periph_lock(softc->periph);
xpt_setup_ccb(&cdbg.ccb_h, softc->path, CAM_PRIORITY_NORMAL);
cdbg.ccb_h.func_code = XPT_DEBUG;
cdbg.ccb_h.cbfcnp = targdone;
xpt_action((union ccb *)&cdbg);
cam_periph_unlock(softc->periph);
status = cdbg.ccb_h.status & CAM_STATUS_MASK;
break;
}
@ -823,7 +818,7 @@ targread(struct cdev *dev, struct uio *uio, int ioflag)
user_descr = TAILQ_FIRST(abort_queue);
while (ccb_h == NULL && user_descr == NULL) {
if ((ioflag & IO_NDELAY) == 0) {
error = msleep(user_queue, softc->periph->sim->mtx,
error = cam_periph_sleep(softc->periph, user_queue,
PRIBIO | PCATCH, "targrd", 0);
ccb_h = TAILQ_FIRST(user_queue);
user_descr = TAILQ_FIRST(abort_queue);
@ -1019,7 +1014,6 @@ abort_all_pending(struct targ_softc *softc)
struct targ_cmd_descr *descr;
struct ccb_abort cab;
struct ccb_hdr *ccb_h;
struct cam_sim *sim;
CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("abort_all_pending\n"));
@ -1052,8 +1046,7 @@ abort_all_pending(struct targ_softc *softc)
/* If we aborted at least one pending CCB ok, wait for it. */
if (cab.ccb_h.status == CAM_REQ_CMP) {
sim = xpt_path_sim(softc->path);
msleep(&softc->pending_ccb_queue, sim->mtx,
cam_periph_sleep(softc->periph, &softc->pending_ccb_queue,
PRIBIO | PCATCH, "tgabrt", 0);
}


@ -583,7 +583,7 @@ static struct cam_ed *
lun_id_t lun_id);
static void scsi_devise_transport(struct cam_path *path);
static void scsi_set_transfer_settings(struct ccb_trans_settings *cts,
struct cam_ed *device,
struct cam_path *path,
int async_update);
static void scsi_toggle_tags(struct cam_path *path);
static void scsi_dev_async(u_int32_t async_code,
@ -645,6 +645,7 @@ proberegister(struct cam_periph *periph, void *arg)
return (status);
}
CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe started\n"));
scsi_devise_transport(periph->path);
/*
* Ensure we've waited at least a bus settle
@ -1719,11 +1720,12 @@ probe_purge_old(struct cam_path *path, struct scsi_report_luns_data *new,
if (path->target == NULL) {
return;
}
if (path->target->luns == NULL) {
path->target->luns = new;
return;
}
mtx_lock(&path->target->luns_mtx);
old = path->target->luns;
path->target->luns = new;
mtx_unlock(&path->target->luns_mtx);
if (old == NULL)
return;
nlun_old = scsi_4btoul(old->length) / 8;
nlun_new = scsi_4btoul(new->length) / 8;
@ -1774,7 +1776,6 @@ probe_purge_old(struct cam_path *path, struct scsi_report_luns_data *new,
}
}
free(old, M_CAMXPT);
path->target->luns = new;
}
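probe_purge_old() above publishes the new LUN list under the small dedicated luns_mtx and compares and frees the old list only after dropping it. Below is a userland sketch of that publish-then-retire idiom, with invented names and a pthread mutex in place of mtx(9); the point is that the lock is held only for the pointer swap, never across the slow diffing work.

/*
 * Swap a shared pointer under a small mutex, retire the old copy
 * unlocked.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct lun_list {
    size_t count;
    long  *luns;
};

static pthread_mutex_t luns_mtx = PTHREAD_MUTEX_INITIALIZER;
static struct lun_list *current_luns;   /* protected by luns_mtx */

static void
publish_luns(struct lun_list *new)
{
    struct lun_list *old;

    pthread_mutex_lock(&luns_mtx);
    old = current_luns;
    current_luns = new;         /* readers now see the new list */
    pthread_mutex_unlock(&luns_mtx);

    if (old == NULL)
        return;

    /* Slow work (diffing, purging, freeing) happens unlocked. */
    printf("retiring old list with %zu entries\n", old->count);
    free(old->luns);
    free(old);
}

int
main(void)
{
    struct lun_list *first, *second;

    if ((first = calloc(1, sizeof(*first))) == NULL ||
        (second = calloc(1, sizeof(*second))) == NULL)
        return (1);
    publish_luns(first);
    publish_luns(second);
    return (0);
}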
static void
@ -1836,6 +1837,8 @@ typedef struct {
static void
scsi_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
{
struct mtx *mtx;
CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
("scsi_scan_bus\n"));
switch (request_ccb->ccb_h.func_code) {
@ -1903,6 +1906,7 @@ scsi_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
(work_ccb->cpi.max_target * sizeof (u_int)), M_CAMXPT, M_ZERO|M_NOWAIT);
if (scan_info == NULL) {
request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
xpt_free_ccb(work_ccb);
xpt_done(request_ccb);
return;
}
@ -1933,6 +1937,8 @@ scsi_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
scan_info->counter--;
}
}
mtx = xpt_path_mtx(scan_info->request_ccb->ccb_h.path);
mtx_unlock(mtx);
for (i = low_target; i <= max_target; i++) {
cam_status status;
@ -1965,10 +1971,13 @@ scsi_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
request_ccb->ccb_h.pinfo.priority);
work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
work_ccb->ccb_h.cbfcnp = scsi_scan_bus;
work_ccb->ccb_h.flags |= CAM_UNLOCKED;
work_ccb->ccb_h.ppriv_ptr0 = scan_info;
work_ccb->crcn.flags = request_ccb->crcn.flags;
xpt_action(work_ccb);
}
mtx_lock(mtx);
break;
}
case XPT_SCAN_LUN:
@ -2001,6 +2010,9 @@ scsi_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
target = request_ccb->ccb_h.path->target;
next_target = 1;
mtx = xpt_path_mtx(scan_info->request_ccb->ccb_h.path);
mtx_lock(mtx);
mtx_lock(&target->luns_mtx);
if (target->luns) {
lun_id_t first;
u_int nluns = scsi_4btoul(target->luns->length) / 8;
@ -2042,6 +2054,7 @@ scsi_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
}
if (scan_info->lunindex[target_id] < nluns) {
mtx_unlock(&target->luns_mtx);
next_target = 0;
CAM_DEBUG(request_ccb->ccb_h.path,
CAM_DEBUG_PROBE,
@ -2050,6 +2063,7 @@ scsi_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
(uintmax_t)lun_id));
scan_info->lunindex[target_id]++;
} else {
mtx_unlock(&target->luns_mtx);
/*
* We're done with scanning all luns.
*
@ -2068,7 +2082,9 @@ scsi_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
}
}
}
} else if (request_ccb->ccb_h.status != CAM_REQ_CMP) {
} else {
mtx_unlock(&target->luns_mtx);
if (request_ccb->ccb_h.status != CAM_REQ_CMP) {
int phl;
/*
@ -2100,7 +2116,7 @@ scsi_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
if (lun_id == request_ccb->ccb_h.target_lun
|| lun_id > scan_info->cpi->max_lun)
next_target = 1;
} else {
} else {
device = request_ccb->ccb_h.path->device;
@ -2116,6 +2132,7 @@ scsi_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
if (lun_id == request_ccb->ccb_h.target_lun
|| lun_id > scan_info->cpi->max_lun)
next_target = 1;
}
}
/*
@ -2149,6 +2166,7 @@ scsi_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
}
}
if (done) {
mtx_unlock(mtx);
xpt_free_ccb(request_ccb);
xpt_free_ccb((union ccb *)scan_info->cpi);
request_ccb = scan_info->request_ccb;
@ -2162,6 +2180,7 @@ scsi_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
}
if ((scan_info->cpi->hba_misc & PIM_SEQSCAN) == 0) {
mtx_unlock(mtx);
xpt_free_ccb(request_ccb);
break;
}
@ -2169,6 +2188,7 @@ scsi_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
scan_info->request_ccb->ccb_h.path_id,
scan_info->counter, 0);
if (status != CAM_REQ_CMP) {
mtx_unlock(mtx);
printf("scsi_scan_bus: xpt_create_path failed"
" with status %#x, bus scan halted\n",
status);
@ -2184,6 +2204,7 @@ scsi_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
request_ccb->ccb_h.pinfo.priority);
request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
request_ccb->ccb_h.cbfcnp = scsi_scan_bus;
request_ccb->ccb_h.flags |= CAM_UNLOCKED;
request_ccb->ccb_h.ppriv_ptr0 = scan_info;
request_ccb->crcn.flags =
scan_info->request_ccb->crcn.flags;
@ -2207,10 +2228,12 @@ scsi_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
request_ccb->ccb_h.pinfo.priority);
request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
request_ccb->ccb_h.cbfcnp = scsi_scan_bus;
request_ccb->ccb_h.flags |= CAM_UNLOCKED;
request_ccb->ccb_h.ppriv_ptr0 = scan_info;
request_ccb->crcn.flags =
scan_info->request_ccb->crcn.flags;
}
mtx_unlock(mtx);
xpt_action(request_ccb);
break;
}
@ -2227,6 +2250,7 @@ scsi_scan_lun(struct cam_periph *periph, struct cam_path *path,
cam_status status;
struct cam_path *new_path;
struct cam_periph *old_periph;
int lock;
CAM_DEBUG(path, CAM_DEBUG_TRACE, ("scsi_scan_lun\n"));
@ -2274,9 +2298,13 @@ scsi_scan_lun(struct cam_periph *periph, struct cam_path *path,
xpt_setup_ccb(&request_ccb->ccb_h, new_path, CAM_PRIORITY_XPT);
request_ccb->ccb_h.cbfcnp = xptscandone;
request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
request_ccb->ccb_h.flags |= CAM_UNLOCKED;
request_ccb->crcn.flags = flags;
}
lock = (xpt_path_owned(path) == 0);
if (lock)
xpt_path_lock(path);
if ((old_periph = cam_periph_find(path, "probe")) != NULL) {
if ((old_periph->flags & CAM_PERIPH_INVALID) == 0) {
probe_softc *softc;
@ -2302,6 +2330,8 @@ scsi_scan_lun(struct cam_periph *periph, struct cam_path *path,
xpt_done(request_ccb);
}
}
if (lock)
xpt_path_unlock(path);
}
static void
@ -2315,7 +2345,6 @@ xptscandone(struct cam_periph *periph, union ccb *done_ccb)
static struct cam_ed *
scsi_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
{
struct cam_path path;
struct scsi_quirk_entry *quirk;
struct cam_ed *device;
@ -2340,22 +2369,6 @@ scsi_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
device->device_id_len = 0;
device->supported_vpds = NULL;
device->supported_vpds_len = 0;
/*
* XXX should be limited by number of CCBs this bus can
* do.
*/
bus->sim->max_ccbs += device->ccbq.devq_openings;
if (lun_id != CAM_LUN_WILDCARD) {
xpt_compile_path(&path,
NULL,
bus->path_id,
target->target_id,
lun_id);
scsi_devise_transport(&path);
xpt_release_path(&path);
}
return (device);
}
@ -2534,15 +2547,8 @@ scsi_dev_advinfo(union ccb *start_ccb)
start_ccb->ccb_h.status = CAM_REQ_CMP;
if (cdai->flags & CDAI_FLAG_STORE) {
int owned;
owned = mtx_owned(start_ccb->ccb_h.path->bus->sim->mtx);
if (owned == 0)
mtx_lock(start_ccb->ccb_h.path->bus->sim->mtx);
xpt_async(AC_ADVINFO_CHANGED, start_ccb->ccb_h.path,
(void *)(uintptr_t)cdai->buftype);
if (owned == 0)
mtx_unlock(start_ccb->ccb_h.path->bus->sim->mtx);
}
}
@ -2554,7 +2560,7 @@ scsi_action(union ccb *start_ccb)
case XPT_SET_TRAN_SETTINGS:
{
scsi_set_transfer_settings(&start_ccb->cts,
start_ccb->ccb_h.path->device,
start_ccb->ccb_h.path,
/*async_update*/FALSE);
break;
}
@ -2567,14 +2573,6 @@ scsi_action(union ccb *start_ccb)
start_ccb->ccb_h.path, start_ccb->crcn.flags,
start_ccb);
break;
case XPT_GET_TRAN_SETTINGS:
{
struct cam_sim *sim;
sim = start_ccb->ccb_h.path->bus->sim;
(*(sim->sim_action))(sim, start_ccb);
break;
}
case XPT_DEV_ADVINFO:
{
scsi_dev_advinfo(start_ccb);
@ -2587,17 +2585,17 @@ scsi_action(union ccb *start_ccb)
}
static void
scsi_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
scsi_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_path *path,
int async_update)
{
struct ccb_pathinq cpi;
struct ccb_trans_settings cur_cts;
struct ccb_trans_settings_scsi *scsi;
struct ccb_trans_settings_scsi *cur_scsi;
struct cam_sim *sim;
struct scsi_inquiry_data *inq_data;
struct cam_ed *device;
if (device == NULL) {
if (path == NULL || (device = path->device) == NULL) {
cts->ccb_h.status = CAM_PATH_INVALID;
xpt_done((union ccb *)cts);
return;
@ -2614,14 +2612,14 @@ scsi_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device
cts->protocol_version = device->protocol_version;
if (cts->protocol != device->protocol) {
xpt_print(cts->ccb_h.path, "Uninitialized Protocol %x:%x?\n",
xpt_print(path, "Uninitialized Protocol %x:%x?\n",
cts->protocol, device->protocol);
cts->protocol = device->protocol;
}
if (cts->protocol_version > device->protocol_version) {
if (bootverbose) {
xpt_print(cts->ccb_h.path, "Down reving Protocol "
xpt_print(path, "Down reving Protocol "
"Version from %d to %d?\n", cts->protocol_version,
device->protocol_version);
}
@ -2639,22 +2637,20 @@ scsi_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device
cts->transport_version = device->transport_version;
if (cts->transport != device->transport) {
xpt_print(cts->ccb_h.path, "Uninitialized Transport %x:%x?\n",
xpt_print(path, "Uninitialized Transport %x:%x?\n",
cts->transport, device->transport);
cts->transport = device->transport;
}
if (cts->transport_version > device->transport_version) {
if (bootverbose) {
xpt_print(cts->ccb_h.path, "Down reving Transport "
xpt_print(path, "Down reving Transport "
"Version from %d to %d?\n", cts->transport_version,
device->transport_version);
}
cts->transport_version = device->transport_version;
}
sim = cts->ccb_h.path->bus->sim;
/*
* Nothing more of interest to do unless
* this is a device connected via the
@ -2662,13 +2658,13 @@ scsi_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device
*/
if (cts->protocol != PROTO_SCSI) {
if (async_update == FALSE)
(*(sim->sim_action))(sim, (union ccb *)cts);
xpt_action_default((union ccb *)cts);
return;
}
inq_data = &device->inq_data;
scsi = &cts->proto_specific.scsi;
xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, CAM_PRIORITY_NONE);
xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NONE);
cpi.ccb_h.func_code = XPT_PATH_INQ;
xpt_action((union ccb *)&cpi);
@ -2689,7 +2685,7 @@ scsi_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device
* Perform sanity checking against what the
* controller and device can do.
*/
xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, CAM_PRIORITY_NONE);
xpt_setup_ccb(&cur_cts.ccb_h, path, CAM_PRIORITY_NONE);
cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
cur_cts.type = cts->type;
xpt_action((union ccb *)&cur_cts);
@ -2810,7 +2806,7 @@ scsi_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device
&& (spi->flags & (CTS_SPI_VALID_SYNC_RATE|
CTS_SPI_VALID_SYNC_OFFSET|
CTS_SPI_VALID_BUS_WIDTH)) != 0)
scsi_toggle_tags(cts->ccb_h.path);
scsi_toggle_tags(path);
}
if (cts->type == CTS_TYPE_CURRENT_SETTINGS
@ -2847,12 +2843,12 @@ scsi_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device
device->tag_delay_count = CAM_TAG_DELAY_COUNT;
device->flags |= CAM_DEV_TAG_AFTER_COUNT;
} else {
xpt_stop_tags(cts->ccb_h.path);
xpt_stop_tags(path);
}
}
}
if (async_update == FALSE)
(*(sim->sim_action))(sim, (union ccb *)cts);
xpt_action_default((union ccb *)cts);
}
static void
@ -2880,10 +2876,10 @@ scsi_toggle_tags(struct cam_path *path)
cts.transport_version = XPORT_VERSION_UNSPECIFIED;
cts.proto_specific.scsi.flags = 0;
cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
scsi_set_transfer_settings(&cts, path->device,
scsi_set_transfer_settings(&cts, path,
/*async_update*/TRUE);
cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
scsi_set_transfer_settings(&cts, path->device,
scsi_set_transfer_settings(&cts, path,
/*async_update*/TRUE);
}
}
@ -2954,10 +2950,14 @@ scsi_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target,
xpt_release_device(device);
} else if (async_code == AC_TRANSFER_NEG) {
struct ccb_trans_settings *settings;
struct cam_path path;
settings = (struct ccb_trans_settings *)async_arg;
scsi_set_transfer_settings(settings, device,
xpt_compile_path(&path, NULL, bus->path_id, target->target_id,
device->lun_id);
scsi_set_transfer_settings(settings, &path,
/*async_update*/TRUE);
xpt_release_path(&path);
}
}
@ -2971,7 +2971,7 @@ scsi_announce_periph(struct cam_periph *periph)
u_int freq;
u_int mb;
mtx_assert(periph->sim->mtx, MA_OWNED);
cam_periph_assert(periph, MA_OWNED);
xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NORMAL);
cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;


@ -55,6 +55,7 @@ __FBSDID("$FreeBSD$");
static int ahci_setup_interrupt(device_t dev);
static void ahci_intr(void *data);
static void ahci_intr_one(void *data);
static void ahci_intr_one_edge(void *data);
static int ahci_suspend(device_t dev);
static int ahci_resume(device_t dev);
static int ahci_ch_init(device_t dev);
@ -62,8 +63,9 @@ static int ahci_ch_deinit(device_t dev);
static int ahci_ch_suspend(device_t dev);
static int ahci_ch_resume(device_t dev);
static void ahci_ch_pm(void *arg);
static void ahci_ch_intr_locked(void *data);
static void ahci_ch_intr(void *data);
static void ahci_ch_intr(void *arg);
static void ahci_ch_intr_direct(void *arg);
static void ahci_ch_intr_main(struct ahci_channel *ch, uint32_t istatus);
static int ahci_ctlr_reset(device_t dev);
static int ahci_ctlr_setup(device_t dev);
static void ahci_begin_transaction(device_t dev, union ccb *ccb);
@ -430,6 +432,7 @@ ahci_attach(device_t dev)
struct ahci_controller *ctlr = device_get_softc(dev);
device_t child;
int error, unit, speed, i;
u_int u;
uint32_t devid = pci_get_devid(dev);
uint8_t revid = pci_get_revid(dev);
u_int32_t version;
@ -529,6 +532,12 @@ ahci_attach(device_t dev)
rman_fini(&ctlr->sc_iomem);
return ENXIO;
}
i = 0;
for (u = ctlr->ichannels; u != 0; u >>= 1)
i += (u & 1);
ctlr->direct = (ctlr->msi && (ctlr->numirqs > 1 || i <= 3));
resource_int_value(device_get_name(dev), device_get_unit(dev),
"direct", &ctlr->direct);
/* Announce HW capabilities. */
speed = (ctlr->caps & AHCI_CAP_ISS) >> AHCI_CAP_ISS_SHIFT;
device_printf(dev,
@ -710,24 +719,26 @@ static int
ahci_setup_interrupt(device_t dev)
{
struct ahci_controller *ctlr = device_get_softc(dev);
int i, msi = 1;
int i;
ctlr->msi = 2;
/* Process hints. */
if (ctlr->quirks & AHCI_Q_NOMSI)
msi = 0;
ctlr->msi = 0;
resource_int_value(device_get_name(dev),
device_get_unit(dev), "msi", &msi);
if (msi < 0)
msi = 0;
else if (msi == 1)
msi = min(1, pci_msi_count(dev));
else if (msi > 1)
msi = pci_msi_count(dev);
device_get_unit(dev), "msi", &ctlr->msi);
ctlr->numirqs = 1;
if (ctlr->msi < 0)
ctlr->msi = 0;
else if (ctlr->msi == 1)
ctlr->msi = min(1, pci_msi_count(dev));
else if (ctlr->msi > 1) {
ctlr->msi = 2;
ctlr->numirqs = pci_msi_count(dev);
}
/* Allocate MSI if needed/present. */
if (msi && pci_alloc_msi(dev, &msi) == 0) {
ctlr->numirqs = msi;
} else {
msi = 0;
if (ctlr->msi && pci_alloc_msi(dev, &ctlr->numirqs) != 0) {
ctlr->msi = 0;
ctlr->numirqs = 1;
}
/* Check for single MSI vector fallback. */
@ -739,7 +750,7 @@ ahci_setup_interrupt(device_t dev)
/* Allocate all IRQs. */
for (i = 0; i < ctlr->numirqs; i++) {
ctlr->irqs[i].ctlr = ctlr;
ctlr->irqs[i].r_irq_rid = i + (msi ? 1 : 0);
ctlr->irqs[i].r_irq_rid = i + (ctlr->msi ? 1 : 0);
if (ctlr->numirqs == 1 || i >= ctlr->channels ||
(ctlr->ccc && i == ctlr->cccv))
ctlr->irqs[i].mode = AHCI_IRQ_MODE_ALL;
@ -753,7 +764,9 @@ ahci_setup_interrupt(device_t dev)
return ENXIO;
}
if ((bus_setup_intr(dev, ctlr->irqs[i].r_irq, ATA_INTR_FLAGS, NULL,
(ctlr->irqs[i].mode == AHCI_IRQ_MODE_ONE) ? ahci_intr_one : ahci_intr,
(ctlr->irqs[i].mode != AHCI_IRQ_MODE_ONE) ? ahci_intr :
((ctlr->quirks & AHCI_Q_EDGEIS) ? ahci_intr_one_edge :
ahci_intr_one),
&ctlr->irqs[i], &ctlr->irqs[i].handle))) {
/* SOS XXX release r_irq */
device_printf(dev, "unable to setup interrupt\n");
@ -822,14 +835,25 @@ ahci_intr_one(void *data)
int unit;
unit = irq->r_irq_rid - 1;
/* Some controllers have edge triggered IS. */
if (ctlr->quirks & AHCI_Q_EDGEIS)
ATA_OUTL(ctlr->r_mem, AHCI_IS, 1 << unit);
if ((arg = ctlr->interrupt[unit].argument))
ctlr->interrupt[unit].function(arg);
/* AHCI declares level triggered IS. */
if (!(ctlr->quirks & AHCI_Q_EDGEIS))
ATA_OUTL(ctlr->r_mem, AHCI_IS, 1 << unit);
ATA_OUTL(ctlr->r_mem, AHCI_IS, 1 << unit);
}
static void
ahci_intr_one_edge(void *data)
{
struct ahci_controller_irq *irq = data;
struct ahci_controller *ctlr = irq->ctlr;
void *arg;
int unit;
unit = irq->r_irq_rid - 1;
/* Some controllers have edge triggered IS. */
ATA_OUTL(ctlr->r_mem, AHCI_IS, 1 << unit);
if ((arg = ctlr->interrupt[unit].argument))
ctlr->interrupt[unit].function(arg);
}
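The hunks above split ahci_intr_one() into level- and edge-triggered variants and pick one when the interrupt is set up, instead of testing AHCI_Q_EDGEIS on every interrupt. The sketch below reduces that to choosing a handler function pointer once at setup time; the quirk flag value and all names are illustrative only.

/*
 * Select the interrupt handler variant once, at setup, rather than
 * branching on the quirk in the hot path.
 */
#include <stdio.h>

#define QUIRK_EDGE_IS   0x01

typedef void (*intr_handler_t)(int unit);

static void
intr_level(int unit)
{
    printf("unit %d: handle events, status is level triggered\n", unit);
}

static void
intr_edge(int unit)
{
    printf("unit %d: ack edge-triggered status first, then handle\n", unit);
}

static intr_handler_t
pick_handler(unsigned quirks)
{
    return ((quirks & QUIRK_EDGE_IS) ? intr_edge : intr_level);
}

int
main(void)
{
    intr_handler_t h = pick_handler(QUIRK_EDGE_IS);

    h(0);       /* simulated interrupt delivery */
    return (0);
}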
static struct resource *
@ -1033,6 +1057,7 @@ ahci_ch_attach(device_t dev)
mtx_init(&ch->mtx, "AHCI channel lock", NULL, MTX_DEF);
resource_int_value(device_get_name(dev),
device_get_unit(dev), "pm_level", &ch->pm_level);
STAILQ_INIT(&ch->doneq);
if (ch->pm_level > 3)
callout_init_mtx(&ch->pm_timer, &ch->mtx, 0);
callout_init_mtx(&ch->reset_timer, &ch->mtx, 0);
@ -1078,7 +1103,8 @@ ahci_ch_attach(device_t dev)
goto err0;
}
if ((bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, NULL,
ahci_ch_intr_locked, dev, &ch->ih))) {
ctlr->direct ? ahci_ch_intr_direct : ahci_ch_intr,
dev, &ch->ih))) {
device_printf(dev, "Unable to setup interrupt\n");
error = ENXIO;
goto err1;
@ -1501,18 +1527,60 @@ ahci_notify_events(device_t dev, u_int32_t status)
}
static void
ahci_ch_intr_locked(void *data)
ahci_done(struct ahci_channel *ch, union ccb *ccb)
{
device_t dev = (device_t)data;
mtx_assert(&ch->mtx, MA_OWNED);
if ((ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0 ||
ch->batch == 0) {
xpt_done(ccb);
return;
}
STAILQ_INSERT_TAIL(&ch->doneq, &ccb->ccb_h, sim_links.stqe);
}
static void
ahci_ch_intr(void *arg)
{
device_t dev = (device_t)arg;
struct ahci_channel *ch = device_get_softc(dev);
uint32_t istatus;
/* Read interrupt statuses. */
istatus = ATA_INL(ch->r_mem, AHCI_P_IS);
if (istatus == 0)
return;
mtx_lock(&ch->mtx);
xpt_batch_start(ch->sim);
ahci_ch_intr(data);
xpt_batch_done(ch->sim);
ahci_ch_intr_main(ch, istatus);
mtx_unlock(&ch->mtx);
}
static void
ahci_ch_intr_direct(void *arg)
{
device_t dev = (device_t)arg;
struct ahci_channel *ch = device_get_softc(dev);
struct ccb_hdr *ccb_h;
uint32_t istatus;
/* Read interrupt statuses. */
istatus = ATA_INL(ch->r_mem, AHCI_P_IS);
if (istatus == 0)
return;
mtx_lock(&ch->mtx);
ch->batch = 1;
ahci_ch_intr_main(ch, istatus);
ch->batch = 0;
mtx_unlock(&ch->mtx);
while ((ccb_h = STAILQ_FIRST(&ch->doneq)) != NULL) {
STAILQ_REMOVE_HEAD(&ch->doneq, sim_links.stqe);
xpt_done_direct((union ccb *)ccb_h);
}
}
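ahci_ch_intr_direct() above is the direct-dispatch half of the merge: completed CCBs are parked on ch->doneq while the channel lock is held and ch->batch is set, and xpt_done_direct() runs on each of them only after the lock is dropped. Here is a userland sketch of that batching, using FreeBSD's <sys/queue.h>; channel_done() plays the role of ahci_done() and the final printf stands in for xpt_done_direct(), so this is an analogy rather than the driver code.

/*
 * Completion batching for direct dispatch: defer completions onto a
 * local STAILQ while the channel lock is held, run them after unlock.
 */
#include <sys/queue.h>
#include <pthread.h>
#include <stdio.h>

struct req {
    int               id;
    STAILQ_ENTRY(req) link;
};
STAILQ_HEAD(reqq, req);

struct channel {
    pthread_mutex_t lock;
    struct reqq     doneq;      /* filled while lock is held */
    int             batch;      /* nonzero inside the interrupt path */
};

/* Called with ch->lock held, as ahci_done() is with the channel mutex. */
static void
channel_done(struct channel *ch, struct req *r)
{
    if (ch->batch) {
        STAILQ_INSERT_TAIL(&ch->doneq, r, link);    /* defer */
        return;
    }
    printf("req %d completed immediately\n", r->id);
}

static void
channel_intr(struct channel *ch, struct req **done, int n)
{
    struct req *r;

    pthread_mutex_lock(&ch->lock);
    ch->batch = 1;
    for (int i = 0; i < n; i++)
        channel_done(ch, done[i]);  /* queued, not completed */
    ch->batch = 0;
    pthread_mutex_unlock(&ch->lock);

    /* Direct dispatch: run completions without the channel lock. */
    while ((r = STAILQ_FIRST(&ch->doneq)) != NULL) {
        STAILQ_REMOVE_HEAD(&ch->doneq, link);
        printf("req %d completed directly after unlock\n", r->id);
    }
}

int
main(void)
{
    struct channel ch = { .batch = 0 };
    struct req a = { .id = 1 }, b = { .id = 2 };
    struct req *done[] = { &a, &b };

    pthread_mutex_init(&ch.lock, NULL);
    STAILQ_INIT(&ch.doneq);
    channel_intr(&ch, done, 2);
    return (0);
}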
static void
ahci_ch_pm(void *arg)
{
@ -1531,18 +1599,14 @@ ahci_ch_pm(void *arg)
}
static void
ahci_ch_intr(void *data)
ahci_ch_intr_main(struct ahci_channel *ch, uint32_t istatus)
{
device_t dev = (device_t)data;
struct ahci_channel *ch = device_get_softc(dev);
uint32_t istatus, cstatus, serr = 0, sntf = 0, ok, err;
device_t dev = ch->dev;
uint32_t cstatus, serr = 0, sntf = 0, ok, err;
enum ahci_err_type et;
int i, ccs, port, reset = 0;
/* Read and clear interrupt statuses. */
istatus = ATA_INL(ch->r_mem, AHCI_P_IS);
if (istatus == 0)
return;
/* Clear interrupt statuses. */
ATA_OUTL(ch->r_mem, AHCI_P_IS, istatus);
/* Read command statuses. */
if (ch->numtslots != 0)
@ -1634,7 +1698,7 @@ ahci_ch_intr(void *data)
xpt_freeze_devq(fccb->ccb_h.path, 1);
fccb->ccb_h.status |= CAM_DEV_QFRZN;
}
xpt_done(fccb);
ahci_done(ch, fccb);
}
for (i = 0; i < ch->numslots; i++) {
/* XXX: requests in loading state. */
@ -2043,7 +2107,7 @@ ahci_timeout(struct ahci_slot *slot)
xpt_freeze_devq(fccb->ccb_h.path, 1);
fccb->ccb_h.status |= CAM_DEV_QFRZN;
}
xpt_done(fccb);
ahci_done(ch, fccb);
}
if (!ch->fbs_enabled && !ch->wrongccs) {
/* Without FBS we know real timeout source. */
@ -2249,7 +2313,7 @@ ahci_end_transaction(struct ahci_slot *slot, enum ahci_err_type et)
ch->hold[slot->slot] = ccb;
ch->numhslots++;
} else
xpt_done(ccb);
ahci_done(ch, ccb);
/* If we have no other active commands, ... */
if (ch->rslots == 0) {
/* if there was fatal error - reset port. */
@ -2309,7 +2373,7 @@ completeall:
continue;
ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK;
ch->hold[i]->ccb_h.status |= CAM_RESRC_UNAVAIL;
xpt_done(ch->hold[i]);
ahci_done(ch, ch->hold[i]);
ch->hold[i] = NULL;
ch->numhslots--;
}
@ -2397,7 +2461,7 @@ ahci_process_read_log(device_t dev, union ccb *ccb)
ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK;
ch->hold[i]->ccb_h.status |= CAM_REQUEUE_REQ;
}
xpt_done(ch->hold[i]);
ahci_done(ch, ch->hold[i]);
ch->hold[i] = NULL;
ch->numhslots--;
}
@ -2412,7 +2476,7 @@ ahci_process_read_log(device_t dev, union ccb *ccb)
continue;
if (ch->hold[i]->ccb_h.func_code != XPT_ATA_IO)
continue;
xpt_done(ch->hold[i]);
ahci_done(ch, ch->hold[i]);
ch->hold[i] = NULL;
ch->numhslots--;
}
@ -2437,7 +2501,7 @@ ahci_process_request_sense(device_t dev, union ccb *ccb)
ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK;
ch->hold[i]->ccb_h.status |= CAM_AUTOSENSE_FAIL;
}
xpt_done(ch->hold[i]);
ahci_done(ch, ch->hold[i]);
ch->hold[i] = NULL;
ch->numhslots--;
xpt_free_ccb(ccb);
@ -2621,7 +2685,7 @@ ahci_reset(device_t dev)
xpt_freeze_devq(fccb->ccb_h.path, 1);
fccb->ccb_h.status |= CAM_DEV_QFRZN;
}
xpt_done(fccb);
ahci_done(ch, fccb);
}
/* Kill the engine and requeue all running commands. */
ahci_stop(dev);
@ -2635,7 +2699,7 @@ ahci_reset(device_t dev)
for (i = 0; i < ch->numslots; i++) {
if (!ch->hold[i])
continue;
xpt_done(ch->hold[i]);
ahci_done(ch, ch->hold[i]);
ch->hold[i] = NULL;
ch->numhslots--;
}
@ -2831,12 +2895,12 @@ ahci_check_ids(device_t dev, union ccb *ccb)
if (ccb->ccb_h.target_id > ((ch->caps & AHCI_CAP_SPM) ? 15 : 0)) {
ccb->ccb_h.status = CAM_TID_INVALID;
xpt_done(ccb);
ahci_done(ch, ccb);
return (-1);
}
if (ccb->ccb_h.target_lun != 0) {
ccb->ccb_h.status = CAM_LUN_INVALID;
xpt_done(ccb);
ahci_done(ch, ccb);
return (-1);
}
return (0);
@ -3028,15 +3092,19 @@ ahciaction(struct cam_sim *sim, union ccb *ccb)
ccb->ccb_h.status = CAM_REQ_INVALID;
break;
}
xpt_done(ccb);
ahci_done(ch, ccb);
}
static void
ahcipoll(struct cam_sim *sim)
{
struct ahci_channel *ch = (struct ahci_channel *)cam_sim_softc(sim);
uint32_t istatus;
ahci_ch_intr(ch->dev);
/* Read interrupt statuses and process if any. */
istatus = ATA_INL(ch->r_mem, AHCI_P_IS);
if (istatus != 0)
ahci_ch_intr_main(ch, istatus);
if (ch->resetting != 0 &&
(--ch->resetpolldiv <= 0 || !callout_pending(&ch->reset_timer))) {
ch->resetpolldiv = 1000;

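Note on the ahci.c hunks above: the old ahci_intr_one() tested the AHCI_Q_EDGEIS quirk on every interrupt. It is now split so that ahci_setup_interrupt() picks the handler once at attach time: ahci_intr_one_edge() acknowledges the port bit in AHCI_IS before dispatching (so an event arriving mid-dispatch latches a fresh edge), while ahci_intr_one() keeps the level-triggered ordering and acknowledges after dispatching. A rough standalone sketch of that ordering, with dummy names and a plain bit-clear standing in for the real write-1-to-clear AHCI_IS register:

/*
 * Sketch only: "is_reg", "dispatch" and QUIRK_EDGE_IS are stand-ins,
 * not the real AHCI register accessors or quirk flags.
 */
#include <stdio.h>

#define QUIRK_EDGE_IS	0x01

static unsigned int is_reg = 0x1;	/* pretend port 0 raised an interrupt */

static void
dispatch(int unit)
{
	printf("handling port %d events\n", unit);
}

static void
intr_one_level(int unit)
{
	/* Level-triggered IS: handle first, then acknowledge. */
	dispatch(unit);
	is_reg &= ~(1u << unit);
}

static void
intr_one_edge(int unit)
{
	/*
	 * Edge-triggered IS: acknowledge first, so an event arriving
	 * while we dispatch re-latches instead of being lost.
	 */
	is_reg &= ~(1u << unit);
	dispatch(unit);
}

int
main(void)
{
	int quirks = QUIRK_EDGE_IS;
	/* The quirk is checked once at setup, not on every interrupt. */
	void (*handler)(int) = (quirks & QUIRK_EDGE_IS) ?
	    intr_one_edge : intr_one_level;

	handler(0);
	printf("IS register now %#x\n", is_reg);
	return (0);
}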

@ -422,6 +422,8 @@ struct ahci_channel {
struct ahci_slot slot[AHCI_MAX_SLOTS];
union ccb *hold[AHCI_MAX_SLOTS];
struct mtx mtx; /* state lock */
STAILQ_HEAD(, ccb_hdr) doneq; /* queue of completed CCBs */
int batch; /* doneq is in use */
int devices; /* What is present */
int pm_present; /* PM presence reported */
int fbs_enabled; /* FIS-based switching enabled */
@ -494,6 +496,8 @@ struct ahci_controller {
int ichannels;
int ccc; /* CCC timeout */
int cccv; /* CCC vector */
int direct; /* Direct command completion */
int msi; /* MSI interrupts */
struct {
void (*function)(void *);
void *argument;

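The doneq and batch fields added to struct ahci_channel above back the new direct completion path in ahci.c: while ahci_ch_intr_direct() runs with the channel mutex held it sets batch, ahci_done() parks finished CCBs on doneq instead of calling xpt_done(), and the queue is drained with xpt_done_direct() only after the mutex is dropped. A minimal userland sketch of that pattern, using made-up request/channel types rather than the real CCB and channel structures:

/*
 * Standalone sketch of the deferred-completion pattern.  struct req,
 * struct chan and complete_direct() are illustrative names only.
 */
#include <stdio.h>
#include <sys/queue.h>

struct req {
	int id;
	STAILQ_ENTRY(req) link;
};

struct chan {
	int batch;			/* doneq is in use */
	STAILQ_HEAD(, req) doneq;	/* completed, not yet dispatched */
};

static void
complete_direct(struct req *r)
{
	/* Stands in for xpt_done_direct(): run completion inline. */
	printf("completed request %d\n", r->id);
}

static void
chan_done(struct chan *ch, struct req *r)
{
	/* Outside a batch, complete immediately (like plain xpt_done()). */
	if (ch->batch == 0) {
		complete_direct(r);
		return;
	}
	/* Inside a batch, just park it; the drain loop runs later. */
	STAILQ_INSERT_TAIL(&ch->doneq, r, link);
}

static void
chan_intr_direct(struct chan *ch, struct req *reqs, int n)
{
	struct req *r;
	int i;

	/* "Interrupt handler": the channel lock would be held here. */
	ch->batch = 1;
	for (i = 0; i < n; i++)
		chan_done(ch, &reqs[i]);
	ch->batch = 0;
	/* The real driver drops the mutex here, then drains the queue. */
	while ((r = STAILQ_FIRST(&ch->doneq)) != NULL) {
		STAILQ_REMOVE_HEAD(&ch->doneq, link);
		complete_direct(r);
	}
}

int
main(void)
{
	struct chan ch = { .batch = 0 };
	struct req reqs[3] = { { .id = 1 }, { .id = 2 }, { .id = 3 } };

	STAILQ_INIT(&ch.doneq);
	chan_intr_direct(&ch, reqs, 3);
	return (0);
}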

@ -353,9 +353,7 @@ ata_interrupt(void *data)
struct ata_channel *ch = (struct ata_channel *)data;
mtx_lock(&ch->state_mtx);
xpt_batch_start(ch->sim);
ata_interrupt_locked(data);
xpt_batch_done(ch->sim);
mtx_unlock(&ch->state_mtx);
}


@ -106,7 +106,7 @@ isp_attach_chan(ispsoftc_t *isp, struct cam_devq *devq, int chan)
return (EIO);
}
ISP_UNLOCK(isp);
if (xpt_create_path_unlocked(&path, NULL, cam_sim_path(sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
if (xpt_create_path(&path, NULL, cam_sim_path(sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
ISP_LOCK(isp);
xpt_bus_deregister(cam_sim_path(sim));
ISP_UNLOCK(isp);
@ -4131,12 +4131,12 @@ isp_target_thread(ispsoftc_t *isp, int chan)
periphdriver_register(&isptargdriver);
ISP_GET_PC(isp, chan, sim, sim);
ISP_GET_PC(isp, chan, path, path);
status = xpt_create_path_unlocked(&wpath, NULL, cam_sim_path(sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
status = xpt_create_path(&wpath, NULL, cam_sim_path(sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
if (status != CAM_REQ_CMP) {
isp_prt(isp, ISP_LOGERR, "%s: could not allocate wildcard path", __func__);
return;
}
status = xpt_create_path_unlocked(&path, NULL, cam_sim_path(sim), 0, 0);
status = xpt_create_path(&path, NULL, cam_sim_path(sim), 0, 0);
if (status != CAM_REQ_CMP) {
xpt_free_path(wpath);
isp_prt(isp, ISP_LOGERR, "%s: could not allocate path", __func__);


@ -654,9 +654,7 @@ mvs_ch_intr_locked(void *data)
struct mvs_channel *ch = device_get_softc(dev);
mtx_lock(&ch->mtx);
xpt_batch_start(ch->sim);
mvs_ch_intr(data);
xpt_batch_done(ch->sim);
mtx_unlock(&ch->mtx);
}


@ -838,9 +838,7 @@ siis_ch_intr_locked(void *data)
struct siis_channel *ch = device_get_softc(dev);
mtx_lock(&ch->mtx);
xpt_batch_start(ch->sim);
siis_ch_intr(data);
xpt_batch_done(ch->sim);
mtx_unlock(&ch->mtx);
}


@ -315,20 +315,12 @@ char *
getenv(const char *name)
{
char buf[KENV_MNAMELEN + 1 + KENV_MVALLEN + 1];
char *ret, *cp;
int len;
char *ret;
if (dynamic_kenv) {
mtx_lock(&kenv_lock);
cp = _getenv_dynamic(name, NULL);
if (cp != NULL) {
strcpy(buf, cp);
mtx_unlock(&kenv_lock);
len = strlen(buf) + 1;
ret = malloc(len, M_KENV, M_WAITOK);
strcpy(ret, buf);
if (getenv_string(name, buf, sizeof(buf))) {
ret = strdup(buf, M_KENV);
} else {
mtx_unlock(&kenv_lock);
ret = NULL;
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
"getenv");
@ -458,15 +450,20 @@ unsetenv(const char *name)
int
getenv_string(const char *name, char *data, int size)
{
char *tmp;
char *cp;
tmp = getenv(name);
if (tmp != NULL) {
strlcpy(data, tmp, size);
freeenv(tmp);
return (1);
} else
return (0);
if (dynamic_kenv) {
mtx_lock(&kenv_lock);
cp = _getenv_dynamic(name, NULL);
if (cp != NULL)
strlcpy(data, cp, size);
mtx_unlock(&kenv_lock);
} else {
cp = _getenv_static(name);
if (cp != NULL)
strlcpy(data, cp, size);
}
return (cp != NULL);
}
/*
@ -535,18 +532,15 @@ getenv_ulong(const char *name, unsigned long *data)
int
getenv_quad(const char *name, quad_t *data)
{
char *value;
char value[KENV_MNAMELEN + 1 + KENV_MVALLEN + 1];
char *vtp;
quad_t iv;
value = getenv(name);
if (value == NULL)
if (!getenv_string(name, value, sizeof(value)))
return (0);
iv = strtoq(value, &vtp, 0);
if (vtp == value || (vtp[0] != '\0' && vtp[1] != '\0')) {
freeenv(value);
if (vtp == value || (vtp[0] != '\0' && vtp[1] != '\0'))
return (0);
}
switch (vtp[0]) {
case 't': case 'T':
iv *= 1024;
@ -559,11 +553,9 @@ getenv_quad(const char *name, quad_t *data)
case '\0':
break;
default:
freeenv(value);
return (0);
}
*data = iv;
freeenv(value);
return (1);
}
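The kern_environment.c changes above make getenv_string() copy the value into a caller-supplied buffer (under kenv_lock for the dynamic environment) and rebuild getenv() and getenv_quad() on top of it, so no temporary M_KENV allocation, and therefore no sleepable context, is needed. A userland sketch of the same shape, with libc getenv() and a hypothetical HW_MEMSIZE variable standing in for the kernel environment (the buffer size and the t/g/m/k suffix handling mirror the kernel code, but this is not it):

/*
 * Allocation-free env lookup and quad parsing, sketched in userland.
 * env_string() stands in for getenv_string(); ENV_BUFLEN approximates
 * KENV_MNAMELEN + 1 + KENV_MVALLEN + 1.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ENV_BUFLEN	(128 + 1 + 128 + 1)

static int
env_string(const char *name, char *data, int size)
{
	const char *cp = getenv(name);		/* libc getenv as a stand-in */

	if (cp == NULL)
		return (0);
	strlcpy(data, cp, size);
	return (1);
}

static int
env_quad(const char *name, long long *data)
{
	char value[ENV_BUFLEN];			/* on stack, no malloc */
	char *vtp;
	long long iv;

	if (!env_string(name, value, sizeof(value)))
		return (0);
	iv = strtoll(value, &vtp, 0);
	if (vtp == value || (vtp[0] != '\0' && vtp[1] != '\0'))
		return (0);
	switch (vtp[0]) {			/* suffixes fall through */
	case 't': case 'T':
		iv *= 1024;
	case 'g': case 'G':
		iv *= 1024;
	case 'm': case 'M':
		iv *= 1024;
	case 'k': case 'K':
		iv *= 1024;
	case '\0':
		break;
	default:
		return (0);
	}
	*data = iv;
	return (1);
}

int
main(void)
{
	long long v;

	if (env_quad("HW_MEMSIZE", &v))
		printf("HW_MEMSIZE = %lld\n", v);
	else
		printf("HW_MEMSIZE not set or not a number\n");
	return (0);
}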