Lay groundwork in CAM for recording and reporting physical path and
other device attributes stored in the CAM Existing Device Table (EDT).
This includes some infrastructure required by the enclosure services
driver to export physical path information.

Make the CAM device advanced info interface accept store requests.

  sys/cam/scsi/scsi_all.c:
  sys/cam/scsi/scsi_all.h:
	- Replace scsi_get_sas_addr() with scsi_get_devid(), which takes
	  a callback that decides whether to accept a particular descriptor.
	  Provide callbacks for NAA IEEE Registered addresses and for SAS
	  addresses, replacing the old function.  This is needed because
	  the old function does not work for the enclosure address of a SAS
	  device, which is not flagged as a SAS address but is NAA IEEE
	  Registered.  It may be worthwhile merging this interface with the
	  devid match interface.  (A usage sketch follows below.)
	- Add a few more defines for some device ID fields.
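
	  As a rough usage sketch (not part of this change; the helper
	  function name and its arguments are illustrative, and the
	  scsi_all.h declarations are assumed to be in scope), a caller
	  that previously used scsi_get_sas_addr() now supplies the
	  acceptance callback itself:

	static uint8_t *
	find_sas_or_enclosure_addr(struct scsi_vpd_device_id *devid_page,
	    uint32_t devid_len)
	{
		uint8_t *addr;

		/* Prefer a descriptor flagged as a SAS target port. */
		addr = scsi_get_devid(devid_page, devid_len,
		    scsi_devid_is_sas_target);
		if (addr == NULL) {
			/*
			 * Fall back to any NAA IEEE Registered identifier,
			 * e.g. an enclosure address that is not flagged
			 * as a SAS address.
			 */
			addr = scsi_get_devid(devid_page, devid_len,
			    scsi_devid_is_naa_ieee_reg);
		}
		return (addr);
	}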

  sbin/camcontrol/camcontrol.c:
	- Update for the CCB_DEV_ADVINFO interface change.

  cam/cam_xpt_internal.h:
	- Add the new fields for the physical path string to the CAM EDT.
  cam/cam_ccb.h:
	- Rename CCB_GDEV_ADVINFO to simply CCB_DEV_ADVINFO, and the ccb
	  structure to ccb_dev_advinfo.
	- Add a flag that changes this CCB's action to store, rather than
	  the default, retrieve.
	- Add a new buffer type, CDAI_TYPE_PHYS_PATH, for the new CAM EDT
	  physpath field.
	- Remove the never-implemented transport & proto flags.
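
	  As an illustrative sketch only (path is an existing cam_path and
	  physpath_str is a hypothetical NUL-terminated string; error and
	  status handling elided), a store request is issued by setting
	  CDAI_FLAG_STORE:

	struct ccb_dev_advinfo cdai;

	memset(&cdai, 0, sizeof(cdai));
	xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
	cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
	cdai.flags = CDAI_FLAG_STORE;		/* store rather than retrieve */
	cdai.buftype = CDAI_TYPE_PHYS_PATH;
	cdai.bufsiz = strlen(physpath_str) + 1;	/* include the NUL */
	cdai.buf = (uint8_t *)physpath_str;
	xpt_action((union ccb *)&cdai);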
  cam/cam_xpt.c:
  cam/cam_xpt.h:
	- Add xpt_getattr(), a wrapper for fetching a device attribute
	  using GEOM attribute strings as the key.  It currently supports
	  "GEOM::ident" and "GEOM::physpath".
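
	  For example (illustrative only; a peripheral driver fetching the
	  stored physical path, with error handling abbreviated):

	char physpath[MAXPATHLEN];

	if (xpt_getattr(physpath, sizeof(physpath), "GEOM::physpath",
	    periph->path) == 0)
		printf("%s%d physical path: %s\n", periph->periph_name,
		    periph->unit_number, physpath);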

Submitted by: will
Reviewed by: gibbs

Extend the XPT_DEV_MATCH API to allow a device search by device ID.
As far as the API is concerned, device ID is a binary blob to be
interpreted by the transport layer.  The SCSI implementation assumes
it is an array of VPD device ID descriptors.

  sys/cam/cam_ccb.h:
	Create a new structure, device_id_match_pattern, and
	update the XPT_DEV_MATCH data structures and flags so
	that this pattern type can be used.
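
	A minimal consumer sketch (cdm is a struct ccb_dev_match whose
	pattern and match-result buffers are assumed to be set up as for
	any other XPT_DEV_MATCH request; devid_desc and devid_len are
	hypothetical and must hold one or more complete VPD device ID
	descriptors, at most 256 bytes):

	struct device_match_pattern *pat;

	cdm.patterns[0].type = DEV_MATCH_DEVICE;
	pat = &cdm.patterns[0].pattern.device_pattern;
	pat->flags = DEV_MATCH_DEVID;
	pat->data.devid_pat.id_len = devid_len;
	memcpy(pat->data.devid_pat.id, devid_desc, devid_len);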

  sys/cam/cam_xpt.c:
	- A single pattern matching on both inquiry data and device
	  ID is invalid.  Report any violators.
	- Pass device ID match requests through to the new routine
	  scsi_devid_match().  The direct call of a SCSI routine is
	  a layering violation, but no worse than the one a few
	  lines up that checks inquiry data.  Defer cleaning this
	  up until our future, larger, rototilling of CAM.
	- Zero out cam_ed and cam_et nodes on allocation.  Prior to
	  this change, device_id_len and device_id were not inialized,
	  preventing proper detection of the presence of this
	  information.

  sys/cam/scsi/scsi_all.c:
  sys/cam/scsi/scsi_all.h:
	Add the scsi_devid_match() routine.
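
	The routine treats both buffers as arrays of VPD device ID
	descriptors and returns 0 as soon as any descriptor in one buffer
	matches a descriptor in the other; for instance (hypothetical
	buffers and lengths):

	if (scsi_devid_match(devid_a, devid_a_len,
	    devid_b, devid_b_len) == 0)
		printf("descriptor lists share a device ID\n");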

Add a helper function for extracting peripheral driver names

  sys/cam/cam_periph.c:
  sys/cam/cam_periph.h:
	Add the cam_periph_list() method, which fills an sbuf
	with a comma-delimited list of the peripheral instances
	associated with a given CAM path.
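
	A brief usage sketch (the fixed-size buffer is chosen arbitrarily):

	struct sbuf sb;
	char buf[128];

	sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
	if (cam_periph_list(path, &sb) > 0) {
		sbuf_finish(&sb);
		printf("attached peripherals: %s\n", sbuf_data(&sb));
	}
	sbuf_delete(&sb);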

Add helper functions for SCSI commands used by the SES driver.

  sys/cam/scsi/scsi_all.c:
  sys/cam/scsi/scsi_all.h:
	Add structure definitions and csio-filling functions for
	the RECEIVE DIAGNOSTIC RESULTS and SEND DIAGNOSTIC commands.
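
	For example (the page code, buffer, retry count, and timeout are
	illustrative; ccb and buf are assumed to exist), an SES consumer
	could request an enclosure status page with:

	scsi_receive_diagnostic_results(&ccb->csio,
					/*retries*/3,
					/*cbfcnp*/NULL,
					MSG_SIMPLE_Q_TAG,
					/*pcv*/1,
					/*page_code*/0x02,
					/*data_ptr*/buf,
					/*allocation_length*/sizeof(buf),
					SSD_FULL_SIZE,
					/*timeout*/5000);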

Misc CAM XPT cleanups.

  sys/cam/cam_xpt.c:
	Broadcast AC_FOUND_DEVICE and AC_PATH_REGISTERED
	events at the time async event handlers are attached
	even when registering just for events on a particular
	SIM.  Previously, you had to register for these
	events on all SIMs in the system in order to get
	the initial broadcast even though subsequent device
	and path arrivals would be delivered.

  sys/cam/cam_xpt.c:
	Remove SIM mutex held asserts from path accessors.
	CAM paths are reference counted and it is this
	reference count, not the sim mutex, that guarantees
	they are stable.

Sponsored by: Spectra Logic Corporation
Justin T. Gibbs 2011-06-14 14:53:17 +00:00
parent 1875bbfe54
commit 3501942bbe
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=223081
11 changed files with 476 additions and 124 deletions

@ -5044,13 +5044,13 @@ getdevid(struct cam_devitem *item)
* then allocate that much memory and try again.
*/
retry:
ccb->ccb_h.func_code = XPT_GDEV_ADVINFO;
ccb->ccb_h.func_code = XPT_DEV_ADVINFO;
ccb->ccb_h.flags = CAM_DIR_IN;
ccb->cgdai.flags = CGDAI_FLAG_PROTO;
ccb->cgdai.buftype = CGDAI_TYPE_SCSI_DEVID;
ccb->cgdai.bufsiz = item->device_id_len;
ccb->cdai.flags = 0;
ccb->cdai.buftype = CDAI_TYPE_SCSI_DEVID;
ccb->cdai.bufsiz = item->device_id_len;
if (item->device_id_len != 0)
ccb->cgdai.buf = (uint8_t *)item->device_id;
ccb->cdai.buf = (uint8_t *)item->device_id;
if (cam_send_ccb(dev, ccb) < 0) {
warn("%s: error sending XPT_GDEV_ADVINFO CCB", __func__);
@ -5069,13 +5069,13 @@ getdevid(struct cam_devitem *item)
* This is our first time through. Allocate the buffer,
* and then go back to get the data.
*/
if (ccb->cgdai.provsiz == 0) {
if (ccb->cdai.provsiz == 0) {
warnx("%s: invalid .provsiz field returned with "
"XPT_GDEV_ADVINFO CCB", __func__);
retval = 1;
goto bailout;
}
item->device_id_len = ccb->cgdai.provsiz;
item->device_id_len = ccb->cdai.provsiz;
item->device_id = malloc(item->device_id_len);
if (item->device_id == NULL) {
warn("%s: unable to allocate %d bytes", __func__,
@ -5283,8 +5283,9 @@ findsasdevice(struct cam_devlist *devlist, uint64_t sasaddr)
/*
* XXX KDM look for LUN IDs as well?
*/
item_addr = scsi_get_sas_addr(item->device_id,
item->device_id_len);
item_addr = scsi_get_devid(item->device_id,
item->device_id_len,
scsi_devid_is_sas_target);
if (item_addr == NULL)
continue;

@ -144,8 +144,8 @@ typedef enum {
/* Device statistics (error counts, etc.) */
XPT_FREEZE_QUEUE = 0x0d,
/* Freeze device queue */
XPT_GDEV_ADVINFO = 0x0e,
/* Advanced device information */
XPT_DEV_ADVINFO = 0x0e,
/* Get/Set Device advanced information */
/* SCSI Control Functions: 0x10->0x1F */
XPT_ABORT = 0x10,
/* Abort the specified CCB */
@ -391,15 +391,24 @@ typedef enum {
DEV_MATCH_TARGET = 0x002,
DEV_MATCH_LUN = 0x004,
DEV_MATCH_INQUIRY = 0x008,
DEV_MATCH_DEVID = 0x010,
DEV_MATCH_ANY = 0x00f
} dev_pattern_flags;
struct device_id_match_pattern {
uint8_t id_len;
uint8_t id[256];
};
struct device_match_pattern {
path_id_t path_id;
target_id_t target_id;
lun_id_t target_lun;
struct scsi_static_inquiry_pattern inq_pat;
dev_pattern_flags flags;
path_id_t path_id;
target_id_t target_id;
lun_id_t target_lun;
dev_pattern_flags flags;
union {
struct scsi_static_inquiry_pattern inq_pat;
struct device_id_match_pattern devid_pat;
} data;
};
typedef enum {
@ -745,6 +754,7 @@ struct ccb_relsim {
* Definitions for the asynchronous callback CCB fields.
*/
typedef enum {
AC_ADVINFO_CHANGED = 0x2000,/* Advance info might have changes */
AC_CONTRACT = 0x1000,/* A contractual callback */
AC_GETDEV_CHANGED = 0x800,/* Getdev info might have changed */
AC_INQ_CHANGED = 0x400,/* Inquiry info might have changed */
@ -1094,19 +1104,20 @@ struct ccb_eng_exec { /* This structure must match SCSIIO size */
#define XPT_CCB_INVALID -1 /* for signaling a bad CCB to free */
/*
* CCB for getting advanced device information. This operates in a fashion
* CCB for working with advanced device information. This operates in a fashion
* similar to XPT_GDEV_TYPE. Specify the target in ccb_h, the buffer
* type requested, and provide a buffer size/buffer to write to. If the
* buffer is too small, the handler will set GDEVAI_FLAG_MORE.
* buffer is too small, provsiz will be larger than bufsiz.
*/
struct ccb_getdev_advinfo {
struct ccb_dev_advinfo {
struct ccb_hdr ccb_h;
uint32_t flags;
#define CGDAI_FLAG_TRANSPORT 0x1
#define CGDAI_FLAG_PROTO 0x2
#define CDAI_FLAG_STORE 0x1 /* If set, action becomes store */
uint32_t buftype; /* IN: Type of data being requested */
/* NB: buftype is interpreted on a per-transport basis */
#define CGDAI_TYPE_SCSI_DEVID 1
#define CDAI_TYPE_SCSI_DEVID 1
#define CDAI_TYPE_SERIAL_NUM 2
#define CDAI_TYPE_PHYS_PATH 3
off_t bufsiz; /* IN: Size of external buffer */
#define CAM_SCSI_DEVID_MAXLEN 65536 /* length in buffer is an uint16_t */
off_t provsiz; /* OUT: Size required/used */
@ -1151,7 +1162,7 @@ union ccb {
struct ccb_rescan crcn;
struct ccb_debug cdbg;
struct ccb_ataio ataio;
struct ccb_getdev_advinfo cgdai;
struct ccb_dev_advinfo cdai;
};
__BEGIN_DECLS

@ -42,6 +42,7 @@ __FBSDID("$FreeBSD$");
#include <sys/proc.h>
#include <sys/devicestat.h>
#include <sys/bus.h>
#include <sys/sbuf.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
@ -303,6 +304,38 @@ cam_periph_find(struct cam_path *path, char *name)
return(NULL);
}
/*
* Find a peripheral structure with the specified path, target, lun,
* and (optionally) type. If the name is NULL, this function will return
* the first peripheral driver that matches the specified path.
*/
int
cam_periph_list(struct cam_path *path, struct sbuf *sb)
{
struct periph_driver **p_drv;
struct cam_periph *periph;
int count;
count = 0;
xpt_lock_buses();
for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
if (xpt_path_comp(periph->path, path) != 0)
continue;
if (sbuf_len(sb) != 0)
sbuf_cat(sb, ",");
sbuf_printf(sb, "%s%d", periph->periph_name,
periph->unit_number);
count++;
}
}
xpt_unlock_buses();
return (count);
}
cam_status
cam_periph_acquire(struct cam_periph *periph)
{
@ -654,12 +687,12 @@ cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
dirs[1] = CAM_DIR_IN;
numbufs = 2;
break;
case XPT_GDEV_ADVINFO:
if (ccb->cgdai.bufsiz == 0)
case XPT_DEV_ADVINFO:
if (ccb->cdai.bufsiz == 0)
return (0);
data_ptrs[0] = (uint8_t **)&ccb->cgdai.buf;
lengths[0] = ccb->cgdai.bufsiz;
data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
lengths[0] = ccb->cdai.bufsiz;
dirs[0] = CAM_DIR_IN;
numbufs = 1;
@ -813,9 +846,9 @@ cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
data_ptrs[0] = &ccb->smpio.smp_request;
data_ptrs[1] = &ccb->smpio.smp_response;
break;
case XPT_GDEV_ADVINFO:
case XPT_DEV_ADVINFO:
numbufs = min(mapinfo->num_bufs_used, 1);
data_ptrs[0] = (uint8_t **)&ccb->cgdai.buf;
data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
break;
default:
/* allow ourselves to be swapped once again */

@ -142,6 +142,7 @@ cam_status cam_periph_alloc(periph_ctor_t *periph_ctor,
char *name, cam_periph_type type, struct cam_path *,
ac_callback_t *, ac_code, void *arg);
struct cam_periph *cam_periph_find(struct cam_path *path, char *name);
int cam_periph_list(struct cam_path *, struct sbuf *);
cam_status cam_periph_acquire(struct cam_periph *periph);
void cam_periph_release(struct cam_periph *periph);
void cam_periph_release_locked(struct cam_periph *periph);
@ -200,5 +201,12 @@ cam_periph_owned(struct cam_periph *periph)
return (mtx_owned(periph->sim->mtx));
}
static __inline int
cam_periph_sleep(struct cam_periph *periph, void *chan, int priority,
const char *wmesg, int timo)
{
return (msleep(chan, periph->sim->mtx, priority, wmesg, timo));
}
#endif /* _KERNEL */
#endif /* _CAM_CAM_PERIPH_H */

@ -287,9 +287,6 @@ static xpt_targetfunc_t xptdeftargetfunc;
static xpt_devicefunc_t xptdefdevicefunc;
static xpt_periphfunc_t xptdefperiphfunc;
static void xpt_finishconfig_task(void *context, int pending);
static int xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
static int xpt_for_all_devices(xpt_devicefunc_t *tr_func,
void *arg);
static void xpt_dev_async_default(u_int32_t async_code,
struct cam_eb *bus,
struct cam_et *target,
@ -1105,6 +1102,44 @@ xpt_announce_periph(struct cam_periph *periph, char *announce_string)
periph->unit_number, announce_string);
}
int
xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path)
{
int ret = -1;
struct ccb_dev_advinfo cdai;
memset(&cdai, 0, sizeof(cdai));
xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
cdai.bufsiz = len;
if (!strcmp(attr, "GEOM::ident"))
cdai.buftype = CDAI_TYPE_SERIAL_NUM;
else if (!strcmp(attr, "GEOM::physpath"))
cdai.buftype = CDAI_TYPE_PHYS_PATH;
else
goto out;
cdai.buf = malloc(cdai.bufsiz, M_CAMXPT, M_NOWAIT|M_ZERO);
if (cdai.buf == NULL) {
ret = ENOMEM;
goto out;
}
xpt_action((union ccb *)&cdai); /* can only be synchronous */
if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
if (cdai.provsiz == 0)
goto out;
ret = 0;
if (strlcpy(buf, cdai.buf, len) >= len)
ret = EFAULT;
out:
if (cdai.buf != NULL)
free(cdai.buf, M_CAMXPT);
return ret;
}
static dev_match_ret
xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
struct cam_eb *bus)
@ -1241,6 +1276,7 @@ xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
for (i = 0; i < num_patterns; i++) {
struct device_match_pattern *cur_pattern;
struct scsi_vpd_device_id *device_id_page;
/*
* If the pattern in question isn't for a device node, we
@ -1255,22 +1291,17 @@ xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
cur_pattern = &patterns[i].pattern.device_pattern;
/* Error out if mutually exclusive options are specified. */
if ((cur_pattern->flags & (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
== (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
return(DM_RET_ERROR);
/*
* If they want to match any device node, we give them any
* device node.
*/
if (cur_pattern->flags == DEV_MATCH_ANY) {
/* set the copy flag */
retval |= DM_RET_COPY;
/*
* If we've already decided on an action, go ahead
* and return.
*/
if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
return(retval);
}
if (cur_pattern->flags == DEV_MATCH_ANY)
goto copy_dev_node;
/*
* Not sure why someone would do this...
@ -1292,11 +1323,22 @@ xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
&& (cam_quirkmatch((caddr_t)&device->inq_data,
(caddr_t)&cur_pattern->inq_pat,
1, sizeof(cur_pattern->inq_pat),
(caddr_t)&cur_pattern->data.inq_pat,
1, sizeof(cur_pattern->data.inq_pat),
scsi_static_inquiry_match) == NULL))
continue;
device_id_page = (struct scsi_vpd_device_id *)device->device_id;
if (((cur_pattern->flags & DEV_MATCH_DEVID) != 0)
&& (device->device_id_len < SVPD_DEVICE_ID_HDR_LEN
|| scsi_devid_match((uint8_t *)device_id_page->desc_list,
device->device_id_len
- SVPD_DEVICE_ID_HDR_LEN,
cur_pattern->data.devid_pat.id,
cur_pattern->data.devid_pat.id_len) != 0))
continue;
copy_dev_node:
/*
* If we get to this point, the user definitely wants
* information on this device. So tell the caller to copy
@ -2889,6 +2931,8 @@ xpt_action_default(union ccb *start_ccb)
case XPT_TERM_IO:
case XPT_ENG_INQ:
/* XXX Implement */
printf("%s: CCB type %#x not supported\n", __func__,
start_ccb->ccb_h.func_code);
start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
if (start_ccb->ccb_h.func_code & XPT_FC_DEV_QUEUED) {
xpt_done(start_ccb);
@ -3528,16 +3572,12 @@ xpt_path_string(struct cam_path *path, char *str, size_t str_len)
path_id_t
xpt_path_path_id(struct cam_path *path)
{
mtx_assert(path->bus->sim->mtx, MA_OWNED);
return(path->bus->path_id);
}
target_id_t
xpt_path_target_id(struct cam_path *path)
{
mtx_assert(path->bus->sim->mtx, MA_OWNED);
if (path->target != NULL)
return (path->target->target_id);
else
@ -3547,8 +3587,6 @@ xpt_path_target_id(struct cam_path *path)
lun_id_t
xpt_path_lun_id(struct cam_path *path)
{
mtx_assert(path->bus->sim->mtx, MA_OWNED);
if (path->device != NULL)
return (path->device->lun_id);
else
@ -4242,7 +4280,8 @@ xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
{
struct cam_et *target;
target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT, M_NOWAIT);
target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT,
M_NOWAIT|M_ZERO);
if (target != NULL) {
struct cam_et *cur_target;
@ -4330,7 +4369,7 @@ xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
device = NULL;
} else {
device = (struct cam_ed *)malloc(sizeof(*device),
M_CAMXPT, M_NOWAIT);
M_CAMXPT, M_NOWAIT|M_ZERO);
}
if (device != NULL) {
@ -4676,27 +4715,29 @@ xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg,
csa.callback_arg = cbarg;
xpt_action((union ccb *)&csa);
status = csa.ccb_h.status;
if (xptpath) {
xpt_free_path(path);
mtx_unlock(&xsoftc.xpt_lock);
if ((status == CAM_REQ_CMP) &&
(csa.event_enable & AC_FOUND_DEVICE)) {
/*
* Get this peripheral up to date with all
* the currently existing devices.
*/
xpt_for_all_devices(xptsetasyncfunc, &csa);
}
if ((status == CAM_REQ_CMP) &&
(csa.event_enable & AC_PATH_REGISTERED)) {
/*
* Get this peripheral up to date with all
* the currently existing busses.
*/
xpt_for_all_busses(xptsetasyncbusfunc, &csa);
}
}
if ((status == CAM_REQ_CMP) &&
(csa.event_enable & AC_FOUND_DEVICE)) {
/*
* Get this peripheral up to date with all
* the currently existing devices.
*/
xpt_for_all_devices(xptsetasyncfunc, &csa);
}
if ((status == CAM_REQ_CMP) &&
(csa.event_enable & AC_PATH_REGISTERED)) {
/*
* Get this peripheral up to date with all
* the currently existing busses.
*/
xpt_for_all_busses(xptsetasyncbusfunc, &csa);
}
return (status);
}
@ -4852,8 +4893,10 @@ camisr_runqueue(void *V_queue)
if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
&& (--dev->tag_delay_count == 0))
xpt_start_tags(ccb_h->path);
if (!device_is_send_queued(dev))
xpt_schedule_dev_sendq(ccb_h->path->bus, dev);
if (!device_is_send_queued(dev)) {
runq = xpt_schedule_dev_sendq(ccb_h->path->bus,
dev);
}
}
if (ccb_h->status & CAM_RELEASE_SIMQ) {

@ -103,6 +103,8 @@ cam_status xpt_create_path_unlocked(struct cam_path **new_path_ptr,
struct cam_periph *perph,
path_id_t path_id,
target_id_t target_id, lun_id_t lun_id);
int xpt_getattr(char *buf, size_t len, const char *attr,
struct cam_path *path);
void xpt_free_path(struct cam_path *path);
int xpt_path_comp(struct cam_path *path1,
struct cam_path *path2);

@ -97,6 +97,8 @@ struct cam_ed {
uint8_t supported_vpds_len;
uint32_t device_id_len;
uint8_t *device_id;
uint8_t physpath_len;
uint8_t *physpath; /* physical path string form */
struct ata_params ident_data;
u_int8_t inq_flags; /*
* Current settings for inquiry flags.

@ -3552,32 +3552,63 @@ scsi_calc_syncparam(u_int period)
return (period/400);
}
uint8_t *
scsi_get_sas_addr(struct scsi_vpd_device_id *id, uint32_t len)
int
scsi_devid_is_naa_ieee_reg(uint8_t *bufp)
{
uint8_t *bufp, *buf_end;
struct scsi_vpd_id_descriptor *descr;
struct scsi_vpd_id_naa_basic *naa;
bufp = buf_end = (uint8_t *)id;
bufp += SVPD_DEVICE_ID_HDR_LEN;
buf_end += len;
while (bufp < buf_end) {
descr = (struct scsi_vpd_id_descriptor *)bufp;
bufp += SVPD_DEVICE_ID_DESC_HDR_LEN;
/* Right now, we only care about SAS NAA IEEE Reg addrs */
if (((descr->id_type & SVPD_ID_PIV) != 0)
&& (descr->proto_codeset >> SVPD_ID_PROTO_SHIFT) ==
SCSI_PROTO_SAS
&& (descr->id_type & SVPD_ID_TYPE_MASK) == SVPD_ID_TYPE_NAA){
naa = (struct scsi_vpd_id_naa_basic *)bufp;
if ((naa->naa >> 4) == SVPD_ID_NAA_IEEE_REG)
return bufp;
}
bufp += descr->length;
descr = (struct scsi_vpd_id_descriptor *)bufp;
naa = (struct scsi_vpd_id_naa_basic *)descr->identifier;
if ((descr->id_type & SVPD_ID_TYPE_MASK) != SVPD_ID_TYPE_NAA)
return 0;
if (descr->length < sizeof(struct scsi_vpd_id_naa_ieee_reg))
return 0;
if ((naa->naa >> SVPD_ID_NAA_NAA_SHIFT) != SVPD_ID_NAA_IEEE_REG)
return 0;
return 1;
}
int
scsi_devid_is_sas_target(uint8_t *bufp)
{
struct scsi_vpd_id_descriptor *descr;
descr = (struct scsi_vpd_id_descriptor *)bufp;
if (!scsi_devid_is_naa_ieee_reg(bufp))
return 0;
if ((descr->id_type & SVPD_ID_PIV) == 0) /* proto field reserved */
return 0;
if ((descr->proto_codeset >> SVPD_ID_PROTO_SHIFT) != SCSI_PROTO_SAS)
return 0;
return 1;
}
uint8_t *
scsi_get_devid(struct scsi_vpd_device_id *id, uint32_t page_len,
scsi_devid_checkfn_t ck_fn)
{
struct scsi_vpd_id_descriptor *desc;
uint8_t *page_end;
uint8_t *desc_buf_end;
page_end = (uint8_t *)id + page_len;
if (page_end < id->desc_list)
return (NULL);
desc_buf_end = MIN(id->desc_list + scsi_2btoul(id->length), page_end);
for (desc = (struct scsi_vpd_id_descriptor *)id->desc_list;
desc->identifier <= desc_buf_end
&& desc->identifier + desc->length <= desc_buf_end;
desc = (struct scsi_vpd_id_descriptor *)(desc->identifier
+ desc->length)) {
if (ck_fn == NULL || ck_fn((uint8_t *)desc) != 0)
return (desc->identifier);
}
return NULL;
return (NULL);
}
void
@ -4174,6 +4205,77 @@ scsi_read_write(struct ccb_scsiio *csio, u_int32_t retries,
timeout);
}
void
scsi_receive_diagnostic_results(struct ccb_scsiio *csio, u_int32_t retries,
void (*cbfcnp)(struct cam_periph *, union ccb*),
uint8_t tag_action, int pcv, uint8_t page_code,
uint8_t *data_ptr, uint16_t allocation_length,
uint8_t sense_len, uint32_t timeout)
{
struct scsi_receive_diag *scsi_cmd;
scsi_cmd = (struct scsi_receive_diag *)&csio->cdb_io.cdb_bytes;
memset(scsi_cmd, 0, sizeof(*scsi_cmd));
scsi_cmd->opcode = RECEIVE_DIAGNOSTIC;
if (pcv) {
scsi_cmd->byte2 |= SRD_PCV;
scsi_cmd->page_code = page_code;
}
scsi_ulto2b(allocation_length, scsi_cmd->length);
cam_fill_csio(csio,
retries,
cbfcnp,
/*flags*/CAM_DIR_IN,
tag_action,
data_ptr,
allocation_length,
sense_len,
sizeof(*scsi_cmd),
timeout);
}
void
scsi_send_diagnostic(struct ccb_scsiio *csio, u_int32_t retries,
void (*cbfcnp)(struct cam_periph *, union ccb *),
uint8_t tag_action, int unit_offline, int device_offline,
int self_test, int page_format, int self_test_code,
uint8_t *data_ptr, uint16_t param_list_length,
uint8_t sense_len, uint32_t timeout)
{
struct scsi_send_diag *scsi_cmd;
scsi_cmd = (struct scsi_send_diag *)&csio->cdb_io.cdb_bytes;
memset(scsi_cmd, 0, sizeof(*scsi_cmd));
scsi_cmd->opcode = SEND_DIAGNOSTIC;
/*
* The default self-test mode control and specific test
* control are mutually exclusive.
*/
if (self_test)
self_test_code = SSD_SELF_TEST_CODE_NONE;
scsi_cmd->byte2 = ((self_test_code << SSD_SELF_TEST_CODE_SHIFT)
& SSD_SELF_TEST_CODE_MASK)
| (unit_offline ? SSD_UNITOFFL : 0)
| (device_offline ? SSD_DEVOFFL : 0)
| (self_test ? SSD_SELFTEST : 0)
| (page_format ? SSD_PF : 0);
scsi_ulto2b(param_list_length, scsi_cmd->length);
cam_fill_csio(csio,
retries,
cbfcnp,
/*flags*/param_list_length ? CAM_DIR_OUT : CAM_DIR_NONE,
tag_action,
data_ptr,
param_list_length,
sense_len,
sizeof(*scsi_cmd),
timeout);
}
void
scsi_start_stop(struct ccb_scsiio *csio, u_int32_t retries,
void (*cbfcnp)(struct cam_periph *, union ccb *),
@ -4206,7 +4308,6 @@ scsi_start_stop(struct ccb_scsiio *csio, u_int32_t retries,
sense_len,
sizeof(*scsi_cmd),
timeout);
}
@ -4264,6 +4365,66 @@ scsi_static_inquiry_match(caddr_t inqbuffer, caddr_t table_entry)
return (-1);
}
/**
* Compare two buffers of vpd device descriptors for a match.
*
* \param lhs Pointer to first buffer of descriptors to compare.
* \param lhs_len The length of the first buffer.
* \param rhs Pointer to second buffer of descriptors to compare.
* \param rhs_len The length of the second buffer.
*
* \return 0 on a match, -1 otherwise.
*
* Treat rhs and lhs as arrays of vpd device id descriptors. Walk lhs matching
* agains each element in rhs until all data are exhausted or we have found
* a match.
*/
int
scsi_devid_match(uint8_t *lhs, size_t lhs_len, uint8_t *rhs, size_t rhs_len)
{
struct scsi_vpd_id_descriptor *lhs_id;
struct scsi_vpd_id_descriptor *lhs_last;
struct scsi_vpd_id_descriptor *rhs_last;
uint8_t *lhs_end;
uint8_t *rhs_end;
lhs_end = lhs + lhs_len;
rhs_end = rhs + rhs_len;
/*
* rhs_last and lhs_last are the last posible position of a valid
* descriptor assuming it had a zero length identifier. We use
* these variables to insure we can safely dereference the length
* field in our loop termination tests.
*/
lhs_last = (struct scsi_vpd_id_descriptor *)
(lhs_end - __offsetof(struct scsi_vpd_id_descriptor, identifier));
rhs_last = (struct scsi_vpd_id_descriptor *)
(rhs_end - __offsetof(struct scsi_vpd_id_descriptor, identifier));
lhs_id = (struct scsi_vpd_id_descriptor *)lhs;
while (lhs_id <= lhs_last
&& (lhs_id->identifier + lhs_id->length) <= lhs_end) {
struct scsi_vpd_id_descriptor *rhs_id;
rhs_id = (struct scsi_vpd_id_descriptor *)rhs;
while (rhs_id <= rhs_last
&& (rhs_id->identifier + rhs_id->length) <= rhs_end) {
if (rhs_id->length == lhs_id->length
&& memcmp(rhs_id->identifier, lhs_id->identifier,
rhs_id->length) == 0)
return (0);
rhs_id = (struct scsi_vpd_id_descriptor *)
(rhs_id->identifier + rhs_id->length);
}
lhs_id = (struct scsi_vpd_id_descriptor *)
(lhs_id->identifier + lhs_id->length);
}
return (-1);
}
#ifdef _KERNEL
static void
init_scsi_delay(void)

@ -115,6 +115,7 @@ struct scsi_request_sense
{
u_int8_t opcode;
u_int8_t byte2;
#define SRS_DESC 0x01
u_int8_t unused[2];
u_int8_t length;
u_int8_t control;
@ -128,17 +129,33 @@ struct scsi_test_unit_ready
u_int8_t control;
};
struct scsi_send_diag
{
u_int8_t opcode;
u_int8_t byte2;
#define SSD_UOL 0x01
#define SSD_DOL 0x02
#define SSD_SELFTEST 0x04
#define SSD_PF 0x10
u_int8_t unused[1];
u_int8_t paramlen[2];
u_int8_t control;
struct scsi_receive_diag {
uint8_t opcode;
uint8_t byte2;
#define SRD_PCV 0x01
uint8_t page_code;
uint8_t length[2];
uint8_t control;
};
struct scsi_send_diag {
uint8_t opcode;
uint8_t byte2;
#define SSD_UNITOFFL 0x01
#define SSD_DEVOFFL 0x02
#define SSD_SELFTEST 0x04
#define SSD_PF 0x10
#define SSD_SELF_TEST_CODE_MASK 0xE0
#define SSD_SELF_TEST_CODE_SHIFT 5
#define SSD_SELF_TEST_CODE_NONE 0x00
#define SSD_SELF_TEST_CODE_BG_SHORT 0x01
#define SSD_SELF_TEST_CODE_BG_EXTENDED 0x02
#define SSD_SELF_TEST_CODE_BG_ABORT 0x04
#define SSD_SELF_TEST_CODE_FG_SHORT 0x05
#define SSD_SELF_TEST_CODE_FG_EXTENDED 0x06
uint8_t reserved;
uint8_t length[2];
uint8_t control;
};
struct scsi_sense
@ -894,11 +911,12 @@ struct scsi_vpd_id_naa_basic
uint8_t naa : 4;
uint8_t naa_desig : 4;
*/
#define SVPD_ID_NAA_NAA_SHIFT 4
#define SVPD_ID_NAA_IEEE_EXT 0x02
#define SVPD_ID_NAA_LOCAL_REG 0x03
#define SVPD_ID_NAA_IEEE_REG 0x05
#define SVPD_ID_NAA_IEEE_REG_EXT 0x06
uint8_t naa_data[0];
uint8_t naa_data[];
};
struct scsi_vpd_id_naa_ieee_extended_id
@ -1322,7 +1340,12 @@ void scsi_print_inquiry(struct scsi_inquiry_data *inq_data);
u_int scsi_calc_syncsrate(u_int period_factor);
u_int scsi_calc_syncparam(u_int period);
uint8_t * scsi_get_sas_addr(struct scsi_vpd_device_id *id, uint32_t len);
typedef int (*scsi_devid_checkfn_t)(uint8_t *);
int scsi_devid_is_naa_ieee_reg(uint8_t *bufp);
int scsi_devid_is_sas_target(uint8_t *bufp);
uint8_t * scsi_get_devid(struct scsi_vpd_device_id *id, uint32_t len,
scsi_devid_checkfn_t ck_fn);
void scsi_test_unit_ready(struct ccb_scsiio *csio, u_int32_t retries,
void (*cbfcnp)(struct cam_periph *,
@ -1439,6 +1462,22 @@ void scsi_synchronize_cache(struct ccb_scsiio *csio,
u_int32_t begin_lba, u_int16_t lb_count,
u_int8_t sense_len, u_int32_t timeout);
void scsi_receive_diagnostic_results(struct ccb_scsiio *csio, u_int32_t retries,
void (*cbfcnp)(struct cam_periph *,
union ccb*),
uint8_t tag_action, int pcv,
uint8_t page_code, uint8_t *data_ptr,
uint16_t allocation_length,
uint8_t sense_len, uint32_t timeout);
void scsi_send_diagnostic(struct ccb_scsiio *csio, u_int32_t retries,
void (*cbfcnp)(struct cam_periph *, union ccb *),
uint8_t tag_action, int unit_offline,
int device_offline, int self_test, int page_format,
int self_test_code, uint8_t *data_ptr,
uint16_t param_list_length, uint8_t sense_len,
uint32_t timeout);
void scsi_read_write(struct ccb_scsiio *csio, u_int32_t retries,
void (*cbfcnp)(struct cam_periph *, union ccb *),
u_int8_t tag_action, int readop, u_int8_t byte2,
@ -1455,6 +1494,8 @@ void scsi_start_stop(struct ccb_scsiio *csio, u_int32_t retries,
int scsi_inquiry_match(caddr_t inqbuffer, caddr_t table_entry);
int scsi_static_inquiry_match(caddr_t inqbuffer,
caddr_t table_entry);
int scsi_devid_match(uint8_t *rhs, size_t rhs_len,
uint8_t *lhs, size_t lhs_len);
static __inline void scsi_extract_sense(struct scsi_sense_data *sense,
int *error_code, int *sense_key,

@ -548,8 +548,8 @@ passsendccb(struct cam_periph *periph, union ccb *ccb, union ccb *inccb)
&& ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE))
|| (ccb->ccb_h.func_code == XPT_DEV_MATCH)
|| (ccb->ccb_h.func_code == XPT_SMP_IO)
|| ((ccb->ccb_h.func_code == XPT_GDEV_ADVINFO)
&& (ccb->cgdai.bufsiz > 0)))) {
|| ((ccb->ccb_h.func_code == XPT_DEV_ADVINFO)
&& (ccb->cdai.bufsiz > 0)))) {
bzero(&mapinfo, sizeof(mapinfo));

@ -542,6 +542,7 @@ static const int scsi_quirk_table_size =
static cam_status proberegister(struct cam_periph *periph,
void *arg);
static void probeschedule(struct cam_periph *probe_periph);
static int device_has_vpd(struct cam_ed *device, uint8_t page_id);
static void probestart(struct cam_periph *periph, union ccb *start_ccb);
static void proberequestdefaultnegotiation(struct cam_periph *periph);
static int proberequestbackoff(struct cam_periph *periph,
@ -1460,7 +1461,7 @@ probedone(struct cam_periph *periph, union ccb *done_ccb)
path->device->device_id = (uint8_t *)devid;
}
} else if (cam_periph_error(done_ccb, 0,
SF_RETRY_UA|SF_NO_PRINT,
SF_RETRY_UA,
&softc->saved_ccb) == ERESTART) {
return;
} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
@ -1506,9 +1507,9 @@ probedone(struct cam_periph *periph, union ccb *done_ccb)
(u_int8_t *)malloc((serial_buf->length + 1),
M_CAMXPT, M_NOWAIT);
if (path->device->serial_num != NULL) {
bcopy(serial_buf->serial_num,
path->device->serial_num,
serial_buf->length);
memcpy(path->device->serial_num,
serial_buf->serial_num,
serial_buf->length);
path->device->serial_num_len =
serial_buf->length;
path->device->serial_num[serial_buf->length]
@ -2433,28 +2434,77 @@ scsi_devise_transport(struct cam_path *path)
}
static void
scsi_getdev_advinfo(union ccb *start_ccb)
scsi_dev_advinfo(union ccb *start_ccb)
{
struct cam_ed *device;
struct ccb_getdev_advinfo *cgdai;
struct ccb_dev_advinfo *cdai;
off_t amt;
device = start_ccb->ccb_h.path->device;
cgdai = &start_ccb->cgdai;
switch(cgdai->buftype) {
case CGDAI_TYPE_SCSI_DEVID:
cgdai->provsiz = device->device_id_len;
cdai = &start_ccb->cdai;
switch(cdai->buftype) {
case CDAI_TYPE_SCSI_DEVID:
if (cdai->flags & CDAI_FLAG_STORE)
break;
cdai->provsiz = device->device_id_len;
if (device->device_id_len == 0)
break;
amt = device->device_id_len;
if (cgdai->provsiz > cgdai->bufsiz)
amt = cgdai->bufsiz;
bcopy(device->device_id, cgdai->buf, amt);
if (cdai->provsiz > cdai->bufsiz)
amt = cdai->bufsiz;
memcpy(cdai->buf, device->device_id, amt);
break;
case CDAI_TYPE_SERIAL_NUM:
if (cdai->flags & CDAI_FLAG_STORE)
break;
cdai->provsiz = device->serial_num_len;
if (device->serial_num_len == 0)
break;
amt = device->serial_num_len;
if (cdai->provsiz > cdai->bufsiz)
amt = cdai->bufsiz;
memcpy(cdai->buf, device->serial_num, amt);
break;
case CDAI_TYPE_PHYS_PATH:
if (cdai->flags & CDAI_FLAG_STORE) {
if (device->physpath != NULL)
free(device->physpath, M_CAMXPT);
device->physpath_len = cdai->bufsiz;
/* Clear existing buffer if zero length */
if (cdai->bufsiz == 0)
break;
device->physpath = malloc(cdai->bufsiz, M_CAMXPT, M_NOWAIT);
if (device->physpath == NULL) {
start_ccb->ccb_h.status = CAM_REQ_ABORTED;
return;
}
memcpy(device->physpath, cdai->buf, cdai->bufsiz);
} else {
cdai->provsiz = device->physpath_len;
if (device->physpath_len == 0)
break;
amt = device->physpath_len;
if (cdai->provsiz > cdai->bufsiz)
amt = cdai->bufsiz;
memcpy(cdai->buf, device->physpath, amt);
}
break;
default:
break;
}
start_ccb->ccb_h.status = CAM_REQ_CMP;
if (cdai->flags & CDAI_FLAG_STORE) {
int owned;
owned = mtx_owned(start_ccb->ccb_h.path->bus->sim->mtx);
if (owned == 0)
mtx_lock(start_ccb->ccb_h.path->bus->sim->mtx);
xpt_async(AC_ADVINFO_CHANGED, start_ccb->ccb_h.path,
(void *)(uintptr_t)cdai->buftype);
if (owned == 0)
mtx_unlock(start_ccb->ccb_h.path->bus->sim->mtx);
}
}
static void
@ -2486,9 +2536,9 @@ scsi_action(union ccb *start_ccb)
(*(sim->sim_action))(sim, start_ccb);
break;
}
case XPT_GDEV_ADVINFO:
case XPT_DEV_ADVINFO:
{
scsi_getdev_advinfo(start_ccb);
scsi_dev_advinfo(start_ccb);
break;
}
default: