Refactor NVMe CAM integration.

- Remove a layering violation where the NVMe SIM code accessed CAM-internal
device structures to set pointers to controller and namespace data.
Instead, make the NVMe XPT probe fetch the data directly from hardware.
 - Cleanup NVMe SIM code, fixing support for multiple namespaces per
controller (reporting them as LUNs) and adding controller detach support
and run-time namespace change notifications.
 - Add initial support for namespace change async events.  So far only
in CAM mode, but it allows run-time namespace arrival and departure.
 - Add missing nvme_notify_fail_consumers() call on controller detach.
Together with previous changes this allows NVMe device detach/unplug.

Non-CAM mode still requires a lot of love to stay on par, but at least the
CAM-mode code should no longer get in the way as much, having become much
more self-sufficient.

Reviewed by:	imp
MFC after:	1 month
Sponsored by:	iXsystems, Inc.
This commit is contained in:
mav 2018-05-25 03:34:33 +00:00
parent 868a231539
commit a8d82e59ae
9 changed files with 283 additions and 157 deletions

View File

@ -5011,6 +5011,8 @@ xpt_release_device(struct cam_ed *device)
free(device->physpath, M_CAMXPT);
free(device->rcap_buf, M_CAMXPT);
free(device->serial_num, M_CAMXPT);
free(device->nvme_data, M_CAMXPT);
free(device->nvme_cdata, M_CAMXPT);
taskqueue_enqueue(xsoftc.xpt_taskq, &device->device_destroy_task);
}

View File

@ -155,8 +155,8 @@ struct cam_ed {
STAILQ_ENTRY(cam_ed) highpowerq_entry;
struct mtx device_mtx;
struct task device_destroy_task;
const struct nvme_controller_data *nvme_cdata;
const struct nvme_namespace_data *nvme_data;
struct nvme_controller_data *nvme_cdata;
struct nvme_namespace_data *nvme_data;
};
/*

View File

@ -84,17 +84,17 @@ static struct periph_driver nvme_probe_driver =
PERIPHDRIVER_DECLARE(nvme_probe, nvme_probe_driver);
typedef enum {
NVME_PROBE_IDENTIFY,
NVME_PROBE_IDENTIFY_CD,
NVME_PROBE_IDENTIFY_NS,
NVME_PROBE_DONE,
NVME_PROBE_INVALID,
NVME_PROBE_RESET
NVME_PROBE_INVALID
} nvme_probe_action;
static char *nvme_probe_action_text[] = {
"NVME_PROBE_IDENTIFY",
"NVME_PROBE_IDENTIFY_CD",
"NVME_PROBE_IDENTIFY_NS",
"NVME_PROBE_DONE",
"NVME_PROBE_INVALID",
"NVME_PROBE_RESET",
"NVME_PROBE_INVALID"
};
#define NVME_PROBE_SET_ACTION(softc, newaction) \
@ -113,6 +113,10 @@ typedef enum {
typedef struct {
TAILQ_HEAD(, ccb_hdr) request_ccbs;
union {
struct nvme_controller_data cd;
struct nvme_namespace_data ns;
};
nvme_probe_action action;
nvme_probe_flags flags;
int restart;
@ -137,6 +141,7 @@ static cam_status nvme_probe_register(struct cam_periph *periph,
void *arg);
static void nvme_probe_schedule(struct cam_periph *nvme_probe_periph);
static void nvme_probe_start(struct cam_periph *periph, union ccb *start_ccb);
static void nvme_probe_done(struct cam_periph *periph, union ccb *done_ccb);
static void nvme_probe_cleanup(struct cam_periph *periph);
//static void nvme_find_quirk(struct cam_ed *device);
static void nvme_scan_lun(struct cam_periph *periph,
@ -240,7 +245,7 @@ nvme_probe_schedule(struct cam_periph *periph)
softc = (nvme_probe_softc *)periph->softc;
ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
NVME_PROBE_SET_ACTION(softc, NVME_PROBE_IDENTIFY);
NVME_PROBE_SET_ACTION(softc, NVME_PROBE_IDENTIFY_CD);
if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
softc->flags |= NVME_PROBE_NO_ANNOUNCE;
@ -254,10 +259,8 @@ static void
nvme_probe_start(struct cam_periph *periph, union ccb *start_ccb)
{
struct ccb_nvmeio *nvmeio;
struct ccb_scsiio *csio;
nvme_probe_softc *softc;
struct cam_path *path;
const struct nvme_namespace_data *nvme_data;
lun_id_t lun;
CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("nvme_probe_start\n"));
@ -265,57 +268,163 @@ nvme_probe_start(struct cam_periph *periph, union ccb *start_ccb)
softc = (nvme_probe_softc *)periph->softc;
path = start_ccb->ccb_h.path;
nvmeio = &start_ccb->nvmeio;
csio = &start_ccb->csio;
nvme_data = periph->path->device->nvme_data;
lun = xpt_path_lun_id(periph->path);
if (softc->restart) {
softc->restart = 0;
if (periph->path->device->flags & CAM_DEV_UNCONFIGURED)
NVME_PROBE_SET_ACTION(softc, NVME_PROBE_RESET);
else
NVME_PROBE_SET_ACTION(softc, NVME_PROBE_IDENTIFY);
NVME_PROBE_SET_ACTION(softc, NVME_PROBE_IDENTIFY_CD);
}
/*
* Other transports have to ask their SIM to do a lot of action.
* NVMe doesn't, so don't do the dance. Just do things
* directly.
*/
switch (softc->action) {
case NVME_PROBE_RESET:
/* FALLTHROUGH */
case NVME_PROBE_IDENTIFY:
nvme_device_transport(path);
/*
* Test for lun == CAM_LUN_WILDCARD is lame, but
* appears to be necessary here. XXX
*/
lun = xpt_path_lun_id(periph->path);
if (lun == CAM_LUN_WILDCARD ||
periph->path->device->flags & CAM_DEV_UNCONFIGURED) {
path->device->flags &= ~CAM_DEV_UNCONFIGURED;
xpt_acquire_device(path->device);
start_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
xpt_action(start_ccb);
xpt_async(AC_FOUND_DEVICE, path, start_ccb);
}
NVME_PROBE_SET_ACTION(softc, NVME_PROBE_DONE);
case NVME_PROBE_IDENTIFY_CD:
cam_fill_nvmeadmin(nvmeio,
0, /* retries */
nvme_probe_done, /* cbfcnp */
CAM_DIR_IN, /* flags */
(uint8_t *)&softc->cd, /* data_ptr */
sizeof(softc->cd), /* dxfer_len */
30 * 1000); /* timeout 30s */
nvme_ns_cmd(nvmeio, NVME_OPC_IDENTIFY, 0,
1, 0, 0, 0, 0, 0);
break;
case NVME_PROBE_IDENTIFY_NS:
cam_fill_nvmeadmin(nvmeio,
0, /* retries */
nvme_probe_done, /* cbfcnp */
CAM_DIR_IN, /* flags */
(uint8_t *)&softc->ns, /* data_ptr */
sizeof(softc->ns), /* dxfer_len */
30 * 1000); /* timeout 30s */
nvme_ns_cmd(nvmeio, NVME_OPC_IDENTIFY, lun,
0, 0, 0, 0, 0, 0);
break;
default:
panic("nvme_probe_start: invalid action state 0x%x\n", softc->action);
}
/*
* Probing is now done. We need to complete any lingering items
* in the queue, though there shouldn't be any.
*/
xpt_release_ccb(start_ccb);
CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe completed\n"));
while ((start_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs))) {
TAILQ_REMOVE(&softc->request_ccbs,
&start_ccb->ccb_h, periph_links.tqe);
start_ccb->ccb_h.status = CAM_REQ_CMP;
xpt_done(start_ccb);
start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
xpt_action(start_ccb);
}
static void
nvme_probe_done(struct cam_periph *periph, union ccb *done_ccb)
{
struct nvme_namespace_data *nvme_data;
struct nvme_controller_data *nvme_cdata;
nvme_probe_softc *softc;
struct cam_path *path;
cam_status status;
u_int32_t priority;
int found = 1;
CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("nvme_probe_done\n"));
softc = (nvme_probe_softc *)periph->softc;
path = done_ccb->ccb_h.path;
priority = done_ccb->ccb_h.pinfo.priority;
if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
if (cam_periph_error(done_ccb,
0, softc->restart ? (SF_NO_RECOVERY | SF_NO_RETRY) : 0
) == ERESTART) {
out:
/* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */
cam_release_devq(path, 0, 0, 0, FALSE);
return;
}
if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
/* Don't wedge the queue */
xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE);
}
status = done_ccb->ccb_h.status & CAM_STATUS_MASK;
/*
* If we get to this point, we got an error status back
* from the inquiry and the error status doesn't require
* automatically retrying the command. Therefore, the
* inquiry failed. If we had inquiry information before
* for this device, but this latest inquiry command failed,
* the device has probably gone away. If this device isn't
* already marked unconfigured, notify the peripheral
* drivers that this device is no more.
*/
device_fail: if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
xpt_async(AC_LOST_DEVICE, path, NULL);
NVME_PROBE_SET_ACTION(softc, NVME_PROBE_INVALID);
found = 0;
goto done;
}
if (softc->restart)
goto done;
switch (softc->action) {
case NVME_PROBE_IDENTIFY_CD:
nvme_controller_data_swapbytes(&softc->cd);
nvme_cdata = path->device->nvme_cdata;
if (nvme_cdata == NULL) {
nvme_cdata = malloc(sizeof(*nvme_cdata), M_CAMXPT,
M_NOWAIT);
if (nvme_cdata == NULL) {
xpt_print(path, "Can't allocate memory");
goto device_fail;
}
}
bcopy(&softc->cd, nvme_cdata, sizeof(*nvme_cdata));
path->device->nvme_cdata = nvme_cdata;
// nvme_find_quirk(path->device);
nvme_device_transport(path);
NVME_PROBE_SET_ACTION(softc, NVME_PROBE_IDENTIFY_NS);
xpt_release_ccb(done_ccb);
xpt_schedule(periph, priority);
goto out;
case NVME_PROBE_IDENTIFY_NS:
nvme_namespace_data_swapbytes(&softc->ns);
/* Check that the namespace exists. */
if (softc->ns.nsze == 0)
goto device_fail;
nvme_data = path->device->nvme_data;
if (nvme_data == NULL) {
nvme_data = malloc(sizeof(*nvme_data), M_CAMXPT,
M_NOWAIT);
if (nvme_data == NULL) {
xpt_print(path, "Can't allocate memory");
goto device_fail;
}
}
bcopy(&softc->ns, nvme_data, sizeof(*nvme_data));
path->device->nvme_data = nvme_data;
if (periph->path->device->flags & CAM_DEV_UNCONFIGURED) {
path->device->flags &= ~CAM_DEV_UNCONFIGURED;
xpt_acquire_device(path->device);
done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
xpt_action(done_ccb);
xpt_async(AC_FOUND_DEVICE, path, done_ccb);
}
NVME_PROBE_SET_ACTION(softc, NVME_PROBE_DONE);
break;
default:
panic("nvme_probe_done: invalid action state 0x%x\n", softc->action);
}
done:
if (softc->restart) {
softc->restart = 0;
xpt_release_ccb(done_ccb);
nvme_probe_schedule(periph);
goto out;
}
xpt_release_ccb(done_ccb);
CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe completed\n"));
while ((done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs))) {
TAILQ_REMOVE(&softc->request_ccbs,
&done_ccb->ccb_h, periph_links.tqe);
done_ccb->ccb_h.status = found ? CAM_REQ_CMP : CAM_REQ_CMP_ERR;
xpt_done(done_ccb);
}
/* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */
cam_release_devq(path, 0, 0, 0, FALSE);
cam_periph_invalidate(periph);
cam_periph_release_locked(periph);
}

View File

@ -439,6 +439,24 @@ nvme_notify_fail_consumers(struct nvme_controller *ctrlr)
}
}
void
nvme_notify_ns(struct nvme_controller *ctrlr, int nsid)
{
struct nvme_consumer *cons;
struct nvme_namespace *ns = &ctrlr->ns[nsid - 1];
uint32_t i;
if (!ctrlr->is_initialized)
return;
for (i = 0; i < NVME_MAX_CONSUMERS; i++) {
cons = &nvme_consumer[i];
if (cons->id != INVALID_CONSUMER_ID && cons->ns_fn != NULL)
ns->cons_cookie[cons->id] =
(*cons->ns_fn)(ns, ctrlr->cons_cookie[cons->id]);
}
}
struct nvme_consumer *
nvme_register_consumer(nvme_cons_ns_fn_t ns_fn, nvme_cons_ctrlr_fn_t ctrlr_fn,
nvme_cons_async_fn_t async_fn,

View File

@ -115,7 +115,7 @@
#define NVME_CMD_FUSE_SHIFT (8)
#define NVME_CMD_FUSE_MASK (0x3)
#define NVME_CMD_SET_OPC(opc) (htole16(((opc) & NVME_CMD_OPC_MASK) << NVME_CMD_OPC_SHIFT))
#define NVME_CMD_SET_OPC(opc) (htole16(((uint16_t)(opc) & NVME_CMD_OPC_MASK) << NVME_CMD_OPC_SHIFT))
#define NVME_STATUS_P_SHIFT (0)
#define NVME_STATUS_P_MASK (0x1)
@ -1091,6 +1091,12 @@ struct nvme_firmware_page {
_Static_assert(sizeof(struct nvme_firmware_page) == 512, "bad size for nvme_firmware_page");
struct nvme_ns_list {
uint32_t ns[1024];
} __packed __aligned(4);
_Static_assert(sizeof(struct nvme_ns_list) == 4096, "bad size for nvme_ns_list");
struct intel_log_temp_stats
{
uint64_t current;
@ -1469,6 +1475,15 @@ void nvme_firmware_page_swapbytes(struct nvme_firmware_page *s)
s->revision[i] = le64toh(s->revision[i]);
}
static inline
void nvme_ns_list_swapbytes(struct nvme_ns_list *s)
{
int i;
for (i = 0; i < 1024; i++)
s->ns[i] = le32toh(s->ns[i]);
}
static inline
void intel_log_temp_stats_swapbytes(struct intel_log_temp_stats *s)
{

View File

@ -564,6 +564,7 @@ is_log_page_id_valid(uint8_t page_id)
case NVME_LOG_ERROR:
case NVME_LOG_HEALTH_INFORMATION:
case NVME_LOG_FIRMWARE_SLOT:
case NVME_LOG_CHANGED_NAMESPACE:
return (TRUE);
}
@ -587,6 +588,9 @@ nvme_ctrlr_get_log_page_size(struct nvme_controller *ctrlr, uint8_t page_id)
case NVME_LOG_FIRMWARE_SLOT:
log_page_size = sizeof(struct nvme_firmware_page);
break;
case NVME_LOG_CHANGED_NAMESPACE:
log_page_size = sizeof(struct nvme_ns_list);
break;
default:
log_page_size = 0;
break;
@ -625,6 +629,7 @@ nvme_ctrlr_async_event_log_page_cb(void *arg, const struct nvme_completion *cpl)
{
struct nvme_async_event_request *aer = arg;
struct nvme_health_information_page *health_info;
struct nvme_ns_list *nsl;
struct nvme_error_information_entry *err;
int i;
@ -652,6 +657,10 @@ nvme_ctrlr_async_event_log_page_cb(void *arg, const struct nvme_completion *cpl)
nvme_firmware_page_swapbytes(
(struct nvme_firmware_page *)aer->log_page_buffer);
break;
case NVME_LOG_CHANGED_NAMESPACE:
nvme_ns_list_swapbytes(
(struct nvme_ns_list *)aer->log_page_buffer);
break;
case INTEL_LOG_TEMP_STATS:
intel_log_temp_stats_swapbytes(
(struct intel_log_temp_stats *)aer->log_page_buffer);
@ -676,6 +685,14 @@ nvme_ctrlr_async_event_log_page_cb(void *arg, const struct nvme_completion *cpl)
~health_info->critical_warning;
nvme_ctrlr_cmd_set_async_event_config(aer->ctrlr,
aer->ctrlr->async_event_config, NULL, NULL);
} else if (aer->log_page_id == NVME_LOG_CHANGED_NAMESPACE &&
!nvme_use_nvd) {
nsl = (struct nvme_ns_list *)aer->log_page_buffer;
for (i = 0; i < nitems(nsl->ns) && nsl->ns[i] != 0; i++) {
if (nsl->ns[i] > NVME_MAX_NAMESPACES)
break;
nvme_notify_ns(aer->ctrlr, nsl->ns[i]);
}
}
@ -712,7 +729,8 @@ nvme_ctrlr_async_event_cb(void *arg, const struct nvme_completion *cpl)
/* Associated log page is in bits 23:16 of completion entry dw0. */
aer->log_page_id = (cpl->cdw0 & 0xFF0000) >> 16;
nvme_printf(aer->ctrlr, "async event occurred (log page id=0x%x)\n",
nvme_printf(aer->ctrlr, "async event occurred (type 0x%x, info 0x%02x,"
" page 0x%02x)\n", (cpl->cdw0 & 0x03), (cpl->cdw0 & 0xFF00) >> 8,
aer->log_page_id);
if (is_log_page_id_valid(aer->log_page_id)) {
@ -762,8 +780,12 @@ nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
struct nvme_async_event_request *aer;
uint32_t i;
ctrlr->async_event_config = 0xFF;
ctrlr->async_event_config &= ~NVME_CRIT_WARN_ST_RESERVED_MASK;
ctrlr->async_event_config = NVME_CRIT_WARN_ST_AVAILABLE_SPARE |
NVME_CRIT_WARN_ST_DEVICE_RELIABILITY |
NVME_CRIT_WARN_ST_READ_ONLY |
NVME_CRIT_WARN_ST_VOLATILE_MEMORY_BACKUP;
if (ctrlr->cdata.ver >= NVME_REV(1, 2))
ctrlr->async_event_config |= 0x300;
status.done = 0;
nvme_ctrlr_cmd_get_feature(ctrlr, NVME_FEAT_TEMPERATURE_THRESHOLD,
@ -774,8 +796,8 @@ nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
(status.cpl.cdw0 & 0xFFFF) == 0xFFFF ||
(status.cpl.cdw0 & 0xFFFF) == 0x0000) {
nvme_printf(ctrlr, "temperature threshold not supported\n");
ctrlr->async_event_config &= ~NVME_CRIT_WARN_ST_TEMPERATURE;
}
} else
ctrlr->async_event_config |= NVME_CRIT_WARN_ST_TEMPERATURE;
nvme_ctrlr_cmd_set_async_event_config(ctrlr,
ctrlr->async_event_config, NULL, NULL);
@ -1285,6 +1307,8 @@ nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev)
if (ctrlr->resource == NULL)
goto nores;
nvme_notify_fail_consumers(ctrlr);
for (i = 0; i < NVME_MAX_NAMESPACES; i++)
nvme_ns_destruct(&ctrlr->ns[i]);

View File

@ -214,7 +214,7 @@ nvme_ctrlr_cmd_set_num_queues(struct nvme_controller *ctrlr,
void
nvme_ctrlr_cmd_set_async_event_config(struct nvme_controller *ctrlr,
uint8_t state, nvme_cb_fn_t cb_fn, void *cb_arg)
uint32_t state, nvme_cb_fn_t cb_fn, void *cb_arg)
{
uint32_t cdw11;

View File

@ -312,8 +312,8 @@ struct nvme_controller {
struct cdev *cdev;
/** bit mask of critical warning types currently enabled for async events */
uint8_t async_event_config;
/** bit mask of event types currently enabled for async events */
uint32_t async_event_config;
uint32_t num_aers;
struct nvme_async_event_request aer[NVME_MAX_ASYNC_EVENTS];
@ -399,7 +399,7 @@ void nvme_ctrlr_cmd_set_num_queues(struct nvme_controller *ctrlr,
uint32_t num_queues, nvme_cb_fn_t cb_fn,
void *cb_arg);
void nvme_ctrlr_cmd_set_async_event_config(struct nvme_controller *ctrlr,
uint8_t state,
uint32_t state,
nvme_cb_fn_t cb_fn, void *cb_arg);
void nvme_ctrlr_cmd_abort(struct nvme_controller *ctrlr, uint16_t cid,
uint16_t sqid, nvme_cb_fn_t cb_fn, void *cb_arg);
@ -544,6 +544,7 @@ void nvme_notify_async_consumers(struct nvme_controller *ctrlr,
uint32_t log_page_size);
void nvme_notify_fail_consumers(struct nvme_controller *ctrlr);
void nvme_notify_new_controller(struct nvme_controller *ctrlr);
void nvme_notify_ns(struct nvme_controller *ctrlr, int nsid);
void nvme_ctrlr_intx_handler(void *arg);
void nvme_ctrlr_poll(struct nvme_controller *ctrlr);

View File

@ -40,7 +40,6 @@ __FBSDID("$FreeBSD$");
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_internal.h> // Yes, this is wrong.
#include <cam/cam_debug.h>
#include <dev/pci/pcivar.h>
@ -54,13 +53,11 @@ static void nvme_sim_action(struct cam_sim *sim, union ccb *ccb);
static void nvme_sim_poll(struct cam_sim *sim);
#define sim2softc(sim) ((struct nvme_sim_softc *)cam_sim_softc(sim))
#define sim2ns(sim) (sim2softc(sim)->s_ns)
#define sim2ctrlr(sim) (sim2softc(sim)->s_ctrlr)
struct nvme_sim_softc
{
struct nvme_controller *s_ctrlr;
struct nvme_namespace *s_ns;
struct cam_sim *s_sim;
struct cam_path *s_path;
};
@ -146,18 +143,11 @@ static void
nvme_sim_action(struct cam_sim *sim, union ccb *ccb)
{
struct nvme_controller *ctrlr;
struct nvme_namespace *ns;
CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE,
("nvme_sim_action: func= %#x\n",
ccb->ccb_h.func_code));
/*
* XXX when we support multiple namespaces in the base driver we'll need
* to revisit how all this gets stored and saved in the periph driver's
* reserved areas. Right now we store all three in the softc of the sim.
*/
ns = sim2ns(sim);
ctrlr = sim2ctrlr(sim);
mtx_assert(&ctrlr->lock, MA_OWNED);
@ -193,11 +183,11 @@ nvme_sim_action(struct cam_sim *sim, union ccb *ccb)
cpi->version_num = 1;
cpi->hba_inquiry = 0;
cpi->target_sprt = 0;
cpi->hba_misc = PIM_UNMAPPED /* | PIM_NOSCAN */;
cpi->hba_misc = PIM_UNMAPPED | PIM_NOSCAN;
cpi->hba_eng_cnt = 0;
cpi->max_target = 0;
cpi->max_lun = ctrlr->cdata.nn;
cpi->maxio = nvme_ns_get_max_io_xfer_size(ns);
cpi->maxio = ctrlr->max_xfer_size;
cpi->initiator_id = 0;
cpi->bus_id = cam_sim_bus(sim);
cpi->base_transfer_speed = nvme_link_kBps(ctrlr);
@ -209,7 +199,7 @@ nvme_sim_action(struct cam_sim *sim, union ccb *ccb)
cpi->transport_version = nvme_mmio_read_4(ctrlr, vs);
cpi->protocol = PROTO_NVME;
cpi->protocol_version = nvme_mmio_read_4(ctrlr, vs);
cpi->xport_specific.nvme.nsid = ns->id;
cpi->xport_specific.nvme.nsid = xpt_path_lun_id(ccb->ccb_h.path);
cpi->xport_specific.nvme.domain = pci_get_domain(dev);
cpi->xport_specific.nvme.bus = pci_get_bus(dev);
cpi->xport_specific.nvme.slot = pci_get_slot(dev);
@ -285,51 +275,45 @@ nvme_sim_poll(struct cam_sim *sim)
static void *
nvme_sim_new_controller(struct nvme_controller *ctrlr)
{
struct nvme_sim_softc *sc;
struct cam_devq *devq;
int max_trans;
int unit;
struct nvme_sim_softc *sc = NULL;
max_trans = ctrlr->max_hw_pend_io;
unit = device_get_unit(ctrlr->dev);
devq = cam_simq_alloc(max_trans);
if (devq == NULL)
return NULL;
return (NULL);
sc = malloc(sizeof(*sc), M_NVME, M_ZERO | M_WAITOK);
sc->s_ctrlr = ctrlr;
sc->s_sim = cam_sim_alloc(nvme_sim_action, nvme_sim_poll,
"nvme", sc, unit, &ctrlr->lock, max_trans, max_trans, devq);
"nvme", sc, device_get_unit(ctrlr->dev),
&ctrlr->lock, max_trans, max_trans, devq);
if (sc->s_sim == NULL) {
printf("Failed to allocate a sim\n");
cam_simq_free(devq);
goto err1;
}
if (xpt_bus_register(sc->s_sim, ctrlr->dev, 0) != CAM_SUCCESS) {
printf("Failed to create a bus\n");
goto err2;
}
if (xpt_create_path(&sc->s_path, /*periph*/NULL, cam_sim_path(sc->s_sim),
CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
printf("Failed to create a path\n");
goto err3;
}
return (sc);
err3:
xpt_bus_deregister(cam_sim_path(sc->s_sim));
err2:
cam_sim_free(sc->s_sim, /*free_devq*/TRUE);
err1:
free(sc, M_NVME);
return NULL;
}
return sc;
}
static void
nvme_sim_rescan_target(struct nvme_controller *ctrlr, struct cam_path *path)
{
union ccb *ccb;
ccb = xpt_alloc_ccb_nowait();
if (ccb == NULL) {
printf("unable to alloc CCB for rescan\n");
return;
}
if (xpt_clone_path(&ccb->ccb_h.path, path) != CAM_REQ_CMP) {
printf("unable to copy path for rescan\n");
xpt_free_ccb(ccb);
return;
}
xpt_rescan(ccb);
return (NULL);
}
static void *
@ -337,70 +321,43 @@ nvme_sim_new_ns(struct nvme_namespace *ns, void *sc_arg)
{
struct nvme_sim_softc *sc = sc_arg;
struct nvme_controller *ctrlr = sc->s_ctrlr;
int i;
sc->s_ns = ns;
/*
* XXX this is creating one bus per ns, but it should be one
* XXX target per controller, and one LUN per namespace.
* XXX Current drives only support one NS, so there's time
* XXX to fix it later when new drives arrive.
*
* XXX I'm pretty sure the xpt_bus_register() call below is
* XXX like super lame and it really belongs in the sim_new_ctrlr
* XXX callback. Then the create_path below would be pretty close
* XXX to being right. Except we should be per-ns not per-ctrlr
* XXX data.
*/
union ccb *ccb;
mtx_lock(&ctrlr->lock);
/* Create bus */
/*
* XXX do I need to lock ctrlr->lock ?
* XXX do I need to lock the path?
* ata and scsi seem to in their code, but their discovery is
* somewhat more asynchronous. We're only every called one at a
* time, and nothing is in parallel.
*/
i = 0;
if (xpt_bus_register(sc->s_sim, ctrlr->dev, 0) != CAM_SUCCESS)
goto error;
i++;
if (xpt_create_path(&sc->s_path, /*periph*/NULL, cam_sim_path(sc->s_sim),
1, ns->id) != CAM_REQ_CMP)
goto error;
i++;
sc->s_path->device->nvme_data = nvme_ns_get_data(ns);
sc->s_path->device->nvme_cdata = nvme_ctrlr_get_data(ns->ctrlr);
/* Scan bus */
nvme_sim_rescan_target(ctrlr, sc->s_path);
mtx_unlock(&ctrlr->lock);
return ns;
error:
switch (i) {
case 2:
xpt_free_path(sc->s_path);
case 1:
xpt_bus_deregister(cam_sim_path(sc->s_sim));
case 0:
cam_sim_free(sc->s_sim, /*free_devq*/TRUE);
ccb = xpt_alloc_ccb_nowait();
if (ccb == NULL) {
printf("unable to alloc CCB for rescan\n");
return (NULL);
}
if (xpt_create_path(&ccb->ccb_h.path, /*periph*/NULL,
cam_sim_path(sc->s_sim), 0, ns->id) != CAM_REQ_CMP) {
printf("unable to create path for rescan\n");
xpt_free_ccb(ccb);
return (NULL);
}
xpt_rescan(ccb);
mtx_unlock(&ctrlr->lock);
return NULL;
return (ns);
}
static void
nvme_sim_controller_fail(void *ctrlr_arg)
{
/* XXX cleanup XXX */
struct nvme_sim_softc *sc = ctrlr_arg;
struct nvme_controller *ctrlr = sc->s_ctrlr;
mtx_lock(&ctrlr->lock);
xpt_async(AC_LOST_DEVICE, sc->s_path, NULL);
xpt_free_path(sc->s_path);
xpt_bus_deregister(cam_sim_path(sc->s_sim));
cam_sim_free(sc->s_sim, /*free_devq*/TRUE);
mtx_unlock(&ctrlr->lock);
free(sc, M_NVME);
}
struct nvme_consumer *consumer_cookie;