diff --git a/sys/dev/mfi/mfi.c b/sys/dev/mfi/mfi.c new file mode 100644 index 000000000000..8efc0c011b05 --- /dev/null +++ b/sys/dev/mfi/mfi.c @@ -0,0 +1,1265 @@ +/*- + * Copyright (c) 2006 IronPort Systems + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include "opt_mfi.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +static int mfi_alloc_commands(struct mfi_softc *); +static void mfi_release_command(struct mfi_command *cm); +static int mfi_comms_init(struct mfi_softc *); +static int mfi_polled_command(struct mfi_softc *, struct mfi_command *); +static int mfi_get_controller_info(struct mfi_softc *); +static void mfi_data_cb(void *, bus_dma_segment_t *, int, int); +static void mfi_startup(void *arg); +static void mfi_intr(void *arg); +static void mfi_enable_intr(struct mfi_softc *sc); +static void mfi_ldprobe_inq(struct mfi_softc *sc); +static void mfi_ldprobe_inq_complete(struct mfi_command *); +static int mfi_ldprobe_capacity(struct mfi_softc *sc, int id); +static void mfi_ldprobe_capacity_complete(struct mfi_command *); +static int mfi_ldprobe_tur(struct mfi_softc *sc, int id); +static void mfi_ldprobe_tur_complete(struct mfi_command *); +static int mfi_add_ld(struct mfi_softc *sc, int id, uint64_t, uint32_t); +static struct mfi_command * mfi_bio_command(struct mfi_softc *); +static void mfi_bio_complete(struct mfi_command *); +static int mfi_mapcmd(struct mfi_softc *, struct mfi_command *); +static int mfi_send_frame(struct mfi_softc *, struct mfi_command *); +static void mfi_complete(struct mfi_softc *, struct mfi_command *); + +/* Management interface */ +static d_open_t mfi_open; +static d_close_t mfi_close; +static d_ioctl_t mfi_ioctl; + +static struct cdevsw mfi_cdevsw = { + .d_version = D_VERSION, + .d_flags = 0, + .d_open = mfi_open, + .d_close = mfi_close, + .d_ioctl = mfi_ioctl, + .d_name = "mfi", +}; + +MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver"); + +#define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH + +static int +mfi_transition_firmware(struct mfi_softc *sc) +{ + int32_t fw_state, cur_state; + int max_wait, i; + + fw_state = MFI_READ4(sc, 
MFI_OMSG0) & MFI_FWSTATE_MASK; + while (fw_state != MFI_FWSTATE_READY) { + if (bootverbose) + device_printf(sc->mfi_dev, "Waiting for firmware to " + "become ready\n"); + cur_state = fw_state; + switch (fw_state) { + case MFI_FWSTATE_FAULT: + device_printf(sc->mfi_dev, "Firmware fault\n"); + return (ENXIO); + case MFI_FWSTATE_WAIT_HANDSHAKE: + MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE); + max_wait = 2; + break; + case MFI_FWSTATE_OPERATIONAL: + MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY); + max_wait = 10; + break; + case MFI_FWSTATE_UNDEFINED: + case MFI_FWSTATE_BB_INIT: + max_wait = 2; + break; + case MFI_FWSTATE_FW_INIT: + case MFI_FWSTATE_DEVICE_SCAN: + case MFI_FWSTATE_FLUSH_CACHE: + max_wait = 20; + break; + default: + device_printf(sc->mfi_dev,"Unknown firmware state %d\n", + fw_state); + return (ENXIO); + } + for (i = 0; i < (max_wait * 10); i++) { + fw_state = MFI_READ4(sc, MFI_OMSG0) & MFI_FWSTATE_MASK; + if (fw_state == cur_state) + DELAY(100000); + else + break; + } + if (fw_state == cur_state) { + device_printf(sc->mfi_dev, "firmware stuck in state " + "%#x\n", fw_state); + return (ENXIO); + } + } + return (0); +} + +static void +mfi_addr32_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) +{ + uint32_t *addr; + + addr = arg; + *addr = segs[0].ds_addr; +} + +int +mfi_attach(struct mfi_softc *sc) +{ + uint32_t status; + int error, commsz, framessz, sensesz; + int frames, unit; + + mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF); + TAILQ_INIT(&sc->mfi_ld_tqh); + + mfi_initq_free(sc); + mfi_initq_ready(sc); + mfi_initq_busy(sc); + mfi_initq_bio(sc); + + /* Before we get too far, see if the firmware is working */ + if ((error = mfi_transition_firmware(sc)) != 0) { + device_printf(sc->mfi_dev, "Firmware not in READY state, " + "error %d\n", error); + return (ENXIO); + } + + /* + * Get information needed for sizing the contiguous memory for the + * frame pool. Size down the sgl parameter since we know that + * we will never need more than what's required for MAXPHYS. + * It would be nice if these constants were available at runtime + * instead of compile time. + */ + status = MFI_READ4(sc, MFI_OMSG0); + sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK; + sc->mfi_max_fw_sgl = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16; + sc->mfi_total_sgl = min(sc->mfi_max_fw_sgl, ((MAXPHYS / PAGE_SIZE) +1)); + + /* + * Create the dma tag for data buffers. Used both for block I/O + * and for various internal data queries. + */ + if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */ + 1, 0, /* algnmnt, boundary */ + BUS_SPACE_MAXADDR, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ + sc->mfi_total_sgl, /* nsegments */ + BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ + BUS_DMA_ALLOCNOW, /* flags */ + busdma_lock_mutex, /* lockfunc */ + &sc->mfi_io_lock, /* lockfuncarg */ + &sc->mfi_buffer_dmat)) { + device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n"); + return (ENOMEM); + } + + /* + * Allocate DMA memory for the comms queues. Keep it under 4GB for + * efficiency. The mfi_hwcomms struct includes space for 1 reply queue + * entry, so the calculated size here will be will be 1 more than + * mfi_max_fw_cmds. This is apparently a requirement of the hardware. 
+	 */
+	commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
+	    sizeof(struct mfi_hwcomms);
+	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
+				1, 0,			/* algnmnt, boundary */
+				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
+				BUS_SPACE_MAXADDR,	/* highaddr */
+				NULL, NULL,		/* filter, filterarg */
+				commsz,			/* maxsize */
+				1,			/* nsegments */
+				commsz,			/* maxsegsize */
+				0,			/* flags */
+				NULL, NULL,		/* lockfunc, lockarg */
+				&sc->mfi_comms_dmat)) {
+		device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
+		return (ENOMEM);
+	}
+	if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
+	    BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
+		device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
+		return (ENOMEM);
+	}
+	bzero(sc->mfi_comms, commsz);
+	bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
+	    sc->mfi_comms, commsz, mfi_addr32_cb, &sc->mfi_comms_busaddr, 0);
+
+	/*
+	 * Allocate DMA memory for the command frames.  Keep them in the
+	 * lower 4GB for efficiency.  Calculate the size of the frames at
+	 * the same time; the frame is 64 bytes plus space for the SG lists.
+	 * The assumption here is that the SG list will start at the second
+	 * 64 byte segment of the frame and not use the unused bytes in the
+	 * frame.  While this might seem wasteful, apparently the frames must
+	 * be 64 byte aligned, so any savings would be negated by the extra
+	 * alignment padding.
+	 */
+	if (sizeof(bus_addr_t) == 8) {
+		sc->mfi_sgsize = sizeof(struct mfi_sg64);
+		sc->mfi_flags |= MFI_FLAGS_SG64;
+	} else {
+		sc->mfi_sgsize = sizeof(struct mfi_sg32);
+	}
+	frames = (sc->mfi_sgsize * sc->mfi_total_sgl + MFI_FRAME_SIZE - 1) /
+	    MFI_FRAME_SIZE + 1;
+	sc->mfi_frame_size = frames * MFI_FRAME_SIZE;
+	framessz = sc->mfi_frame_size * sc->mfi_max_fw_cmds;
+	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
+				64, 0,			/* algnmnt, boundary */
+				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
+				BUS_SPACE_MAXADDR,	/* highaddr */
+				NULL, NULL,		/* filter, filterarg */
+				framessz,		/* maxsize */
+				1,			/* nsegments */
+				framessz,		/* maxsegsize */
+				0,			/* flags */
+				NULL, NULL,		/* lockfunc, lockarg */
+				&sc->mfi_frames_dmat)) {
+		device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
+		return (ENOMEM);
+	}
+	if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
+	    BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
+		device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
+		return (ENOMEM);
+	}
+	bzero(sc->mfi_frames, framessz);
+	bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
+	    sc->mfi_frames, framessz, mfi_addr32_cb, &sc->mfi_frames_busaddr,0);
+
+	/*
+	 * Allocate DMA memory for the frame sense data.
Keep them in the + * lower 4GB for efficiency + */ + sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN; + if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */ + 4, 0, /* algnmnt, boundary */ + BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + sensesz, /* maxsize */ + 1, /* nsegments */ + sensesz, /* maxsegsize */ + 0, /* flags */ + NULL, NULL, /* lockfunc, lockarg */ + &sc->mfi_sense_dmat)) { + device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n"); + return (ENOMEM); + } + if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense, + BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) { + device_printf(sc->mfi_dev, "Cannot allocate sense memory\n"); + return (ENOMEM); + } + bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap, + sc->mfi_sense, sensesz, mfi_addr32_cb, &sc->mfi_sense_busaddr, 0); + + if ((error = mfi_alloc_commands(sc)) != 0) + return (error); + + if ((error = mfi_comms_init(sc)) != 0) + return (error); + + if ((error = mfi_get_controller_info(sc)) != 0) + return (error); + +#if 0 + if ((error = mfi_setup_aen(sc)) != 0) + return (error); +#endif + + /* + * Set up the interrupt handler. XXX This should happen in + * mfi_pci.c + */ + sc->mfi_irq_rid = 0; + if ((sc->mfi_irq = bus_alloc_resource_any(sc->mfi_dev, SYS_RES_IRQ, + &sc->mfi_irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) { + device_printf(sc->mfi_dev, "Cannot allocate interrupt\n"); + return (EINVAL); + } + if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE|INTR_TYPE_BIO, + mfi_intr, sc, &sc->mfi_intr)) { + device_printf(sc->mfi_dev, "Cannot set up interrupt\n"); + return (EINVAL); + } + + /* Register a config hook to probe the bus for arrays */ + sc->mfi_ich.ich_func = mfi_startup; + sc->mfi_ich.ich_arg = sc; + if (config_intrhook_establish(&sc->mfi_ich) != 0) { + device_printf(sc->mfi_dev, "Cannot establish configuration " + "hook\n"); + return (EINVAL); + } + + /* + * Register a shutdown handler. + */ + if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown, + sc, SHUTDOWN_PRI_DEFAULT)) == NULL) { + device_printf(sc->mfi_dev, "Warning: shutdown event " + "registration failed\n"); + } + + /* + * Create the control device for doing management + */ + unit = device_get_unit(sc->mfi_dev); + sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR, + 0640, "mfi%d", unit); + if (sc->mfi_cdev != NULL) + sc->mfi_cdev->si_drv1 = sc; + + return (0); +} + +static int +mfi_alloc_commands(struct mfi_softc *sc) +{ + struct mfi_command *cm; + int i, ncmds; + + /* + * XXX Should we allocate all the commands up front, or allocate on + * demand later like 'aac' does? 
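+	 * For now they are all carved out up front: command i gets the frame
+	 * at mfi_frames + i * mfi_frame_size and the sense buffer at
+	 * mfi_sense + i * MFI_SENSE_LEN, and its frame context is set to i so
+	 * that a completion coming back through the reply queue maps straight
+	 * back to its mfi_command.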
+ */ + ncmds = sc->mfi_max_fw_cmds; + sc->mfi_commands = malloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF, + M_WAITOK | M_ZERO); + + for (i = 0; i < ncmds; i++) { + cm = &sc->mfi_commands[i]; + cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames + + sc->mfi_frame_size * i); + cm->cm_frame_busaddr = sc->mfi_frames_busaddr + + sc->mfi_frame_size * i; + cm->cm_frame->header.context = i; + cm->cm_sense = &sc->mfi_sense[i]; + cm->cm_sense_busaddr= sc->mfi_sense_busaddr + MFI_SENSE_LEN * i; + cm->cm_sc = sc; + if (bus_dmamap_create(sc->mfi_buffer_dmat, 0, + &cm->cm_dmamap) == 0) + mfi_release_command(cm); + else + break; + sc->mfi_total_cmds++; + } + + return (0); +} + +static void +mfi_release_command(struct mfi_command *cm) +{ + uint32_t *hdr_data; + + /* + * Zero out the important fields of the frame, but make sure the + * context field is preserved + */ + hdr_data = (uint32_t *)cm->cm_frame; + hdr_data[0] = 0; + hdr_data[1] = 0; + + cm->cm_extra_frames = 0; + cm->cm_flags = 0; + cm->cm_complete = NULL; + cm->cm_private = NULL; + cm->cm_sg = 0; + cm->cm_total_frame_size = 0; + mfi_enqueue_free(cm); +} + +static int +mfi_comms_init(struct mfi_softc *sc) +{ + struct mfi_command *cm; + struct mfi_init_frame *init; + struct mfi_init_qinfo *qinfo; + int error; + + if ((cm = mfi_dequeue_free(sc)) == NULL) + return (EBUSY); + + /* + * Abuse the SG list area of the frame to hold the init_qinfo + * object; + */ + init = &cm->cm_frame->init; + qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE); + + bzero(qinfo, sizeof(struct mfi_init_qinfo)); + qinfo->rq_entries = sc->mfi_max_fw_cmds + 1; + qinfo->rq_addr_lo = sc->mfi_comms_busaddr + + offsetof(struct mfi_hwcomms, hw_reply_q); + qinfo->pi_addr_lo = sc->mfi_comms_busaddr + + offsetof(struct mfi_hwcomms, hw_pi); + qinfo->ci_addr_lo = sc->mfi_comms_busaddr + + offsetof(struct mfi_hwcomms, hw_ci); + + init->header.cmd = MFI_CMD_INIT; + init->header.data_len = sizeof(struct mfi_init_qinfo); + init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE; + + if ((error = mfi_polled_command(sc, cm)) != 0) { + device_printf(sc->mfi_dev, "failed to send init command\n"); + return (error); + } + mfi_release_command(cm); + + return (0); +} + +static int +mfi_get_controller_info(struct mfi_softc *sc) +{ + struct mfi_command *cm; + struct mfi_dcmd_frame *dcmd; + struct mfi_ctrl_info *ci; + uint32_t max_sectors_1, max_sectors_2; + int error; + + if ((cm = mfi_dequeue_free(sc)) == NULL) + return (EBUSY); + + ci = malloc(sizeof(struct mfi_ctrl_info), M_MFIBUF, M_NOWAIT | M_ZERO); + if (ci == NULL) { + mfi_release_command(cm); + return (ENOMEM); + } + + dcmd = &cm->cm_frame->dcmd; + bzero(dcmd->mbox, MFI_MBOX_SIZE); + dcmd->header.cmd = MFI_CMD_DCMD; + dcmd->header.timeout = 0; + dcmd->header.data_len = sizeof(struct mfi_ctrl_info); + dcmd->opcode = MFI_DCMD_CTRL_GETINFO; + cm->cm_sg = &dcmd->sgl; + cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE; + cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED; + cm->cm_data = ci; + cm->cm_len = sizeof(struct mfi_ctrl_info); + + if ((error = mfi_mapcmd(sc, cm)) != 0) { + device_printf(sc->mfi_dev, "Controller info buffer map failed"); + free(ci, M_MFIBUF); + mfi_release_command(cm); + return (error); + } + + /* It's ok if this fails, just use default info instead */ + if ((error = mfi_polled_command(sc, cm)) != 0) { + device_printf(sc->mfi_dev, "Failed to get controller info\n"); + sc->mfi_max_io = (sc->mfi_total_sgl - 1) * PAGE_SIZE / + MFI_SECTOR_LEN; + free(ci, M_MFIBUF); + mfi_release_command(cm); + 
return (0); + } + + bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, + BUS_DMASYNC_POSTREAD); + bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); + + max_sectors_1 = (1 << ci->stripe_sz_ops.min) * ci->max_strips_per_io; + max_sectors_2 = ci->max_request_size; + sc->mfi_max_io = min(max_sectors_1, max_sectors_2); + + free(ci, M_MFIBUF); + mfi_release_command(cm); + + return (error); +} + +static int +mfi_polled_command(struct mfi_softc *sc, struct mfi_command *cm) +{ + struct mfi_frame_header *hdr; + int tm = MFI_POLL_TIMEOUT_SECS * 1000000; + + hdr = &cm->cm_frame->header; + hdr->cmd_status = 0xff; + hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; + + mfi_send_frame(sc, cm); + + while (hdr->cmd_status == 0xff) { + DELAY(1000); + tm -= 1000; + if (tm <= 0) + break; + } + + if (hdr->cmd_status == 0xff) { + device_printf(sc->mfi_dev, "Frame %p timed out\n", hdr); + return (ETIMEDOUT); + } + + return (0); +} + +void +mfi_free(struct mfi_softc *sc) +{ + struct mfi_command *cm; + int i; + + if (sc->mfi_cdev != NULL) + destroy_dev(sc->mfi_cdev); + + if (sc->mfi_total_cmds != 0) { + for (i = 0; i < sc->mfi_total_cmds; i++) { + cm = &sc->mfi_commands[i]; + bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap); + } + free(sc->mfi_commands, M_MFIBUF); + } + + if (sc->mfi_intr) + bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr); + if (sc->mfi_irq != NULL) + bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid, + sc->mfi_irq); + + if (sc->mfi_sense_busaddr != 0) + bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap); + if (sc->mfi_sense != NULL) + bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense, + sc->mfi_sense_dmamap); + if (sc->mfi_sense_dmat != NULL) + bus_dma_tag_destroy(sc->mfi_sense_dmat); + + if (sc->mfi_frames_busaddr != 0) + bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap); + if (sc->mfi_frames != NULL) + bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames, + sc->mfi_frames_dmamap); + if (sc->mfi_frames_dmat != NULL) + bus_dma_tag_destroy(sc->mfi_frames_dmat); + + if (sc->mfi_comms_busaddr != 0) + bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap); + if (sc->mfi_comms != NULL) + bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms, + sc->mfi_comms_dmamap); + if (sc->mfi_comms_dmat != NULL) + bus_dma_tag_destroy(sc->mfi_comms_dmat); + + if (sc->mfi_buffer_dmat != NULL) + bus_dma_tag_destroy(sc->mfi_buffer_dmat); + if (sc->mfi_parent_dmat != NULL) + bus_dma_tag_destroy(sc->mfi_parent_dmat); + + if (mtx_initialized(&sc->mfi_io_lock)) + mtx_destroy(&sc->mfi_io_lock); + + return; +} + +static void +mfi_startup(void *arg) +{ + struct mfi_softc *sc; + + sc = (struct mfi_softc *)arg; + + config_intrhook_disestablish(&sc->mfi_ich); + + mfi_enable_intr(sc); + mfi_ldprobe_inq(sc); +} + +static void +mfi_intr(void *arg) +{ + struct mfi_softc *sc; + struct mfi_command *cm; + uint32_t status, pi, ci, context; + + sc = (struct mfi_softc *)arg; + + status = MFI_READ4(sc, MFI_OSTS); + if ((status & MFI_OSTS_INTR_VALID) == 0) + return; + MFI_WRITE4(sc, MFI_OSTS, status); + + pi = sc->mfi_comms->hw_pi; + ci = sc->mfi_comms->hw_ci; + + mtx_lock(&sc->mfi_io_lock); + while (ci != pi) { + context = sc->mfi_comms->hw_reply_q[ci]; + sc->mfi_comms->hw_reply_q[ci] = 0xffffffff; + if (context == 0xffffffff) { + device_printf(sc->mfi_dev, "mfi_intr: invalid context " + "pi= %d ci= %d\n", pi, ci); + } else { + cm = &sc->mfi_commands[context]; + mfi_remove_busy(cm); + mfi_complete(sc, cm); + } + ci++; + if (ci == (sc->mfi_max_fw_cmds + 1)) { + ci = 0; + } + } + 
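+	/*
+	 * Each reply queue slot consumed above was overwritten with
+	 * 0xffffffff so that a stale context is never processed twice;
+	 * the updated consumer index is handed back to the firmware once
+	 * the queue walk is finished.
+	 */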
mtx_unlock(&sc->mfi_io_lock); + + sc->mfi_comms->hw_ci = ci; + + return; +} + +int +mfi_shutdown(struct mfi_softc *sc) +{ + struct mfi_dcmd_frame *dcmd; + struct mfi_command *cm; + int error; + + if ((cm = mfi_dequeue_free(sc)) == NULL) + return (EBUSY); + + /* AEN? */ + + dcmd = &cm->cm_frame->dcmd; + bzero(dcmd->mbox, MFI_MBOX_SIZE); + dcmd->header.cmd = MFI_CMD_DCMD; + dcmd->header.sg_count = 0; + dcmd->header.flags = MFI_FRAME_DIR_NONE; + dcmd->header.timeout = 0; + dcmd->header.data_len = 0; + dcmd->opcode = MFI_DCMD_CTRL_SHUTDOWN; + + if ((error = mfi_polled_command(sc, cm)) != 0) { + device_printf(sc->mfi_dev, "Failed to shutdown controller\n"); + } + + return (error); +} + +static void +mfi_enable_intr(struct mfi_softc *sc) +{ + + MFI_WRITE4(sc, MFI_OMSK, 0x01); +} + +static void +mfi_ldprobe_inq(struct mfi_softc *sc) +{ + struct mfi_command *cm; + struct mfi_pass_frame *pass; + char *inq; + int i; + + /* Probe all possible targets with a SCSI INQ command */ + mtx_lock(&sc->mfi_io_lock); + sc->mfi_probe_count = 0; + for (i = 0; i < MFI_MAX_CHANNEL_DEVS; i++) { + inq = malloc(MFI_INQ_LENGTH, M_MFIBUF, M_NOWAIT|M_ZERO); + if (inq == NULL) + break; + cm = mfi_dequeue_free(sc); + if (cm == NULL) { + tsleep(mfi_startup, 0, "mfistart", 5 * hz); + i--; + continue; + } + pass = &cm->cm_frame->pass; + pass->header.cmd = MFI_CMD_LD_SCSI_IO; + pass->header.target_id = i; + pass->header.lun_id = 0; + pass->header.cdb_len = 6; + pass->header.timeout = 0; + pass->header.data_len = MFI_INQ_LENGTH; + bzero(pass->cdb, 16); + pass->cdb[0] = INQUIRY; + pass->cdb[4] = MFI_INQ_LENGTH; + pass->header.sense_len = MFI_SENSE_LEN; + pass->sense_addr_lo = cm->cm_sense_busaddr; + pass->sense_addr_hi = 0; + cm->cm_complete = mfi_ldprobe_inq_complete; + cm->cm_private = inq; + cm->cm_sg = &pass->sgl; + cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE; + cm->cm_flags |= MFI_CMD_DATAIN; + cm->cm_data = inq; + cm->cm_len = MFI_INQ_LENGTH; + sc->mfi_probe_count++; + mfi_enqueue_ready(cm); + mfi_startio(sc); + } + + /* Sleep while the arrays are attaching */ + msleep(mfi_startup, &sc->mfi_io_lock, 0, "mfistart", 60 * hz); + mtx_unlock(&sc->mfi_io_lock); + + return; +} + +static void +mfi_ldprobe_inq_complete(struct mfi_command *cm) +{ + struct mfi_frame_header *hdr; + struct mfi_softc *sc; + struct scsi_inquiry_data *inq; + + sc = cm->cm_sc; + inq = cm->cm_private; + hdr = &cm->cm_frame->header; + + if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0x00) || + (SID_TYPE(inq) != T_DIRECT)) { + free(inq, M_MFIBUF); + mfi_release_command(cm); + if (--sc->mfi_probe_count <= 0) + wakeup(mfi_startup); + return; + } + + free(inq, M_MFIBUF); + mfi_release_command(cm); + mfi_ldprobe_tur(sc, hdr->target_id); +} + +static int +mfi_ldprobe_tur(struct mfi_softc *sc, int id) +{ + struct mfi_command *cm; + struct mfi_pass_frame *pass; + + cm = mfi_dequeue_free(sc); + if (cm == NULL) + return (EBUSY); + pass = &cm->cm_frame->pass; + pass->header.cmd = MFI_CMD_LD_SCSI_IO; + pass->header.target_id = id; + pass->header.lun_id = 0; + pass->header.cdb_len = 6; + pass->header.timeout = 0; + pass->header.data_len = 0; + bzero(pass->cdb, 16); + pass->cdb[0] = TEST_UNIT_READY; + pass->header.sense_len = MFI_SENSE_LEN; + pass->sense_addr_lo = cm->cm_sense_busaddr; + pass->sense_addr_hi = 0; + cm->cm_complete = mfi_ldprobe_tur_complete; + cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE; + cm->cm_flags = 0; + mfi_enqueue_ready(cm); + mfi_startio(sc); + + return (0); +} + +static void +mfi_ldprobe_tur_complete(struct mfi_command *cm) +{ + 
struct mfi_frame_header *hdr; + struct mfi_softc *sc; + + sc = cm->cm_sc; + hdr = &cm->cm_frame->header; + + if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0x00)) { + device_printf(sc->mfi_dev, "Logical disk %d is not ready, " + "cmd_status= %d scsi_status= %d\n", hdr->target_id, + hdr->cmd_status, hdr->scsi_status); + mfi_print_sense(sc, cm->cm_sense); + mfi_release_command(cm); + if (--sc->mfi_probe_count <= 0) + wakeup(mfi_startup); + return; + } + mfi_release_command(cm); + mfi_ldprobe_capacity(sc, hdr->target_id); +} + +static int +mfi_ldprobe_capacity(struct mfi_softc *sc, int id) +{ + struct mfi_command *cm; + struct mfi_pass_frame *pass; + struct scsi_read_capacity_data_long *cap; + + cap = malloc(sizeof(*cap), M_MFIBUF, M_NOWAIT|M_ZERO); + if (cap == NULL) + return (ENOMEM); + cm = mfi_dequeue_free(sc); + if (cm == NULL) + return (EBUSY); + pass = &cm->cm_frame->pass; + pass->header.cmd = MFI_CMD_LD_SCSI_IO; + pass->header.target_id = id; + pass->header.lun_id = 0; + pass->header.cdb_len = 6; + pass->header.timeout = 0; + pass->header.data_len = sizeof(*cap); + bzero(pass->cdb, 16); + pass->cdb[0] = 0x9e; /* READ CAPACITY 16 */ + pass->cdb[13] = sizeof(*cap); + pass->header.sense_len = MFI_SENSE_LEN; + pass->sense_addr_lo = cm->cm_sense_busaddr; + pass->sense_addr_hi = 0; + cm->cm_complete = mfi_ldprobe_capacity_complete; + cm->cm_private = cap; + cm->cm_sg = &pass->sgl; + cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE; + cm->cm_flags |= MFI_CMD_DATAIN; + cm->cm_data = cap; + cm->cm_len = sizeof(*cap); + mfi_enqueue_ready(cm); + mfi_startio(sc); + + return (0); +} + +static void +mfi_ldprobe_capacity_complete(struct mfi_command *cm) +{ + struct mfi_frame_header *hdr; + struct mfi_softc *sc; + struct scsi_read_capacity_data_long *cap; + uint64_t sectors; + uint32_t secsize; + int target; + + sc = cm->cm_sc; + cap = cm->cm_private; + hdr = &cm->cm_frame->header; + + if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0x00)) { + device_printf(sc->mfi_dev, "Failed to read capacity for " + "logical disk\n"); + device_printf(sc->mfi_dev, "cmd_status= %d scsi_status= %d\n", + hdr->cmd_status, hdr->scsi_status); + free(cap, M_MFIBUF); + mfi_release_command(cm); + if (--sc->mfi_probe_count <= 0) + wakeup(mfi_startup); + return; + } + target = hdr->target_id; + sectors = scsi_8btou64(cap->addr); + secsize = scsi_4btoul(cap->length); + free(cap, M_MFIBUF); + mfi_release_command(cm); + mfi_add_ld(sc, target, sectors, secsize); + if (--sc->mfi_probe_count <= 0) + wakeup(mfi_startup); + + return; +} + +static int +mfi_add_ld(struct mfi_softc *sc, int id, uint64_t sectors, uint32_t secsize) +{ + struct mfi_ld *ld; + device_t child; + + ld = malloc(sizeof(struct mfi_ld), M_MFIBUF, M_NOWAIT|M_ZERO); + if (ld == NULL) { + device_printf(sc->mfi_dev, "Cannot allocate ld\n"); + return (ENOMEM); + } + + if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) { + device_printf(sc->mfi_dev, "Failed to add logical disk\n"); + return (EINVAL); + } + + ld->ld_id = id; + ld->ld_disk = child; + ld->ld_secsize = secsize; + ld->ld_sectors = sectors; + + device_set_ivars(child, ld); + device_set_desc(child, "MFI Logical Disk"); + TAILQ_INSERT_TAIL(&sc->mfi_ld_tqh, ld, ld_link); + mtx_unlock(&sc->mfi_io_lock); + mtx_lock(&Giant); + bus_generic_attach(sc->mfi_dev); + mtx_unlock(&Giant); + mtx_lock(&sc->mfi_io_lock); + + return (0); +} + +static struct mfi_command * +mfi_bio_command(struct mfi_softc *sc) +{ + struct mfi_io_frame *io; + struct mfi_command *cm; + struct bio *bio; + int 
flags, blkcount;; + + if ((cm = mfi_dequeue_free(sc)) == NULL) + return (NULL); + + if ((bio = mfi_dequeue_bio(sc)) == NULL) { + mfi_release_command(cm); + return (NULL); + } + + io = &cm->cm_frame->io; + switch (bio->bio_cmd & 0x03) { + case BIO_READ: + io->header.cmd = MFI_CMD_LD_READ; + flags = MFI_CMD_DATAIN; + break; + case BIO_WRITE: + io->header.cmd = MFI_CMD_LD_WRITE; + flags = MFI_CMD_DATAOUT; + break; + default: + panic("Invalid bio command"); + } + + /* Cheat with the sector length to avoid a non-constant division */ + blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN; + io->header.target_id = (uintptr_t)bio->bio_driver1; + io->header.timeout = 0; + io->header.flags = 0; + io->header.sense_len = MFI_SENSE_LEN; + io->header.data_len = blkcount; + io->sense_addr_lo = cm->cm_sense_busaddr; + io->sense_addr_hi = 0; + io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32; + io->lba_lo = bio->bio_pblkno & 0xffffffff; + cm->cm_complete = mfi_bio_complete; + cm->cm_private = bio; + cm->cm_data = bio->bio_data; + cm->cm_len = bio->bio_bcount; + cm->cm_sg = &io->sgl; + cm->cm_total_frame_size = MFI_IO_FRAME_SIZE; + cm->cm_flags = flags; + + return (cm); +} + +static void +mfi_bio_complete(struct mfi_command *cm) +{ + struct bio *bio; + struct mfi_frame_header *hdr; + struct mfi_softc *sc; + + bio = cm->cm_private; + hdr = &cm->cm_frame->header; + sc = cm->cm_sc; + + if ((hdr->cmd_status != 0) || (hdr->scsi_status != 0)) { + bio->bio_flags |= BIO_ERROR; + bio->bio_error = EIO; + device_printf(sc->mfi_dev, "I/O error, status= %d " + "scsi_status= %d\n", hdr->cmd_status, hdr->scsi_status); + mfi_print_sense(cm->cm_sc, cm->cm_sense); + } + + mfi_release_command(cm); + mfi_disk_complete(bio); +} + +void +mfi_startio(struct mfi_softc *sc) +{ + struct mfi_command *cm; + + for (;;) { + /* Don't bother if we're short on resources */ + if (sc->mfi_flags & MFI_FLAGS_QFRZN) + break; + + /* Try a command that has already been prepared */ + cm = mfi_dequeue_ready(sc); + + /* Nope, so look for work on the bioq */ + if (cm == NULL) + cm = mfi_bio_command(sc); + + /* No work available, so exit */ + if (cm == NULL) + break; + + /* Send the command to the controller */ + if (mfi_mapcmd(sc, cm) != 0) { + mfi_requeue_ready(cm); + break; + } + } +} + +static int +mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm) +{ + int error, polled; + + if (cm->cm_data != NULL) { + polled = (cm->cm_flags & MFI_CMD_POLLED) ? 
BUS_DMA_NOWAIT : 0; + error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap, + cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled); + if (error == EINPROGRESS) { + sc->mfi_flags |= MFI_FLAGS_QFRZN; + return (0); + } + } else { + mfi_enqueue_busy(cm); + error = mfi_send_frame(sc, cm); + } + + return (error); +} + +static void +mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) +{ + struct mfi_frame_header *hdr; + struct mfi_command *cm; + struct mfi_softc *sc; + int i, dir; + + if (error) + return; + + cm = (struct mfi_command *)arg; + sc = cm->cm_sc; + hdr = (struct mfi_frame_header *)cm->cm_frame; + + for (i = 0; i < nsegs; i++) { + if ((cm->cm_flags & MFI_FLAGS_SG64) == 0) { + cm->cm_sg->sg32[i].addr = segs[i].ds_addr; + cm->cm_sg->sg32[i].len = segs[i].ds_len; + } else { + cm->cm_sg->sg64[i].addr = segs[i].ds_addr; + cm->cm_sg->sg64[i].len = segs[i].ds_len; + hdr->flags |= MFI_FRAME_SGL64; + } + } + hdr->sg_count = nsegs; + + dir = 0; + if (cm->cm_flags & MFI_CMD_DATAIN) { + dir |= BUS_DMASYNC_PREREAD; + hdr->flags |= MFI_FRAME_DIR_READ; + } + if (cm->cm_flags & MFI_CMD_DATAOUT) { + dir |= BUS_DMASYNC_PREWRITE; + hdr->flags |= MFI_FRAME_DIR_WRITE; + } + bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir); + cm->cm_flags |= MFI_CMD_MAPPED; + + /* + * Instead of calculating the total number of frames in the + * compound frame, it's already assumed that there will be at + * least 1 frame, so don't compensate for the modulo of the + * following division. + */ + cm->cm_total_frame_size += (sc->mfi_sgsize * nsegs); + cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE; + + /* The caller will take care of delivering polled commands */ + if ((cm->cm_flags & MFI_CMD_POLLED) == 0) { + mfi_enqueue_busy(cm); + mfi_send_frame(sc, cm); + } + + return; +} + +static int +mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm) +{ + + /* + * The bus address of the command is aligned on a 64 byte boundary, + * leaving the least 6 bits as zero. For whatever reason, the + * hardware wants the address shifted right by three, leaving just + * 3 zero bits. These three bits are then used to indicate how many + * 64 byte frames beyond the first one are used in the command. The + * extra frames are typically filled with S/G elements. The extra + * frames must also be contiguous. Thus, a compound frame can be at + * most 512 bytes long, allowing for up to 59 32-bit S/G elements or + * 39 64-bit S/G elements for block I/O commands. This means that + * I/O transfers of 256k and higher simply are not possible, which + * is quite odd for such a modern adapter. 
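+	 * To put numbers on that: the three low bits allow at most 7 extra
+	 * frames, so a compound frame tops out at 8 * 64 = 512 bytes.  After
+	 * the 40 byte I/O frame there are 472 bytes left for the list, which
+	 * is 472 / 8 = 59 32-bit entries or 472 / 12 = 39 64-bit entries.
+	 * Assuming 4kB pages and one page per element, that caps a single
+	 * request at roughly 236kB or 156kB respectively, hence the 256k
+	 * limit noted above.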
+ */ + MFI_WRITE4(sc, MFI_IQP, (cm->cm_frame_busaddr >> 3) | + cm->cm_extra_frames); + return (0); +} + +static void +mfi_complete(struct mfi_softc *sc, struct mfi_command *cm) +{ + int dir; + + if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) { + dir = 0; + if (cm->cm_flags & MFI_CMD_DATAIN) + dir |= BUS_DMASYNC_POSTREAD; + if (cm->cm_flags & MFI_CMD_DATAOUT) + dir |= BUS_DMASYNC_POSTWRITE; + + bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir); + bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); + cm->cm_flags &= ~MFI_CMD_MAPPED; + } + + if (cm->cm_complete != NULL) + cm->cm_complete(cm); + + sc->mfi_flags &= ~MFI_FLAGS_QFRZN; + mfi_startio(sc); +} + +int +mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt, int len) +{ + struct mfi_command *cm; + struct mfi_io_frame *io; + int error; + + if ((cm = mfi_dequeue_free(sc)) == NULL) + return (EBUSY); + + io = &cm->cm_frame->io; + io->header.cmd = MFI_CMD_LD_WRITE; + io->header.target_id = id; + io->header.timeout = 0; + io->header.flags = 0; + io->header.sense_len = MFI_SENSE_LEN; + io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN; + io->sense_addr_lo = cm->cm_sense_busaddr; + io->sense_addr_hi = 0; + io->lba_hi = (lba & 0xffffffff00000000) >> 32; + io->lba_lo = lba & 0xffffffff; + cm->cm_data = virt; + cm->cm_len = len; + cm->cm_sg = &io->sgl; + cm->cm_total_frame_size = MFI_IO_FRAME_SIZE; + cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT; + + if ((error = mfi_mapcmd(sc, cm)) != 0) { + mfi_release_command(cm); + return (error); + } + + error = mfi_polled_command(sc, cm); + bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, + BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); + mfi_release_command(cm); + + return (error); +} + +static int +mfi_open(struct cdev *dev, int flags, int fmt, d_thread_t *td) +{ + struct mfi_softc *sc; + + sc = dev->si_drv1; + sc->mfi_flags |= MFI_FLAGS_OPEN; + + return (0); +} + +static int +mfi_close(struct cdev *dev, int flags, int fmt, d_thread_t *td) +{ + struct mfi_softc *sc; + + sc = dev->si_drv1; + sc->mfi_flags &= ~MFI_FLAGS_OPEN; + + return (0); +} + +static int +mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td) +{ + struct mfi_softc *sc; + union mfi_statrequest *ms; + int error; + + sc = dev->si_drv1; + error = 0; + + switch (cmd) { + case MFIIO_STATS: + ms = (union mfi_statrequest *)arg; + switch (ms->ms_item) { + case MFIQ_FREE: + case MFIQ_BIO: + case MFIQ_READY: + case MFIQ_BUSY: + bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat, + sizeof(struct mfi_qstat)); + break; + default: + error = ENOENT; + break; + } + break; + default: + error = ENOENT; + break; + } + + return (error); +} diff --git a/sys/dev/mfi/mfi_disk.c b/sys/dev/mfi/mfi_disk.c new file mode 100644 index 000000000000..03b04592c76b --- /dev/null +++ b/sys/dev/mfi/mfi_disk.c @@ -0,0 +1,242 @@ +/*- + * Copyright (c) 2006 IronPort Systems + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include "opt_mfi.h" + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +#include +#include +#include + +static int mfi_disk_probe(device_t dev); +static int mfi_disk_attach(device_t dev); +static int mfi_disk_detach(device_t dev); + +static disk_open_t mfi_disk_open; +static disk_close_t mfi_disk_close; +static disk_strategy_t mfi_disk_strategy; +static dumper_t mfi_disk_dump; + +static devclass_t mfi_disk_devclass; + +struct mfi_disk { + device_t ld_dev; + int ld_id; + int ld_unit; + struct mfi_softc *ld_controller; + struct mfi_ld *ld_ld; + struct disk *ld_disk; +}; + +static device_method_t mfi_disk_methods[] = { + DEVMETHOD(device_probe, mfi_disk_probe), + DEVMETHOD(device_attach, mfi_disk_attach), + DEVMETHOD(device_detach, mfi_disk_detach), + { 0, 0 } +}; + +static driver_t mfi_disk_driver = { + "mfid", + mfi_disk_methods, + sizeof(struct mfi_disk) +}; + +DRIVER_MODULE(mfid, mfi, mfi_disk_driver, mfi_disk_devclass, 0, 0); + +static int +mfi_disk_probe(device_t dev) +{ + + return (0); +} + +static int +mfi_disk_attach(device_t dev) +{ + struct mfi_disk *sc; + struct mfi_ld *ld; + uint64_t sectors; + uint32_t secsize; + + sc = device_get_softc(dev); + ld = device_get_ivars(dev); + + sc->ld_dev = dev; + sc->ld_id = ld->ld_id; + sc->ld_unit = device_get_unit(dev); + sc->ld_ld = device_get_ivars(dev); + sc->ld_controller = device_get_softc(device_get_parent(dev)); + + sectors = sc->ld_ld->ld_sectors; + secsize = sc->ld_ld->ld_secsize; + if (secsize != MFI_SECTOR_LEN) { + device_printf(sc->ld_dev, "Reported sector length %d is not " + "512, aborting\n", secsize); + free(sc->ld_ld, M_MFIBUF); + return (EINVAL); + } + + device_printf(dev, "%juMB (%ju sectors) RAID\n", + sectors / (1024 * 1024 / secsize), sectors); + + sc->ld_disk = disk_alloc(); + sc->ld_disk->d_drv1 = sc; + sc->ld_disk->d_maxsize = sc->ld_controller->mfi_max_io * secsize; + sc->ld_disk->d_name = "mfid"; + sc->ld_disk->d_open = mfi_disk_open; + sc->ld_disk->d_close = mfi_disk_close; + sc->ld_disk->d_strategy = mfi_disk_strategy; + sc->ld_disk->d_dump = mfi_disk_dump; + sc->ld_disk->d_unit = sc->ld_unit; + sc->ld_disk->d_sectorsize = secsize; + sc->ld_disk->d_mediasize = sectors * secsize; + if (sc->ld_disk->d_mediasize >= (1 * 1024 * 1024)) { + sc->ld_disk->d_fwheads = 255; + sc->ld_disk->d_fwsectors = 63; + } else { + sc->ld_disk->d_fwheads = 64; + sc->ld_disk->d_fwsectors = 32; + } + disk_create(sc->ld_disk, DISK_VERSION); + + return (0); +} + +static int +mfi_disk_detach(device_t dev) +{ + struct mfi_disk *sc; + + sc = device_get_softc(dev); + + if (sc->ld_disk->d_flags & DISKFLAG_OPEN) + return 
(EBUSY); + + disk_destroy(sc->ld_disk); + return (0); +} + +static int +mfi_disk_open(struct disk *dp) +{ + + return (0); +} + +static int +mfi_disk_close(struct disk *dp) +{ + + return (0); +} + +static void +mfi_disk_strategy(struct bio *bio) +{ + struct mfi_disk *sc; + struct mfi_softc *controller; + + sc = bio->bio_disk->d_drv1; + + if (sc == NULL) { + bio->bio_error = EINVAL; + bio->bio_flags |= BIO_ERROR; + bio->bio_resid = bio->bio_bcount; + biodone(bio); + return; + } + + controller = sc->ld_controller; + bio->bio_driver1 = (void *)(uintptr_t)sc->ld_id; + mtx_lock(&controller->mfi_io_lock); + mfi_enqueue_bio(controller, bio); + mfi_startio(controller); + mtx_unlock(&controller->mfi_io_lock); + return; +} + +void +mfi_disk_complete(struct bio *bio) +{ + struct mfi_disk *sc; + struct mfi_frame_header *hdr; + + sc = bio->bio_disk->d_drv1; + hdr = bio->bio_driver1; + + if (bio->bio_flags & BIO_ERROR) { + if (bio->bio_error == 0) + bio->bio_error = EIO; + disk_err(bio, "hard error", -1, 1); + } else { + bio->bio_resid = 0; + } + biodone(bio); +} + +static int +mfi_disk_dump(void *arg, void *virt, vm_offset_t phys, off_t offset, size_t len) +{ + struct mfi_disk *sc; + struct mfi_softc *parent_sc; + struct disk *dp; + int error; + + dp = arg; + sc = dp->d_drv1; + parent_sc = sc->ld_controller; + + if (len > 0) { + if ((error = mfi_dump_blocks(parent_sc, sc->ld_id, offset / + sc->ld_ld->ld_secsize, virt, len)) != 0) + return (error); + } else { + /* mfi_sync_cache(parent_sc, sc->ld_id); */ + } + + return (0); +} diff --git a/sys/dev/mfi/mfi_ioctl.h b/sys/dev/mfi/mfi_ioctl.h new file mode 100644 index 000000000000..6730ba2833a8 --- /dev/null +++ b/sys/dev/mfi/mfi_ioctl.h @@ -0,0 +1,47 @@ +/*- + * Copyright (c) 2006 IronPort Systems + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#include +__FBSDID("$FreeBSD$"); + +#define MFIQ_FREE 0 +#define MFIQ_BIO 1 +#define MFIQ_READY 2 +#define MFIQ_BUSY 3 +#define MFIQ_COUNT 4 + +struct mfi_qstat { + uint32_t q_length; + uint32_t q_max; +}; + +union mfi_statrequest { + uint32_t ms_item; + struct mfi_qstat ms_qstat; +}; + +#define MFIIO_STATS _IOWR('Q', 101, union mfi_statrequest) + diff --git a/sys/dev/mfi/mfi_pci.c b/sys/dev/mfi/mfi_pci.c new file mode 100644 index 000000000000..74491ed815ed --- /dev/null +++ b/sys/dev/mfi/mfi_pci.c @@ -0,0 +1,243 @@ +/*- + * Copyright (c) 2006 IronPort Systems + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#include +__FBSDID("$FreeBSD$"); + +/* PCI/PCI-X/PCIe bus interface for the LSI MegaSAS controllers */ + +#include "opt_mfi.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include +#include +#include + +static int mfi_pci_probe(device_t); +static int mfi_pci_attach(device_t); +static int mfi_pci_detach(device_t); +static int mfi_pci_suspend(device_t); +static int mfi_pci_resume(device_t); +static void mfi_pci_free(struct mfi_softc *); + +static device_method_t mfi_methods[] = { + DEVMETHOD(device_probe, mfi_pci_probe), + DEVMETHOD(device_attach, mfi_pci_attach), + DEVMETHOD(device_detach, mfi_pci_detach), + DEVMETHOD(device_suspend, mfi_pci_suspend), + DEVMETHOD(device_resume, mfi_pci_resume), + DEVMETHOD(bus_print_child, bus_generic_print_child), + DEVMETHOD(bus_driver_added, bus_generic_driver_added), + { 0, 0 } +}; + +static driver_t mfi_pci_driver = { + "mfi", + mfi_methods, + sizeof(struct mfi_softc) +}; + +static devclass_t mfi_devclass; +DRIVER_MODULE(mfi, pci, mfi_pci_driver, mfi_devclass, 0, 0); + +struct mfi_ident { + uint16_t vendor; + uint16_t device; + uint16_t subvendor; + uint16_t subdevice; + int flags; + const char *desc; +} mfi_identifiers[] = { + {0x1000, 0x0411, 0xffff, 0xffff, 0, "LSI MegaSAS 1064R"}, + {0x1028, 0x0015, 0xffff, 0xffff, 0, "Dell PERC 5/i"}, + {0, 0, 0, 0, 0, NULL} +}; + +static struct mfi_ident * +mfi_find_ident(device_t dev) +{ + struct mfi_ident *m; + + for (m = mfi_identifiers; m->vendor != 0; m++) { + if ((m->vendor == pci_get_vendor(dev)) && + (m->device == pci_get_device(dev)) && + ((m->subvendor == pci_get_subvendor(dev)) || + (m->subvendor == 0xffff)) && + ((m->subdevice == pci_get_subdevice(dev)) || + (m->subdevice == 0xffff))) + return (m); + } + + return (NULL); +} + +static int +mfi_pci_probe(device_t dev) +{ + struct mfi_ident *id; + + if ((id = mfi_find_ident(dev)) != NULL) { + device_set_desc(dev, id->desc); + return (BUS_PROBE_DEFAULT); + } + return (ENXIO); +} + +static int +mfi_pci_attach(device_t dev) +{ + struct mfi_softc *sc; + struct mfi_ident *m; + uint32_t command; + int error; + + sc = device_get_softc(dev); + bzero(sc, sizeof(*sc)); + sc->mfi_dev = dev; + + /* Verify that the adapter can be set up in PCI space */ + command = pci_read_config(dev, PCIR_COMMAND, 2); + command |= PCIM_CMD_BUSMASTEREN; + pci_write_config(dev, PCIR_COMMAND, command, 2); + command = pci_read_config(dev, PCIR_COMMAND, 2); + if ((command & PCIM_CMD_BUSMASTEREN) == 0) { + device_printf(dev, "Can't enable PCI busmaster\n"); + return (ENXIO); + } + if ((command & PCIM_CMD_MEMEN) == 0) { + device_printf(dev, "PCI memory window not available\n"); + return (ENXIO); + } + + /* Allocate PCI registers */ + sc->mfi_regs_rid = PCIR_BAR(0); + if ((sc->mfi_regs_resource = bus_alloc_resource_any(sc->mfi_dev, + SYS_RES_MEMORY, &sc->mfi_regs_rid, RF_ACTIVE)) == NULL) { + device_printf(dev, "Cannot allocate PCI registers\n"); + return (ENXIO); + } + sc->mfi_btag = rman_get_bustag(sc->mfi_regs_resource); + sc->mfi_bhandle = rman_get_bushandle(sc->mfi_regs_resource); + + error = ENOMEM; + + /* Allocate parent DMA tag */ + if (bus_dma_tag_create( NULL, /* parent */ + 1, 0, /* algnmnt, boundary */ + BUS_SPACE_MAXADDR, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ + BUS_SPACE_UNRESTRICTED, /* nsegments */ + BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ + 0, /* flags */ + NULL, NULL, /* lockfunc, 
lockarg */ + &sc->mfi_parent_dmat)) { + device_printf(dev, "Cannot allocate parent DMA tag\n"); + goto out; + } + + m = mfi_find_ident(dev); + sc->mfi_flags = m->flags; + + error = mfi_attach(sc); +out: + if (error) { + mfi_free(sc); + mfi_pci_free(sc); + } + + return (error); +} + +static int +mfi_pci_detach(device_t dev) +{ + struct mfi_softc *sc; + struct mfi_ld *ld; + int error; + + sc = device_get_softc(dev); + + if ((sc->mfi_flags & MFI_FLAGS_OPEN) != 0) + return (EBUSY); + + while ((ld = TAILQ_FIRST(&sc->mfi_ld_tqh)) != NULL) { + error = device_delete_child(dev, ld->ld_disk); + if (error) + return (error); + TAILQ_REMOVE(&sc->mfi_ld_tqh, ld, ld_link); + free(ld, M_MFIBUF); + } + + EVENTHANDLER_DEREGISTER(shutdown_final, sc->mfi_eh); + + mfi_shutdown(sc); + mfi_free(sc); + mfi_pci_free(sc); + return (0); +} + +static void +mfi_pci_free(struct mfi_softc *sc) +{ + + if (sc->mfi_regs_resource != NULL) { + bus_release_resource(sc->mfi_dev, SYS_RES_MEMORY, + sc->mfi_regs_rid, sc->mfi_regs_resource); + } + + return; +} + +static int +mfi_pci_suspend(device_t dev) +{ + + return (EINVAL); +} + +static int +mfi_pci_resume(device_t dev) +{ + + return (EINVAL); +} diff --git a/sys/dev/mfi/mfireg.h b/sys/dev/mfi/mfireg.h new file mode 100644 index 000000000000..faf83f74e097 --- /dev/null +++ b/sys/dev/mfi/mfireg.h @@ -0,0 +1,559 @@ +/*- + * Copyright (c) 2006 IronPort Systems + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#ifndef _MFIREG_H +#define _MFIREG_H + +#include +__FBSDID("$FreeBSD$"); + +/* + * MegaRAID SAS MFI firmware definitions + * + * Calling this driver 'MegaRAID SAS' is a bit misleading. It's a completely + * new firmware interface from the old AMI MegaRAID one, and there is no + * reason why this interface should be limited to just SAS. In any case, LSI + * seems to also call this interface 'MFI', so that will be used here. + */ + +/* + * Start with the register set. All registers are 32 bits wide. + * The usual Intel IOP style setup. 
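+ * In this driver, OMSG0 is read at attach time for the firmware state and
+ * the maximum command/SGL counts, IDB is written to drive the init
+ * handshake, IQP is where frame addresses are posted by mfi_send_frame(),
+ * and OSTS/OMSK carry the interrupt status and mask used by mfi_intr().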
+ */ +#define MFI_IMSG0 0x10 /* Inbound message 0 */ +#define MFI_IMSG1 0x14 /* Inbound message 1 */ +#define MFI_OMSG0 0x18 /* Outbound message 0 */ +#define MFI_OMSG1 0x1c /* Outbound message 1 */ +#define MFI_IDB 0x20 /* Inbound doorbell */ +#define MFI_ISTS 0x24 /* Inbound interrupt status */ +#define MFI_IMSK 0x28 /* Inbound interrupt mask */ +#define MFI_ODB 0x2c /* Outbound doorbell */ +#define MFI_OSTS 0x30 /* Outbound interrupt status */ +#define MFI_OMSK 0x34 /* Outbound interrupt mask */ +#define MFI_IQP 0x40 /* Inbound queue port */ +#define MFI_OQP 0x44 /* Outbound queue port */ + +/* Bits for MFI_OSTS */ +#define MFI_OSTS_INTR_VALID 0x00000002 + +/* + * Firmware state values. Found in OMSG0 during initialization. + */ +#define MFI_FWSTATE_MASK 0xf0000000 +#define MFI_FWSTATE_UNDEFINED 0x00000000 +#define MFI_FWSTATE_BB_INIT 0x10000000 +#define MFI_FWSTATE_FW_INIT 0x40000000 +#define MFI_FWSTATE_WAIT_HANDSHAKE 0x60000000 +#define MFI_FWSTATE_FW_INIT_2 0x70000000 +#define MFI_FWSTATE_DEVICE_SCAN 0x80000000 +#define MFI_FWSTATE_FLUSH_CACHE 0xa0000000 +#define MFI_FWSTATE_READY 0xb0000000 +#define MFI_FWSTATE_OPERATIONAL 0xc0000000 +#define MFI_FWSTATE_FAULT 0xf0000000 +#define MFI_FWSTATE_MAXSGL_MASK 0x00ff0000 +#define MFI_FWSTATE_MAXCMD_MASK 0x0000ffff + +/* + * Control bits to drive the card to ready state. These go into the IDB + * register. + */ +#define MFI_FWINIT_ABORT 0x00000000 /* Abort all pending commands */ +#define MFI_FWINIT_READY 0x00000002 /* Move from operational to ready */ +#define MFI_FWINIT_MFIMODE 0x00000004 /* unknown */ +#define MFI_FWINIT_CLEAR_HANDSHAKE 0x00000008 /* Respond to WAIT_HANDSHAKE */ + +/* MFI Commands */ +typedef enum { + MFI_CMD_INIT = 0x00, + MFI_CMD_LD_READ, + MFI_CMD_LD_WRITE, + MFI_CMD_LD_SCSI_IO, + MFI_CMD_PD_SCSI_IO, + MFI_CMD_DCMD, + MFI_CMD_ABORT, + MFI_CMD_SMP, + MFI_CMD_STP +} mfi_cmd_t; + +/* Direct commands */ +typedef enum { + MFI_DCMD_CTRL_GETINFO = 0x01010000, + MFI_DCMD_CTRL_FLUSHCACHE = 0x01101000, + MFI_DCMD_CTRL_SHUTDOWN = 0x01050000, + MFI_DCMD_CTRL_EVENT_GETINFO = 0x01040100, + MFI_DCMD_CTRL_EVENT_GET = 0x01040300, + MFI_DCMD_CTRL_EVENT_WAIT = 0x01040500, + MFI_DCMD_LD_GET_PROP = 0x03030000, + MFI_DCMD_CLUSTER = 0x08000000, + MFI_DCMD_CLUSTER_RESET_ALL = 0x08010100, + MFI_DCMD_CLUSTER_RESET_LD = 0x08010200 +} mfi_dcmd_t; + +/* Modifiers for MFI_DCMD_CTRL_FLUSHCACHE */ +#define MFI_FLUSHCACHE_CTRL 0x01 +#define MFI_FLUSHCACHE_DISK 0x02 + +/* Modifiers for MFI_DCMD_CTRL_SHUTDOWN */ +#define MFI_SHUTDOWN_SPINDOWN 0x01 + +/* + * MFI Frmae flags + */ +#define MFI_FRAME_POST_IN_REPLY_QUEUE 0x0000 +#define MFI_FRAME_DONT_POST_IN_REPLY_QUEUE 0x0001 +#define MFI_FRAME_SGL32 0x0000 +#define MFI_FRAME_SGL64 0x0002 +#define MFI_FRAME_SENSE32 0x0000 +#define MFI_FRAME_SENSE64 0x0004 +#define MFI_FRAME_DIR_NONE 0x0000 +#define MFI_FRAME_DIR_WRITE 0x0008 +#define MFI_FRAME_DIR_READ 0x0010 +#define MFI_FRAME_DIR_BOTH 0x0018 + +/* MFI Status codes */ +typedef enum { + MFI_STAT_OK = 0x00, + MFI_STAT_INVALID_CMD, + MFI_STAT_INVALID_DCMD, + MFI_STAT_INVALID_PARAMETER, + MFI_STAT_INVALID_SEQUENCE_NUMBER, + MFI_STAT_ABORT_NOT_POSSIBLE, + MFI_STAT_APP_HOST_CODE_NOT_FOUND, + MFI_STAT_APP_IN_USE, + MFI_STAT_APP_NOT_INITIALIZED, + MFI_STAT_ARRAY_INDEX_INVALID, + MFI_STAT_ARRAY_ROW_NOT_EMPTY, + MFI_STAT_CONFIG_RESOURCE_CONFLICT, + MFI_STAT_DEVICE_NOT_FOUND, + MFI_STAT_DRIVE_TOO_SMALL, + MFI_STAT_FLASH_ALLOC_FAIL, + MFI_STAT_FLASH_BUSY, + MFI_STAT_FLASH_ERROR = 0x10, + MFI_STAT_FLASH_IMAGE_BAD, + MFI_STAT_FLASH_IMAGE_INCOMPLETE, + 
MFI_STAT_FLASH_NOT_OPEN, + MFI_STAT_FLASH_NOT_STARTED, + MFI_STAT_FLUSH_FAILED, + MFI_STAT_HOST_CODE_NOT_FOUNT, + MFI_STAT_LD_CC_IN_PROGRESS, + MFI_STAT_LD_INIT_IN_PROGRESS, + MFI_STAT_LD_LBA_OUT_OF_RANGE, + MFI_STAT_LD_MAX_CONFIGURED, + MFI_STAT_LD_NOT_OPTIMAL, + MFI_STAT_LD_RBLD_IN_PROGRESS, + MFI_STAT_LD_RECON_IN_PROGRESS, + MFI_STAT_LD_WRONG_RAID_LEVEL, + MFI_STAT_MAX_SPARES_EXCEEDED, + MFI_STAT_MEMORY_NOT_AVAILABLE = 0x20, + MFI_STAT_MFC_HW_ERROR, + MFI_STAT_NO_HW_PRESENT, + MFI_STAT_NOT_FOUND, + MFI_STAT_NOT_IN_ENCL, + MFI_STAT_PD_CLEAR_IN_PROGRESS, + MFI_STAT_PD_TYPE_WRONG, + MFI_STAT_PR_DISABLED, + MFI_STAT_ROW_INDEX_INVALID, + MFI_STAT_SAS_CONFIG_INVALID_ACTION, + MFI_STAT_SAS_CONFIG_INVALID_DATA, + MFI_STAT_SAS_CONFIG_INVALID_PAGE, + MFI_STAT_SAS_CONFIG_INVALID_TYPE, + MFI_STAT_SCSI_DONE_WITH_ERROR, + MFI_STAT_SCSI_IO_FAILED, + MFI_STAT_SCSI_RESERVATION_CONFLICT, + MFI_STAT_SHUTDOWN_FAILED = 0x30, + MFI_STAT_TIME_NOT_SET, + MFI_STAT_WRONG_STATE, + MFI_STAT_LD_OFFLINE, + MFI_STAT_PEER_NOTIFICATION_REJECTED, + MFI_STAT_PEER_NOTIFICATION_FAILED, + MFI_STAT_RESERVATION_IN_PROGRESS, + MFI_STAT_I2C_ERRORS_DETECTED, + MFI_STAT_PCI_ERRORS_DETECTED, + MFI_STAT_INVALID_STATUS = 0xFF +} mfi_status_t; + +typedef enum { + MFI_EVT_CLASS_DEBUG = -2, + MFI_EVT_CLASS_PROGRESS = -1, + MFI_EVT_CLASS_INFO = 0, + MFI_EVT_CLASS_WARNING = 1, + MFI_EVT_CLASS_CRITICAL = 2, + MFI_EVT_CLASS_FATAL = 3, + MFI_EVT_CLASS_DEAD = 4 +} mfi_evt_class_t; + +typedef enum { + MFI_EVT_LOCALE_LD = 0x0001, + MFI_EVT_LOCALE_PD = 0x0002, + MFI_EVT_LOCALE_ENCL = 0x0004, + MFI_EVT_LOCALE_BBU = 0x0008, + MFI_EVT_LOCALE_SAS = 0x0010, + MFI_EVT_LOCALE_CTRL = 0x0020, + MFI_EVT_LOCALE_CONFIG = 0x0040, + MFI_EVT_LOCALE_CLUSTER = 0x0080, + MFI_EVT_LOCALE_ALL = 0xffff +} mfi_evt_locale_t; + +typedef enum { + MR_EVT_ARGS_NONE = 0x00, + MR_EVT_ARGS_CDB_SENSE, + MR_EVT_ARGS_LD, + MR_EVT_ARGS_LD_COUNT, + MR_EVT_ARGS_LD_LBA, + MR_EVT_ARGS_LD_OWNER, + MR_EVT_ARGS_LD_LBA_PD_LBA, + MR_EVT_ARGS_LD_PROG, + MR_EVT_ARGS_LD_STATE, + MR_EVT_ARGS_LD_STRIP, + MR_EVT_ARGS_PD, + MR_EVT_ARGS_PD_ERR, + MR_EVT_ARGS_PD_LBA, + MR_EVT_ARGS_PD_LBA_LD, + MR_EVT_ARGS_PD_PROG, + MR_EVT_ARGS_PD_STATE, + MR_EVT_ARGS_PCI, + MR_EVT_ARGS_RATE, + MR_EVT_ARGS_STR, + MR_EVT_ARGS_TIME, + MR_EVT_ARGS_ECC +} mfi_evt_args; + +/* + * Other propertities and definitions + */ +#define MFI_MAX_PD_CHANNELS 2 +#define MFI_MAX_LD_CHANNELS 2 +#define MFI_MAX_CHANNELS (MFI_MAX_PD_CHANNELS + MFI_MAX_LD_CHANNELS) +#define MFI_MAX_CHANNEL_DEVS 128 +#define MFI_DEFAULT_ID -1 +#define MFI_MAX_LUN 8 +#define MFI_MAX_LD 64 + +#define MFI_FRAME_SIZE 64 +#define MFI_MBOX_SIZE 12 + +#define MFI_POLL_TIMEOUT_SECS 10 + +/* Allow for speedier math calculations */ +#define MFI_SECTOR_LEN 512 + +/* Scatter Gather elements */ +struct mfi_sg32 { + uint32_t addr; + uint32_t len; +} __packed; + +struct mfi_sg64 { + uint64_t addr; + uint32_t len; +} __packed; + +union mfi_sgl { + struct mfi_sg32 sg32[1]; + struct mfi_sg64 sg64[1]; +} __packed; + +/* Message frames. 
All messages have a common header */ +struct mfi_frame_header { + uint8_t cmd; + uint8_t sense_len; + uint8_t cmd_status; + uint8_t scsi_status; + uint8_t target_id; + uint8_t lun_id; + uint8_t cdb_len; + uint8_t sg_count; + uint32_t context; + uint32_t pad0; + uint16_t flags; + uint16_t timeout; + uint32_t data_len; +} __packed; + +struct mfi_init_frame { + struct mfi_frame_header header; + uint32_t qinfo_new_addr_lo; + uint32_t qinfo_new_addr_hi; + uint32_t qinfo_old_addr_lo; + uint32_t qinfo_old_addr_hi; + uint32_t reserved[6]; +} __packed; + +#define MFI_IO_FRAME_SIZE 40 +struct mfi_io_frame { + struct mfi_frame_header header; + uint32_t sense_addr_lo; + uint32_t sense_addr_hi; + uint32_t lba_lo; + uint32_t lba_hi; + union mfi_sgl sgl; +} __packed; + +#define MFI_PASS_FRAME_SIZE 48 +struct mfi_pass_frame { + struct mfi_frame_header header; + uint32_t sense_addr_lo; + uint32_t sense_addr_hi; + uint8_t cdb[16]; + union mfi_sgl sgl; +} __packed; + +#define MFI_DCMD_FRAME_SIZE 40 +struct mfi_dcmd_frame { + struct mfi_frame_header header; + uint32_t opcode; + uint8_t mbox[MFI_MBOX_SIZE]; + union mfi_sgl sgl; +} __packed; + +struct mfi_abort_frame { + struct mfi_frame_header header; + uint32_t abort_context; + uint32_t pad; + uint32_t abort_mfi_addr_lo; + uint32_t abort_mfi_addr_hi; + uint32_t reserved[6]; +} __packed; + +struct mfi_smp_frame { + struct mfi_frame_header header; + uint64_t sas_addr; + union { + struct mfi_sg32 sg32[2]; + struct mfi_sg64 sg64[2]; + } sgl; +} __packed; + +struct mfi_stp_frame { + struct mfi_frame_header header; + uint16_t fis[10]; + uint32_t stp_flags; + union { + struct mfi_sg32 sg32[2]; + struct mfi_sg64 sg64[2]; + } sgl; +} __packed; + +union mfi_frame { + struct mfi_frame_header header; + struct mfi_init_frame init; + struct mfi_io_frame io; + struct mfi_pass_frame pass; + struct mfi_dcmd_frame dcmd; + struct mfi_abort_frame abort; + struct mfi_smp_frame smp; + struct mfi_stp_frame stp; + uint8_t bytes[MFI_FRAME_SIZE]; +}; + +#define MFI_SENSE_LEN 128 +struct mfi_sense { + uint8_t data[MFI_SENSE_LEN]; +}; + +/* The queue init structure that is passed with the init message */ +struct mfi_init_qinfo { + uint32_t flags; + uint32_t rq_entries; + uint32_t rq_addr_lo; + uint32_t rq_addr_hi; + uint32_t pi_addr_lo; + uint32_t pi_addr_hi; + uint32_t ci_addr_lo; + uint32_t ci_addr_hi; +} __packed; + +/* SAS (?) controller properties, part of mfi_ctrl_info */ +struct mfi_ctrl_props { + uint16_t seq_num; + uint16_t pred_fail_poll_interval; + uint16_t intr_throttle_cnt; + uint16_t intr_throttle_timeout; + uint8_t rebuild_rate; + uint8_t patrol_read_rate; + uint8_t bgi_rate; + uint8_t cc_rate; + uint8_t recon_rate; + uint8_t cache_flush_interval; + uint8_t spinup_drv_cnt; + uint8_t spinup_delay; + uint8_t cluster_enable; + uint8_t coercion_mode; + uint8_t alarm_enable; + uint8_t disable_auto_rebuild; + uint8_t disable_battery_warn; + uint8_t ecc_bucket_size; + uint16_t ecc_bucket_leak_rate; + uint8_t restore_hotspare_on_insertion; + uint8_t expose_encl_devices; + uint8_t reserved[38]; +} __packed; + +/* PCI information about the card. 
*/ +struct mfi_info_pci { + uint16_t vendor; + uint16_t device; + uint16_t subvendor; + uint16_t subdevice; + uint8_t reserved[24]; +} __packed; + +/* Host (front end) interface information */ +struct mfi_info_host { + uint8_t type; +#define MFI_INFO_HOST_PCIX 0x01 +#define MFI_INFO_HOST_PCIE 0x02 +#define MFI_INFO_HOST_ISCSI 0x04 +#define MFI_INFO_HOST_SAS3G 0x08 + uint8_t reserved[6]; + uint8_t port_count; + uint64_t port_addr[8]; +} __packed; + +/* Device (back end) interface information */ +struct mfi_info_device { + uint8_t type; +#define MFI_INFO_DEV_SPI 0x01 +#define MFI_INFO_DEV_SAS3G 0x02 +#define MFI_INFO_DEV_SATA1 0x04 +#define MFI_INFO_DEV_SATA3G 0x08 + uint8_t reserved[6]; + uint8_t port_count; + uint64_t port_addr[8]; +} __packed; + +/* Firmware component information */ +struct mfi_info_component { + char name[8]; + char version[32]; + char build_date[16]; + char build_time[16]; +} __packed; + + +/* SAS (?) controller info, returned from MFI_DCMD_CTRL_GETINFO. */ +struct mfi_ctrl_info { + struct mfi_info_pci pci; + struct mfi_info_host host; + struct mfi_info_device device; + + /* Firmware components that are present and active. */ + uint32_t image_check_word; + uint32_t image_component_count; + struct mfi_info_component image_component[8]; + + /* Firmware components that have been flashed but are inactive */ + uint32_t pending_image_component_count; + struct mfi_info_component pending_image_component[8]; + + uint8_t max_arms; + uint8_t max_spans; + uint8_t max_arrays; + uint8_t max_lds; + char product_name[80]; + char serial_number[32]; + uint32_t hw_present; +#define MFI_INFO_HW_BBU 0x01 +#define MFI_INFO_HW_ALARM 0x02 +#define MFI_INFO_HW_NVRAM 0x04 +#define MFI_INFO_HW_UART 0x08 + uint32_t current_fw_time; + uint16_t max_cmds; + uint16_t max_sg_elements; + uint32_t max_request_size; + uint16_t lds_present; + uint16_t lds_degraded; + uint16_t lds_offline; + uint16_t pd_present; + uint16_t pd_disks_present; + uint16_t pd_disks_pred_failure; + uint16_t pd_disks_failed; + uint16_t nvram_size; + uint16_t memory_size; + uint16_t flash_size; + uint16_t ram_correctable_errors; + uint16_t ram_uncorrectable_errors; + uint8_t cluster_allowed; + uint8_t cluster_active; + uint16_t max_strips_per_io; + + uint32_t raid_levels; +#define MFI_INFO_RAID_0 0x01 +#define MFI_INFO_RAID_1 0x02 +#define MFI_INFO_RAID_5 0x04 +#define MFI_INFO_RAID_1E 0x08 +#define MFI_INFO_RAID_6 0x10 + + uint32_t adapter_ops; +#define MFI_INFO_AOPS_RBLD_RATE 0x0001 +#define MFI_INFO_AOPS_CC_RATE 0x0002 +#define MFI_INFO_AOPS_BGI_RATE 0x0004 +#define MFI_INFO_AOPS_RECON_RATE 0x0008 +#define MFI_INFO_AOPS_PATROL_RATE 0x0010 +#define MFI_INFO_AOPS_ALARM_CONTROL 0x0020 +#define MFI_INFO_AOPS_CLUSTER_SUPPORTED 0x0040 +#define MFI_INFO_AOPS_BBU 0x0080 +#define MFI_INFO_AOPS_SPANNING_ALLOWED 0x0100 +#define MFI_INFO_AOPS_DEDICATED_SPARES 0x0200 +#define MFI_INFO_AOPS_REVERTIBLE_SPARES 0x0400 +#define MFI_INFO_AOPS_FOREIGN_IMPORT 0x0800 +#define MFI_INFO_AOPS_SELF_DIAGNOSTIC 0x1000 +#define MFI_INFO_AOPS_MIXED_ARRAY 0x2000 +#define MFI_INFO_AOPS_GLOBAL_SPARES 0x4000 + + uint32_t ld_ops; +#define MFI_INFO_LDOPS_READ_POLICY 0x01 +#define MFI_INFO_LDOPS_WRITE_POLICY 0x02 +#define MFI_INFO_LDOPS_IO_POLICY 0x04 +#define MFI_INFO_LDOPS_ACCESS_POLICY 0x08 +#define MFI_INFO_LDOPS_DISK_CACHE_POLICY 0x10 + + struct { + uint8_t min; + uint8_t max; + uint8_t reserved[2]; + } __packed stripe_sz_ops; + + uint32_t pd_ops; +#define MFI_INFO_PDOPS_FORCE_ONLINE 0x01 +#define MFI_INFO_PDOPS_FORCE_OFFLINE 0x02 +#define 
MFI_INFO_PDOPS_FORCE_REBUILD 0x04 + + uint32_t pd_mix_support; +#define MFI_INFO_PDMIX_SAS 0x01 +#define MFI_INFO_PDMIX_SATA 0x02 +#define MFI_INFO_PDMIX_ENCL 0x04 +#define MFI_INFO_PDMIX_LD 0x08 +#define MFI_INFO_PDMIX_SATA_CLUSTER 0x10 + + uint8_t ecc_bucket_count; + uint8_t reserved2[11]; + struct mfi_ctrl_props properties; + char package_version[0x60]; + uint8_t pad[0x800 - 0x6a0]; +} __packed; + +#endif /* _MFIREG_H */ diff --git a/sys/dev/mfi/mfivar.h b/sys/dev/mfi/mfivar.h new file mode 100644 index 000000000000..c02296d98be9 --- /dev/null +++ b/sys/dev/mfi/mfivar.h @@ -0,0 +1,316 @@ +/*- + * Copyright (c) 2006 IronPort Systems + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#ifndef _MFIVAR_H +#define _MFIVAR_H + +#include <sys/cdefs.h> +__FBSDID("$FreeBSD$"); + +/* + * SCSI structures and definitions are used from here, but no linking + * requirements are made to CAM. 
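+ * Only data-layout definitions and small helpers are consumed. For
+ * instance, sense data returned in struct mfi_sense can be decoded with
+ * scsi_extract_sense(), as mfi_print_sense() below does; the following
+ * unit-attention check is only a sketch, not something this driver
+ * implements, and "cm", "error", "key", "asc" and "ascq" are assumed
+ * locals:
+ *
+ *	scsi_extract_sense((struct scsi_sense_data *)cm->cm_sense->data,
+ *	    &error, &key, &asc, &ascq);
+ *	if (key == SSD_KEY_UNIT_ATTENTION)
+ *		...
+ *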
+ */ +#include <cam/scsi/scsi_all.h> + +struct mfi_hwcomms { + uint32_t hw_pi; + uint32_t hw_ci; + uint32_t hw_reply_q[1]; +}; + +struct mfi_softc; + +struct mfi_command { + TAILQ_ENTRY(mfi_command) cm_link; + struct mfi_softc *cm_sc; + union mfi_frame *cm_frame; + uint32_t cm_frame_busaddr; + struct mfi_sense *cm_sense; + uint32_t cm_sense_busaddr; + bus_dmamap_t cm_dmamap; + union mfi_sgl *cm_sg; + void *cm_data; + int cm_len; + int cm_total_frame_size; + int cm_extra_frames; + int cm_flags; +#define MFI_CMD_MAPPED (1<<0) +#define MFI_CMD_DATAIN (1<<1) +#define MFI_CMD_DATAOUT (1<<2) +#define MFI_CMD_COMPLETED (1<<3) +#define MFI_CMD_POLLED (1<<4) +#define MFI_ON_MFIQ_FREE (1<<5) +#define MFI_ON_MFIQ_READY (1<<6) +#define MFI_ON_MFIQ_BUSY (1<<7) +#define MFI_ON_MFIQ_MASK ((1<<5)|(1<<6)|(1<<7)) + void (* cm_complete)(struct mfi_command *cm); + void *cm_private; +}; + +struct mfi_ld { + TAILQ_ENTRY(mfi_ld) ld_link; + device_t ld_disk; + uint64_t ld_sectors; + uint32_t ld_secsize; + int ld_id; +}; + +struct mfi_softc { + device_t mfi_dev; + int mfi_flags; +#define MFI_FLAGS_SG64 (1<<0) +#define MFI_FLAGS_QFRZN (1<<1) +#define MFI_FLAGS_OPEN (1<<2) + + struct mfi_hwcomms *mfi_comms; + TAILQ_HEAD(,mfi_command) mfi_free; + TAILQ_HEAD(,mfi_command) mfi_ready; + TAILQ_HEAD(,mfi_command) mfi_busy; + struct bio_queue_head mfi_bioq; + struct mfi_qstat mfi_qstat[MFIQ_COUNT]; + + struct resource *mfi_regs_resource; + bus_space_handle_t mfi_bhandle; + bus_space_tag_t mfi_btag; + int mfi_regs_rid; + + bus_dma_tag_t mfi_parent_dmat; + bus_dma_tag_t mfi_buffer_dmat; + + bus_dma_tag_t mfi_comms_dmat; + bus_dmamap_t mfi_comms_dmamap; + uint32_t mfi_comms_busaddr; + + bus_dma_tag_t mfi_frames_dmat; + bus_dmamap_t mfi_frames_dmamap; + uint32_t mfi_frames_busaddr; + union mfi_frame *mfi_frames; + + bus_dma_tag_t mfi_sense_dmat; + bus_dmamap_t mfi_sense_dmamap; + uint32_t mfi_sense_busaddr; + struct mfi_sense *mfi_sense; + + struct resource *mfi_irq; + void *mfi_intr; + int mfi_irq_rid; + + struct intr_config_hook mfi_ich; + eventhandler_tag eh; + int mfi_probe_count; + + /* + * Allocation for the command array. Used as an indexable array to + * recover completed commands. + */ + struct mfi_command *mfi_commands; + /* + * How many commands were actually allocated + */ + int mfi_total_cmds; + /* + * How many commands the firmware can handle. The reply queue holds + * one more entry than this. + */ + int mfi_max_fw_cmds; + /* + * Max number of S/G elements the firmware can handle + */ + int mfi_max_fw_sgl; + /* + * How many S/G elements we'll ever actually use + */ + int mfi_total_sgl; + /* + * How many bytes a compound frame is, including all of the extra frames + * that are used for S/G elements. + */ + int mfi_frame_size; + /* + * How large an S/G element is. Used to calculate the number of single + * frames in a command. 
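+ * As a worked illustration (using the constants from mfireg.h, not a
+ * promise about this driver's exact bookkeeping): with 64-byte frames
+ * and 8-byte 32-bit S/G entries, an I/O frame's 40 fixed bytes leave
+ * room for three entries in the first frame, and a longer list spills
+ * into extra frames, roughly
+ *
+ *	extra_frames = (fixed_size + nsegs * sgsize - 1) / MFI_FRAME_SIZE;
+ *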
+ */ + int mfi_sgsize; + /* + * Max number of sectors that the firmware allows + */ + uint32_t mfi_max_io; + + TAILQ_HEAD(,mfi_ld) mfi_ld_tqh; + eventhandler_tag mfi_eh; + struct cdev *mfi_cdev; + + struct mtx mfi_io_lock; +}; + +extern int mfi_attach(struct mfi_softc *); +extern void mfi_free(struct mfi_softc *); +extern int mfi_shutdown(struct mfi_softc *); +extern void mfi_startio(struct mfi_softc *); +extern void mfi_disk_complete(struct bio *); +extern int mfi_dump_blocks(struct mfi_softc *, int id, uint64_t, void *, int); + +#define MFIQ_ADD(sc, qname) \ + do { \ + struct mfi_qstat *qs; \ + \ + qs = &(sc)->mfi_qstat[qname]; \ + qs->q_length++; \ + if (qs->q_length > qs->q_max) \ + qs->q_max = qs->q_length; \ + } while (0) + +#define MFIQ_REMOVE(sc, qname) (sc)->mfi_qstat[qname].q_length-- + +#define MFIQ_INIT(sc, qname) \ + do { \ + sc->mfi_qstat[qname].q_length = 0; \ + sc->mfi_qstat[qname].q_max = 0; \ + } while (0) + +#define MFIQ_COMMAND_QUEUE(name, index) \ + static __inline void \ + mfi_initq_ ## name (struct mfi_softc *sc) \ + { \ + TAILQ_INIT(&sc->mfi_ ## name); \ + MFIQ_INIT(sc, index); \ + } \ + static __inline void \ + mfi_enqueue_ ## name (struct mfi_command *cm) \ + { \ + if ((cm->cm_flags & MFI_ON_MFIQ_MASK) != 0) { \ + printf("command %p is on another queue, " \ + "flags = %#x\n", cm, cm->cm_flags); \ + panic("command is on another queue"); \ + } \ + TAILQ_INSERT_TAIL(&cm->cm_sc->mfi_ ## name, cm, cm_link); \ + cm->cm_flags |= MFI_ON_ ## index; \ + MFIQ_ADD(cm->cm_sc, index); \ + } \ + static __inline void \ + mfi_requeue_ ## name (struct mfi_command *cm) \ + { \ + if ((cm->cm_flags & MFI_ON_MFIQ_MASK) != 0) { \ + printf("command %p is on another queue, " \ + "flags = %#x\n", cm, cm->cm_flags); \ + panic("command is on another queue"); \ + } \ + TAILQ_INSERT_HEAD(&cm->cm_sc->mfi_ ## name, cm, cm_link); \ + cm->cm_flags |= MFI_ON_ ## index; \ + MFIQ_ADD(cm->cm_sc, index); \ + } \ + static __inline struct mfi_command * \ + mfi_dequeue_ ## name (struct mfi_softc *sc) \ + { \ + struct mfi_command *cm; \ + \ + if ((cm = TAILQ_FIRST(&sc->mfi_ ## name)) != NULL) { \ + if ((cm->cm_flags & MFI_ON_ ## index) == 0) { \ + printf("command %p not in queue, " \ + "flags = %#x, bit = %#x\n", cm, \ + cm->cm_flags, MFI_ON_ ## index); \ + panic("command not in queue"); \ + } \ + TAILQ_REMOVE(&sc->mfi_ ## name, cm, cm_link); \ + cm->cm_flags &= ~MFI_ON_ ## index; \ + MFIQ_REMOVE(sc, index); \ + } \ + return (cm); \ + } \ + static __inline void \ + mfi_remove_ ## name (struct mfi_command *cm) \ + { \ + if ((cm->cm_flags & MFI_ON_ ## index) == 0) { \ + printf("command %p not in queue, flags = %#x, " \ + "bit = %#x\n", cm, cm->cm_flags, \ + MFI_ON_ ## index); \ + panic("command not in queue"); \ + } \ + TAILQ_REMOVE(&cm->cm_sc->mfi_ ## name, cm, cm_link); \ + cm->cm_flags &= ~MFI_ON_ ## index; \ + MFIQ_REMOVE(cm->cm_sc, index); \ + } \ +struct hack + +MFIQ_COMMAND_QUEUE(free, MFIQ_FREE); +MFIQ_COMMAND_QUEUE(ready, MFIQ_READY); +MFIQ_COMMAND_QUEUE(busy, MFIQ_BUSY); + +static __inline void +mfi_initq_bio(struct mfi_softc *sc) +{ + bioq_init(&sc->mfi_bioq); + MFIQ_INIT(sc, MFIQ_BIO); +} + +static __inline void +mfi_enqueue_bio(struct mfi_softc *sc, struct bio *bp) +{ + bioq_insert_tail(&sc->mfi_bioq, bp); + MFIQ_ADD(sc, MFIQ_BIO); +} + +static __inline struct bio * +mfi_dequeue_bio(struct mfi_softc *sc) +{ + struct bio *bp; + + if ((bp = bioq_first(&sc->mfi_bioq)) != NULL) { + bioq_remove(&sc->mfi_bioq, bp); + MFIQ_REMOVE(sc, MFIQ_BIO); + } + return (bp); +} + +static __inline void 
+mfi_print_sense(struct mfi_softc *sc, void *sense) +{ + int error, key, asc, ascq; + + scsi_extract_sense((struct scsi_sense_data *)sense, + &error, &key, &asc, &ascq); + device_printf(sc->mfi_dev, "sense error %d, sense_key %d, " + "asc %d, ascq %d\n", error, key, asc, ascq); +} + + +#define MFI_WRITE4(sc, reg, val) bus_space_write_4((sc)->mfi_btag, \ + sc->mfi_bhandle, (reg), (val)) +#define MFI_READ4(sc, reg) bus_space_read_4((sc)->mfi_btag, \ + (sc)->mfi_bhandle, (reg)) +#define MFI_WRITE2(sc, reg, val) bus_space_write_2((sc)->mfi_btag, \ + sc->mfi_bhandle, (reg), (val)) +#define MFI_READ2(sc, reg) bus_space_read_2((sc)->mfi_btag, \ + (sc)->mfi_bhandle, (reg)) +#define MFI_WRITE1(sc, reg, val) bus_space_write_1((sc)->mfi_btag, \ + sc->mfi_bhandle, (reg), (val)) +#define MFI_READ1(sc, reg) bus_space_read_1((sc)->mfi_btag, \ + (sc)->mfi_bhandle, (reg)) + +MALLOC_DECLARE(M_MFIBUF); + +#endif /* _MFIVAR_H */ diff --git a/sys/modules/mfi/Makefile b/sys/modules/mfi/Makefile new file mode 100644 index 000000000000..6d0ddf452fbb --- /dev/null +++ b/sys/modules/mfi/Makefile @@ -0,0 +1,15 @@ +# $FreeBSD$ + +.PATH: ${.CURDIR}/../../dev/mfi + +KMOD= mfi +SRCS= mfi.c mfi_pci.c mfi_disk.c +SRCS+= opt_mfi.h +SRCS+= device_if.h bus_if.h pci_if.h +CFLAGS+= -Wall -Werror + +# To enable debug output from the driver, uncomment these two lines. +#CFLAGS+= -DMFI_DEBUG=2 +#SRCS+= mfi_debug.c + +.include <bsd.kmod.mk>
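+
+# As a hedged usage example (not part of the build glue above), the
+# module would typically be built and loaded in place with:
+#	make && make load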