diff --git a/sys/arm/broadcom/bcm2835/bcm2835_sdhci.c b/sys/arm/broadcom/bcm2835/bcm2835_sdhci.c
index 138fcfc4cf16..f5672661cc6b 100644
--- a/sys/arm/broadcom/bcm2835/bcm2835_sdhci.c
+++ b/sys/arm/broadcom/bcm2835/bcm2835_sdhci.c
@@ -78,6 +78,13 @@ __FBSDID("$FreeBSD$");
 #define	ALLOCATED_DMA_SEGS	(NUM_DMA_SEGS + NUM_DMA_SPILL_SEGS)
 #define	BCM_DMA_MAXSIZE		(NUM_DMA_SEGS * BCM_SDHCI_BUFFER_SIZE)
 
+#define	BCM_SDHCI_SLOT_LEFT(slot)	\
+	((slot)->curcmd->data->len - (slot)->offset)
+
+#define	BCM_SDHCI_SEGSZ_LEFT(slot)	\
+	min(BCM_DMA_MAXSIZE,		\
+	    rounddown(BCM_SDHCI_SLOT_LEFT(slot), BCM_SDHCI_BUFFER_SIZE))
+
 #define	DATA_PENDING_MASK	(SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL)
 
 #ifdef DEBUG
@@ -172,6 +179,7 @@
 static void bcm_sdhci_intr(void *);
 static int bcm_sdhci_get_ro(device_t, device_t);
 static void bcm_sdhci_dma_intr(int ch, void *arg);
+static void bcm_sdhci_start_dma(struct sdhci_slot *slot);
 
 static void
 bcm_sdhci_dmacb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
@@ -569,8 +577,9 @@ bcm_sdhci_start_dma_seg(struct bcm_sdhci_softc *sc)
	 */
 	if (idx == 0) {
 		bus_dmamap_sync(sc->sc_dma_tag, sc->sc_dma_map, sync_op);
+		slot->intmask &= ~DATA_PENDING_MASK;
-		bcm_sdhci_write_4(sc->sc_dev, &sc->sc_slot, SDHCI_SIGNAL_ENABLE,
+		bcm_sdhci_write_4(sc->sc_dev, slot, SDHCI_SIGNAL_ENABLE,
 		    slot->intmask);
 	}
 
@@ -582,21 +591,29 @@ bcm_sdhci_start_dma_seg(struct bcm_sdhci_softc *sc)
 	KASSERT((err == 0), ("bcm2835_sdhci: failed DMA start"));
 }
 
+static void
+bcm_sdhci_dma_exit(struct bcm_sdhci_softc *sc)
+{
+	struct sdhci_slot *slot = &sc->sc_slot;
+
+	mtx_assert(&slot->mtx, MA_OWNED);
+
+	/* Re-enable interrupts */
+	slot->intmask |= DATA_PENDING_MASK;
+	bcm_sdhci_write_4(slot->bus, slot, SDHCI_SIGNAL_ENABLE,
+	    slot->intmask);
+}
+
 static void
 bcm_sdhci_dma_intr(int ch, void *arg)
 {
 	struct bcm_sdhci_softc *sc = (struct bcm_sdhci_softc *)arg;
 	struct sdhci_slot *slot = &sc->sc_slot;
 	uint32_t reg;
-	int left, sync_op;
 
 	mtx_lock(&slot->mtx);
-
-	if (slot->curcmd == NULL) {
-		mtx_unlock(&slot->mtx);
-		return;
-	}
-
+	if (slot->curcmd == NULL)
+		goto out;
 	/*
 	 * If there are more segments for the current dma, start the next one.
 	 * Otherwise unload the dma map and decide what to do next based on the
@@ -604,122 +621,64 @@ bcm_sdhci_dma_intr(int ch, void *arg)
 	 */
 	if (sc->dmamap_seg_index < sc->dmamap_seg_count) {
 		bcm_sdhci_start_dma_seg(sc);
-		mtx_unlock(&slot->mtx);
-		return;
+		goto out;
 	}
 
-	if (slot->curcmd->data->flags & MMC_DATA_READ)
-		sync_op = BUS_DMASYNC_POSTREAD;
+	if ((slot->curcmd->data->flags & MMC_DATA_READ) != 0)
+		bus_dmamap_sync(sc->sc_dma_tag, sc->sc_dma_map,
+		    BUS_DMASYNC_POSTREAD);
 	else
-		sync_op = BUS_DMASYNC_POSTWRITE;
+		bus_dmamap_sync(sc->sc_dma_tag, sc->sc_dma_map,
+		    BUS_DMASYNC_POSTWRITE);
+	bus_dmamap_unload(sc->sc_dma_tag, sc->sc_dma_map);
 
-	if (sc->dmamap_seg_count != 0) {
-		bus_dmamap_sync(sc->sc_dma_tag, sc->sc_dma_map, sync_op);
-		bus_dmamap_unload(sc->sc_dma_tag, sc->sc_dma_map);
-
-		sc->dmamap_seg_count = 0;
-		sc->dmamap_seg_index = 0;
-	}
-
-	left = min(BCM_SDHCI_BUFFER_SIZE,
-	    slot->curcmd->data->len - slot->offset);
+	sc->dmamap_seg_count = 0;
+	sc->dmamap_seg_index = 0;
 
 	/*
-	 * If there is less than buffer size outstanding, we would not handle
-	 * it anymore using DMA if bcm_sdhci_will_handle_transfer() were asked.
-	 * Re-enable interrupts and return and let the SDHCI state machine
-	 * finish the job.
+	 * If we had no further segments pending, we need to determine how to
+	 * proceed next.  If the 'data/space pending' bit is already set and we
+	 * can continue via DMA, do so.  Otherwise, re-enable interrupts and
+	 * return.
 	 */
-	if (left < BCM_SDHCI_BUFFER_SIZE) {
-		/* Re-enable data interrupts. */
-		slot->intmask |= DATA_PENDING_MASK;
-		bcm_sdhci_write_4(slot->bus, slot, SDHCI_SIGNAL_ENABLE,
-		    slot->intmask);
-		mtx_unlock(&slot->mtx);
-		return;
-	}
-
 	reg = bcm_sdhci_read_4(slot->bus, slot, SDHCI_INT_STATUS);
+	if ((reg & DATA_PENDING_MASK) != 0 &&
+	    BCM_SDHCI_SEGSZ_LEFT(slot) >= BCM_SDHCI_BUFFER_SIZE) {
+		/* ACK any pending interrupts */
+		bcm_sdhci_write_4(slot->bus, slot, SDHCI_INT_STATUS,
+		    DATA_PENDING_MASK);
 
-	/* already available? */
-	if ((reg & DATA_PENDING_MASK) != 0) {
-
-		/* ACK for DATA_AVAIL or SPACE_AVAIL */
-		bcm_sdhci_write_4(slot->bus, slot,
-		    SDHCI_INT_STATUS, DATA_PENDING_MASK);
-
-		/* continue next DMA transfer */
-		if (bus_dmamap_load(sc->sc_dma_tag, sc->sc_dma_map,
-		    (uint8_t *)slot->curcmd->data->data +
-		    slot->offset, left, bcm_sdhci_dmacb, sc,
-		    BUS_DMA_NOWAIT) != 0 || sc->dmamap_status != 0) {
-			slot->curcmd->error = MMC_ERR_NO_MEMORY;
+		bcm_sdhci_start_dma(slot);
+		if (slot->curcmd->error != 0) {
 			sdhci_finish_data(slot);
-		} else {
-			bcm_sdhci_start_dma_seg(sc);
+			bcm_sdhci_dma_exit(sc);
 		}
 	} else {
-		/* wait for next data by INT */
-
-		/* enable INT */
-		slot->intmask |= DATA_PENDING_MASK;
-		bcm_sdhci_write_4(slot->bus, slot, SDHCI_SIGNAL_ENABLE,
-		    slot->intmask);
+		bcm_sdhci_dma_exit(sc);
 	}
-
+out:
 	mtx_unlock(&slot->mtx);
 }
 
 static void
-bcm_sdhci_read_dma(device_t dev, struct sdhci_slot *slot)
+bcm_sdhci_start_dma(struct sdhci_slot *slot)
 {
 	struct bcm_sdhci_softc *sc = device_get_softc(slot->bus);
+	uint8_t *buf;
 	size_t left;
 
-	/* XXX TODO: Not many-segment safe */
-	if (sc->dmamap_seg_count != 0) {
-		device_printf(sc->sc_dev, "DMA in use\n");
-		return;
-	}
+	mtx_assert(&slot->mtx, MA_OWNED);
 
-	left = min(BCM_SDHCI_BUFFER_SIZE,
-	    slot->curcmd->data->len - slot->offset);
+	left = BCM_SDHCI_SEGSZ_LEFT(slot);
+	buf = (uint8_t *)slot->curcmd->data->data + slot->offset;
+	KASSERT(left != 0,
+	    ("%s: DMA handling incorrectly indicated", __func__));
 
-	KASSERT((left & 3) == 0,
-	    ("%s: len = %zu, not word-aligned", __func__, left));
-
-	if (bus_dmamap_load(sc->sc_dma_tag, sc->sc_dma_map,
-	    (uint8_t *)slot->curcmd->data->data + slot->offset, left,
-	    bcm_sdhci_dmacb, sc, BUS_DMA_NOWAIT) != 0 ||
-	    sc->dmamap_status != 0) {
-		slot->curcmd->error = MMC_ERR_NO_MEMORY;
-		return;
-	}
-
-	/* DMA start */
-	bcm_sdhci_start_dma_seg(sc);
-}
-
-static void
-bcm_sdhci_write_dma(device_t dev, struct sdhci_slot *slot)
-{
-	struct bcm_sdhci_softc *sc = device_get_softc(slot->bus);
-	size_t left;
-
-	/* XXX TODO: Not many-segment safe */
-	if (sc->dmamap_seg_count != 0) {
-		device_printf(sc->sc_dev, "DMA in use\n");
-		return;
-	}
-
-	left = min(BCM_SDHCI_BUFFER_SIZE,
-	    slot->curcmd->data->len - slot->offset);
-
-	KASSERT((left & 3) == 0,
-	    ("%s: len = %zu, not word-aligned", __func__, left));
-
-	if (bus_dmamap_load(sc->sc_dma_tag, sc->sc_dma_map,
-	    (uint8_t *)slot->curcmd->data->data + slot->offset, left,
+	/*
+	 * No need to check segment count here; if we've not yet unloaded
+	 * previous segments, we'll catch that in bcm_sdhci_dmacb.
+	 */
+	if (bus_dmamap_load(sc->sc_dma_tag, sc->sc_dma_map, buf, left,
 	    bcm_sdhci_dmacb, sc, BUS_DMA_NOWAIT) != 0 ||
 	    sc->dmamap_status != 0) {
 		slot->curcmd->error = MMC_ERR_NO_MEMORY;
@@ -734,20 +693,25 @@ static int
 bcm_sdhci_will_handle_transfer(device_t dev, struct sdhci_slot *slot)
 {
 	struct bcm_sdhci_softc *sc = device_get_softc(slot->bus);
-	size_t left;
 
 	if (!sc->conf->use_dma)
 		return (0);
 
 	/*
-	 * Do not use DMA for transfers less than block size or with a length
-	 * that is not a multiple of four.
+	 * This indicates that we somehow let a data interrupt slip by into the
+	 * SDHCI framework, when it should not have.  This really needs to be
+	 * caught and fixed ASAP, as it really shouldn't happen.
 	 */
-	left = min(BCM_DMA_BLOCK_SIZE,
-	    slot->curcmd->data->len - slot->offset);
-	if (left < BCM_DMA_BLOCK_SIZE)
-		return (0);
-	if (left & 0x03)
+	KASSERT(sc->dmamap_seg_count == 0,
+	    ("data pending interrupt pushed through SDHCI framework"));
+
+	/*
+	 * Do not use DMA for transfers less than our block size.  Checking
+	 * alignment serves little benefit, as we round transfer sizes down to
+	 * a multiple of the block size and push the transfer back to
+	 * SDHCI-driven PIO once we're below the block size.
+	 */
+	if (BCM_SDHCI_SEGSZ_LEFT(slot) < BCM_DMA_BLOCK_SIZE)
 		return (0);
 
 	return (1);
@@ -759,10 +723,7 @@ bcm_sdhci_start_transfer(device_t dev, struct sdhci_slot *slot,
 {
 
 	/* DMA transfer FIFO 1KB */
-	if (slot->curcmd->data->flags & MMC_DATA_READ)
-		bcm_sdhci_read_dma(dev, slot);
-	else
-		bcm_sdhci_write_dma(dev, slot);
+	bcm_sdhci_start_dma(slot);
 }
 
 static void
@@ -772,6 +733,13 @@ bcm_sdhci_finish_transfer(device_t dev, struct sdhci_slot *slot)
 
 	/* Clean up */
 	if (sc->dmamap_seg_count != 0) {
+		/*
+		 * Our segment math should have worked out such that we would
+		 * never finish the transfer without having used up all of the
+		 * segments.  If we haven't, that means we must have erroneously
+		 * regressed to SDHCI-driven PIO to finish the operation and
+		 * this is certainly caused by developer-error.
+		 */
 		if (slot->curcmd->data->flags & MMC_DATA_READ)
 			bus_dmamap_sync(sc->sc_dma_tag, sc->sc_dma_map,
 			    BUS_DMASYNC_POSTREAD);
@@ -782,15 +750,9 @@ bcm_sdhci_finish_transfer(device_t dev, struct sdhci_slot *slot)
 
 		sc->dmamap_seg_count = 0;
 		sc->dmamap_seg_index = 0;
-
-		slot->intmask |= DATA_PENDING_MASK;
-		bcm_sdhci_write_4(slot->bus, slot, SDHCI_SIGNAL_ENABLE,
-		    slot->intmask);
-	} else {
-		KASSERT((slot->intmask & DATA_PENDING_MASK) ==
-		    DATA_PENDING_MASK,
-		    ("%s: interrupt mask not restored", __func__));
 	}
+
+	bcm_sdhci_dma_exit(sc);
 
 	sdhci_finish_data(slot);
 }