Add support for the Promise command sequencer present on all modern Promise
controllers (PDC203** PDC206**).
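
For context, a minimal sketch (not the committed code) of how a chipset driver
plugs a sequencer in through the new per-channel ch->hw.command method added to
struct ata_lowlevel in this change. The example_* names are illustrative only;
the fallback to ata_generic_command() mirrors what ata_promise_mio_command()
does in the diff below.

/*
 * Hedged sketch: override the new command hook, fall back to the generic
 * register-banging path for anything the sequencer does not handle.
 */
static int
example_sequencer_command(struct ata_device *atadev, u_int8_t command,
			  u_int64_t lba, u_int16_t count, u_int16_t feature)
{
    /* only DMA reads/writes go through the command sequencer */
    if (command != ATA_READ_DMA && command != ATA_WRITE_DMA)
	return ata_generic_command(atadev, command, lba, count, feature);

    /* chip specific: build the sequencer packet in the DMA workspace
       (ch->dma->workspace) and hand its bus address to the chip */
    return 0;
}

static int
example_allocate(device_t dev, struct ata_channel *ch)
{
    ata_generic_hw(ch);				/* install generic methods */
    ch->hw.command = example_sequencer_command;	/* take over command issue */
    return 0;
}

With this hook in place, ata-lowlevel.c issues every ATA and ATAPI command
through ch->hw.command() instead of the old static ata_command(), as the
ata-lowlevel.c hunks below show.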

This also adds preliminary support for the Promise SX4/SX4000, but *only*
as a "normal" Promise ATA controller (ATA RAIDs are supported, though
only RAID0, RAID1 and RAID0+1).

This cuts off yet another 5-8% of the command overhead on Promise controllers,
making them the fastest controllers we have ever supported.

Work is now continuing to add support for this in ATA RAID, which should
accelerate ATA RAID quite a bit on these controllers, especially the
SX4/SX4000 series, as they have quite a few tricks in there.
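
As background on those tricks (my reading of the diff below, not an
authoritative description): the SX4/SX4000 stages data through its on-board
DIMM, so one I/O becomes an ATA packet (APKT, disk <-> DIMM) plus a host
packet (HPKT, DIMM <-> host memory), and only one HPKT runs at a time, the
rest being parked in a small ring. A sketch of that queue discipline, assuming
the struct ata_promise_sx4 from the diff; start_hpkt_hw() is a made-up
stand-in for the ATA_OUTL() register write that kicks the engine.

static void
example_queue_hpkt(struct ata_promise_sx4 *q, u_int32_t hpkt)
{
    mtx_lock(&q->mtx);
    if (q->head == q->tail && !q->busy) {
	start_hpkt_hw(hpkt);	/* engine idle: start this packet now */
	q->busy = 1;
    }
    else			/* park it until the current HPKT completes */
	q->array[(q->head++) & (ATA_PDC_MAX_HPKT - 1)] = hpkt;
    mtx_unlock(&q->mtx);
}

The interrupt handler then feeds the next parked packet to the chip (see
ata_promise_next_hpkt() in the diff) each time an HPKT completion bit fires.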

This commit also adds a few fixes to the SATA code needed for proper support.
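
One of those SATA fixes, for reference: the driver now treats a device as
"real" SATA only when the IDENTIFY SATA-capabilities word is neither 0x0000
nor 0xffff, and only then allows UDMA6/ATA133 (ad_print() likewise reports
SATA150 instead of the UDMA mode). A hedged one-liner of that test;
ata_is_real_sata() is an illustrative name, the commit itself open-codes the
check in ata_sata_setmode() and ad_print().

static int
ata_is_real_sata(struct ata_device *atadev)
{
    return (atadev->param->satacapabilities != 0x0000 &&
	    atadev->param->satacapabilities != 0xffff);
}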
Author:  Søren Schmidt
Date:    2004-04-13 09:44:20 +00:00
Commit:  f2972d7eb8
Parent:  c98dc180f6
Notes:   svn2git 2020-12-20 02:59:44 +00:00; svn path=/head/; revision=128183

12 changed files with 648 additions and 293 deletions

View File

@ -113,7 +113,6 @@ ata_probe(device_t dev)
return EEXIST;
/* initialize the softc basics */
ata_generic_hw(ch);
ch->device[MASTER].channel = ch;
ch->device[MASTER].unit = ATA_MASTER;
ch->device[MASTER].mode = ATA_PIO;
@ -253,10 +252,7 @@ ata_reinit(struct ata_channel *ch)
ch->device[MASTER].detach) {
if (request && (request->device == &ch->device[MASTER])) {
request->result = ENXIO;
if (request->callback)
(request->callback)(request);
else
sema_post(&request->done);
request->retries = 0;
}
ch->device[MASTER].detach(&ch->device[MASTER]);
ata_fail_requests(ch, &ch->device[MASTER]);
@ -267,10 +263,7 @@ ata_reinit(struct ata_channel *ch)
ch->device[SLAVE].detach) {
if (request && (request->device == &ch->device[SLAVE])) {
request->result = ENXIO;
if (request->callback)
(request->callback)(request);
else
sema_post(&request->done);
request->retries = 0;
}
ch->device[SLAVE].detach(&ch->device[SLAVE]);
ata_fail_requests(ch, &ch->device[SLAVE]);
@ -692,7 +685,7 @@ ata_identify_devices(struct ata_channel *ch)
}
/* setup basic transfer mode by setting PIO mode and DMA if supported */
if (ch->device[MASTER].attach) {
if (ch->device[MASTER].param) {
ch->device[MASTER].setmode(&ch->device[MASTER], ATA_PIO_MAX);
if ((((ch->devices & ATA_ATAPI_MASTER) && atapi_dma &&
(ch->device[MASTER].param->config&ATA_DRQ_MASK) != ATA_DRQ_INTR)||
@ -700,7 +693,7 @@ ata_identify_devices(struct ata_channel *ch)
ch->device[MASTER].setmode(&ch->device[MASTER], ATA_DMA_MAX);
}
if (ch->device[SLAVE].attach) {
if (ch->device[SLAVE].param) {
ch->device[SLAVE].setmode(&ch->device[SLAVE], ATA_PIO_MAX);
if ((((ch->devices & ATA_ATAPI_SLAVE) && atapi_dma &&
(ch->device[SLAVE].param->config&ATA_DRQ_MASK) != ATA_DRQ_INTR) ||

View File

@ -186,8 +186,8 @@ struct ata_request {
struct {
u_int8_t command; /* command reg */
u_int8_t feature; /* feature reg */
u_int64_t lba; /* lba reg */
u_int16_t count; /* count reg */
u_int64_t lba; /* lba reg */
} ata;
struct {
u_int8_t ccb[16]; /* ATAPI command block */
@ -287,6 +287,11 @@ struct ata_dma {
bus_dmamap_t ddmamap; /* data DMA map */
struct ata_dmaentry *dmatab; /* DMA transfer table */
bus_addr_t mdmatab; /* bus address of dmatab */
bus_dma_tag_t wdmatag; /* workspace DMA tag */
bus_dmamap_t wdmamap; /* workspace DMA map */
u_int8_t *workspace; /* workspace */
bus_addr_t wdmatab; /* bus address of dmatab */
u_int32_t alignment; /* DMA engine alignment */
u_int32_t boundary; /* DMA engine boundary */
u_int32_t max_iosize; /* DMA engine max IO size */
@ -306,8 +311,9 @@ struct ata_dma {
/* structure holding lowlevel functions */
struct ata_lowlevel {
void (*reset)(struct ata_channel *ch);
int (*transaction)(struct ata_request *request);
void (*interrupt)(void *channel);
int (*transaction)(struct ata_request *request);
int (*command)(struct ata_device *atadev, u_int8_t command, u_int64_t lba, u_int16_t count, u_int16_t feature);
};
/* structure holding resources for an ATA channel */
@ -356,7 +362,7 @@ struct ata_channel {
struct mtx queue_mtx; /* queue lock */
TAILQ_HEAD(, ata_request) ata_queue; /* head of ATA queue */
void *running; /* currently running request */
struct ata_request *running; /* currently running request */
};
/* disk bay/enclosure related */
@ -402,6 +408,7 @@ char *ata_cmd2str(struct ata_request *request);
/* ata-lowlevel.c: */
void ata_generic_hw(struct ata_channel *ch);
int ata_generic_command(struct ata_device *atadev, u_int8_t command, u_int64_t lba, u_int16_t count, u_int16_t feature);
/* subdrivers */
void ad_attach(struct ata_device *atadev);

View File

@ -146,6 +146,7 @@ ata_pccard_probe(device_t dev)
ch->flags |= (ATA_USE_16BIT | ATA_NO_SLAVE);
ch->locking = ata_pccard_locknoop;
ch->device[MASTER].setmode = ata_pccard_setmode;
ata_generic_hw(ch);
return ata_probe(dev);
}

View File

@ -307,6 +307,7 @@ ata_cbussub_probe(device_t dev)
ch->locking = ctlr->locking;
ch->device[MASTER].setmode = ctlr->setmode;
ch->device[SLAVE].setmode = ctlr->setmode;
ata_generic_hw(ch);
return ata_probe(dev);
}

View File

@ -33,10 +33,12 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/ata.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sema.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>
@ -87,16 +89,23 @@ static void ata_via_family_setmode(struct ata_device *, int);
static void ata_via_southbridge_fixup(device_t);
static int ata_promise_chipinit(device_t);
static int ata_promise_mio_allocate(device_t, struct ata_channel *);
static void ata_promise_old_intr(void *);
static void ata_promise_tx2_intr(void *);
static void ata_promise_mio_intr(void *);
static void ata_promise_setmode(struct ata_device *, int);
static void ata_promise_new_dmainit(struct ata_channel *);
static int ata_promise_new_dmastart(struct ata_channel *);
static int ata_promise_new_dmastop(struct ata_channel *);
static void ata_promise_sx4_intr(void *);
static void ata_promise_mio_dmainit(struct ata_channel *);
static int ata_promise_mio_dmastart(struct ata_channel *);
static int ata_promise_mio_dmastop(struct ata_channel *);
static void ata_promise_mio_reset(struct ata_channel *ch);
static int ata_promise_mio_command(struct ata_device *atadev, u_int8_t command, u_int64_t lba, u_int16_t count, u_int16_t feature);
static int ata_promise_sx4_command(struct ata_device *atadev, u_int8_t command, u_int64_t lba, u_int16_t count, u_int16_t feature);
static int ata_promise_apkt(u_int8_t *bytep, struct ata_device *atadev, u_int8_t command, u_int64_t lba, u_int16_t count, u_int16_t feature);
static void ata_promise_queue_hpkt(struct ata_pci_controller *ctlr, u_int32_t hpkt);
static void ata_promise_next_hpkt(struct ata_pci_controller *ctlr);
static void ata_promise_tx2_intr(void *);
static void ata_promise_old_intr(void *);
static void ata_promise_new_dmainit(struct ata_channel *);
static int ata_promise_new_dmastart(struct ata_channel *);
static int ata_promise_new_dmastop(struct ata_channel *);
static void ata_promise_setmode(struct ata_device *, int);
static int ata_serverworks_chipinit(device_t);
static void ata_serverworks_setmode(struct ata_device *, int);
static int ata_sii_chipinit(device_t);
@ -175,12 +184,17 @@ static void
ata_sata_setmode(struct ata_device *atadev, int mode)
{
/*
* we limit the transfer mode to UDMA5/ATA100 as some chips/drive
* comboes that use the Marvell SATA->PATA converters has trouble
* with UDMA6/ATA133. This doesn't really matter as real SATA
* devices doesn't use this anyway.
* if we detect that the device isn't a real SATA device we limit
* the transfer mode to UDMA5/ATA100.
* this works around the problems some devices has with the
* Marvell SATA->PATA converters and UDMA6/ATA133.
*/
mode = ata_limit_mode(atadev, mode, ATA_UDMA5);
if (atadev->param->satacapabilities != 0x0000 &&
atadev->param->satacapabilities != 0xffff)
mode = ata_limit_mode(atadev, mode, ATA_UDMA6);
else
mode = ata_limit_mode(atadev, mode, ATA_UDMA5);
if (!ata_controlcmd(atadev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode))
atadev->mode = mode;
}
@ -814,6 +828,10 @@ ata_intel_ident(device_t dev)
{ ATA_I82801DB_1, 0, 0, 0x00, ATA_UDMA5, "Intel ICH4" },
{ ATA_I82801EB, 0, 0, 0x00, ATA_UDMA5, "Intel ICH5" },
{ ATA_I82801EB_1, 0, 0, 0x00, ATA_SA150, "Intel ICH5" },
{ ATA_I82801EB_2, 0, 0, 0x00, ATA_SA150, "Intel ICH5" },
{ ATA_I6300ESB, 0, 0, 0x00, ATA_UDMA5, "Intel ICH5" },
{ ATA_I6300ESB_1, 0, 0, 0x00, ATA_SA150, "Intel ICH5" },
{ ATA_I6300ESB_2, 0, 0, 0x00, ATA_SA150, "Intel ICH5" },
{ 0, 0, 0, 0, 0, 0}};
char buffer[64];
@ -1058,6 +1076,31 @@ ata_nvidia_chipinit(device_t dev)
/*
* Promise chipset support functions
*/
#define ATA_PDC_APKT_OFFSET 0x00000010
#define ATA_PDC_HPKT_OFFSET 0x00000040
#define ATA_PDC_ASG_OFFSET 0x00000080
#define ATA_PDC_LSG_OFFSET 0x000000c0
#define ATA_PDC_HSG_OFFSET 0x00000100
#define ATA_PDC_CHN_OFFSET 0x00000400
#define ATA_PDC_BUF_BASE 0x00400000
#define ATA_PDC_BUF_OFFSET 0x00100000
#define ATA_PDC_MAX_HPKT 8
#define ATA_PDC_WRITE_REG 0x00
#define ATA_PDC_WRITE_CTL 0x0e
#define ATA_PDC_WRITE_END 0x08
#define ATA_PDC_WAIT_NBUSY 0x10
#define ATA_PDC_WAIT_READY 0x18
#define ATA_PDC_1B 0x20
#define ATA_PDC_2B 0x40
struct ata_promise_sx4 {
struct mtx mtx;
u_int32_t array[ATA_PDC_MAX_HPKT];
int head, tail;
int busy;
};
int
ata_promise_ident(device_t dev)
{
@ -1088,6 +1131,8 @@ ata_promise_ident(device_t dev)
{ ATA_PDC20618, 0, PRMIO, PRDUAL, ATA_UDMA6, "Promise PDC20618" },
{ ATA_PDC20619, 0, PRMIO, PRDUAL, ATA_UDMA6, "Promise PDC20619" },
{ ATA_PDC20620, 0, PRMIO, PRDUAL, ATA_UDMA6, "Promise PDC20620" },
{ ATA_PDC20621, 0, PRMIO, PRSX4X, ATA_UDMA5, "Promise PDC20621" },
{ ATA_PDC20622, 0, PRMIO, PRSX4X, ATA_SA150, "Promise PDC20622" },
{ 0, 0, 0, 0, 0, 0}};
char buffer[64];
uintptr_t devid = 0;
@ -1103,6 +1148,7 @@ ata_promise_ident(device_t dev)
return ENXIO;
strcpy(buffer, idx->text);
/* if we are on a FastTrak TX4, adjust the interrupt resource */
if ((idx->cfg2 & PRTX4) && pci_get_class(GRANDPARENT(dev))==PCIC_BRIDGE &&
!BUS_READ_IVAR(device_get_parent(GRANDPARENT(dev)),
@ -1170,11 +1216,18 @@ ata_promise_chipinit(device_t dev)
break;
case PRMIO:
if (ctlr->r_res1)
bus_release_resource(dev, ctlr->r_type1, ctlr->r_rid1,ctlr->r_res1);
ctlr->r_type1 = SYS_RES_MEMORY;
ctlr->r_rid1 = 0x20;
if (!(ctlr->r_res1 = bus_alloc_resource_any(dev, ctlr->r_type1,
&ctlr->r_rid1, RF_ACTIVE)))
return ENXIO;
ctlr->r_type2 = SYS_RES_MEMORY;
ctlr->r_rid2 = 0x1c;
if (!(ctlr->r_res2 =
bus_alloc_resource_any(dev, ctlr->r_type2, &ctlr->r_rid2,
RF_ACTIVE)))
if (!(ctlr->r_res2 = bus_alloc_resource_any(dev, ctlr->r_type2,
&ctlr->r_rid2, RF_ACTIVE)))
return ENXIO;
ctlr->dmainit = ata_promise_mio_dmainit;
@ -1191,15 +1244,45 @@ ata_promise_chipinit(device_t dev)
else
ctlr->channels = 4;
if ((bus_setup_intr(dev, ctlr->r_irq, ATA_INTR_FLAGS,
ata_promise_mio_intr, ctlr, &ctlr->handle))) {
device_printf(dev, "unable to setup interrupt\n");
return ENXIO;
if (ctlr->chip->cfg2 & PRSX4X) {
struct ata_promise_sx4 *hpkt;
u_int32_t dimm = ATA_INL(ctlr->r_res2, 0x000c0080);
/* print info about cache memory */
device_printf(dev, "DIMM size %dMB @ 0x%08x%s\n",
(((dimm >> 16) & 0xff)-((dimm >> 24) & 0xff)+1) << 4,
((dimm >> 24) & 0xff),
ATA_INL(ctlr->r_res2, 0x000c0088) & (1<<16) ?
" ECC enabled" : "" );
ATA_OUTL(ctlr->r_res2, 0x000c000c,
(ATA_INL(ctlr->r_res2, 0x000c000c) & 0xffff0000));
ctlr->driver = malloc(sizeof(struct ata_promise_sx4),
M_TEMP, M_NOWAIT | M_ZERO);
hpkt = ctlr->driver;
mtx_init(&hpkt->mtx, "ATA promise HPKT lock", MTX_DEF, 0);
hpkt->busy = hpkt->head = hpkt->tail = 0;
if ((bus_setup_intr(dev, ctlr->r_irq, ATA_INTR_FLAGS,
ata_promise_sx4_intr, ctlr, &ctlr->handle))) {
device_printf(dev, "unable to setup interrupt\n");
return ENXIO;
}
}
else {
if ((bus_setup_intr(dev, ctlr->r_irq, ATA_INTR_FLAGS,
ata_promise_mio_intr, ctlr, &ctlr->handle))) {
device_printf(dev, "unable to setup interrupt\n");
return ENXIO;
}
}
break;
}
ctlr->setmode = ata_promise_setmode;
if (ctlr->chip->max_dma >= ATA_SA150)
ctlr->setmode = ata_sata_setmode;
else
ctlr->setmode = ata_promise_setmode;
return 0;
}
@ -1207,58 +1290,329 @@ static int
ata_promise_mio_allocate(device_t dev, struct ata_channel *ch)
{
struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
int offset = (ctlr->chip->cfg2 & PRSX4X) ? 0x000c0000 : 0;
int i;
for (i = ATA_DATA; i <= ATA_STATUS; i++) {
ch->r_io[i].res = ctlr->r_res2;
ch->r_io[i].offset = 0x200 + (i << 2) + (ch->unit << 7);
ch->r_io[i].res = ctlr->r_res2;
ch->r_io[i].offset = offset + 0x0200 + (i << 2) + (ch->unit << 7);
}
ch->r_io[ATA_ALTSTAT].res = ctlr->r_res2;
ch->r_io[ATA_ALTSTAT].offset = 0x238 + (ch->unit << 7);
ch->r_io[ATA_BMCMD_PORT].res = ctlr->r_res2;
ch->r_io[ATA_BMCMD_PORT].offset = 0x260 + (ch->unit << 7);
ch->r_io[ATA_BMDTP_PORT].res = ctlr->r_res2;
ch->r_io[ATA_BMDTP_PORT].offset = 0x244 + (ch->unit << 7);
ch->r_io[ATA_BMDEVSPEC_0].res = ctlr->r_res2;
ch->r_io[ATA_BMDEVSPEC_0].offset = ((ch->unit + 1) << 2);
ch->r_io[ATA_ALTSTAT].offset = offset + 0x0238 + (ch->unit << 7);
ch->r_io[ATA_IDX_ADDR].res = ctlr->r_res2;
ch->flags |= ATA_USE_16BIT;
ch->reset = ata_promise_mio_reset;
ATA_IDX_OUTL(ch, ATA_BMCMD_PORT,
(ATA_IDX_INL(ch, ATA_BMCMD_PORT) & ~0x00003f9f) |
(ch->unit + 1));
ATA_IDX_OUTL(ch, ATA_BMDEVSPEC_0, 0x00000001);
ch->flags |= (ATA_NO_SLAVE | ATA_USE_16BIT);
ctlr->dmainit(ch);
ctlr->dmainit(ch);
ata_generic_hw(ch);
if (ctlr->chip->cfg2 & PRSX4X)
ch->hw.command = ata_promise_sx4_command;
else
ch->hw.command = ata_promise_mio_command;
return 0;
}
static void
ata_promise_old_intr(void *data)
ata_promise_mio_intr(void *data)
{
struct ata_pci_controller *ctlr = data;
struct ata_channel *ch;
u_int32_t vector = ATA_INL(ctlr->r_res2, 0x00040);
int unit;
/* implement this as a toggle instead to balance load XXX */
for (unit = 0; unit < 2; unit++) {
if (!(ch = ctlr->interrupt[unit].argument))
continue;
if (ATA_INL(ctlr->r_res1, 0x1c) & (ch->unit ? 0x00004000 : 0x00000400)) {
if (ch->dma && (ch->dma->flags & ATA_DMA_ACTIVE)) {
int bmstat = ATA_IDX_INB(ch, ATA_BMSTAT_PORT) & ATA_BMSTAT_MASK;
if ((bmstat & (ATA_BMSTAT_ACTIVE | ATA_BMSTAT_INTERRUPT)) !=
ATA_BMSTAT_INTERRUPT)
continue;
ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, bmstat & ~ATA_BMSTAT_ERROR);
DELAY(1);
}
ctlr->interrupt[unit].function(ch);
for (unit = 0; unit < ctlr->channels; unit++) {
if (vector & (1 << (unit + 1))) {
if ((ch = ctlr->interrupt[unit].argument))
ctlr->interrupt[unit].function(ch);
}
}
}
static void
ata_promise_sx4_intr(void *data)
{
struct ata_pci_controller *ctlr = data;
struct ata_channel *ch;
u_int32_t vector = ATA_INL(ctlr->r_res2, 0x000c0480);
int unit;
for (unit = 0; unit < ctlr->channels; unit++) {
if (vector & (1 << (unit + 1)))
if ((ch = ctlr->interrupt[unit].argument))
ctlr->interrupt[unit].function(ch);
if (vector & (1 << (unit + 5)))
if ((ch = ctlr->interrupt[unit].argument))
ata_promise_queue_hpkt(ctlr,
htole32((ch->unit * ATA_PDC_CHN_OFFSET) +
ATA_PDC_HPKT_OFFSET));
if (vector & (1 << (unit + 9))) {
ata_promise_next_hpkt(ctlr);
if ((ch = ctlr->interrupt[unit].argument))
ctlr->interrupt[unit].function(ch);
}
if (vector & (1 << (unit + 13))) {
ata_promise_next_hpkt(ctlr);
if ((ch = ctlr->interrupt[unit].argument))
ATA_OUTL(ctlr->r_res2, 0x000c0240 + (ch->unit << 7),
htole32((ch->unit * ATA_PDC_CHN_OFFSET) +
ATA_PDC_APKT_OFFSET));
}
}
}
static void
ata_promise_mio_dmainit(struct ata_channel *ch)
{
ata_dmainit(ch);
if (ch->dma) {
ch->dma->start = ata_promise_mio_dmastart;
ch->dma->stop = ata_promise_mio_dmastop;
}
}
static int
ata_promise_mio_dmastart(struct ata_channel *ch)
{
return 0;
}
static int
ata_promise_mio_dmastop(struct ata_channel *ch)
{
/* get status XXX SOS */
return 0;
}
static void
ata_promise_mio_reset(struct ata_channel *ch)
{
struct ata_pci_controller *ctlr =
device_get_softc(device_get_parent(ch->dev));
int offset = (ctlr->chip->cfg2 & PRSX4X) ? 0x000c0000 : 0;
ATA_OUTL(ctlr->r_res2, offset + 0x0260 + (ch->unit << 7),
(ATA_INL(ctlr->r_res2, offset + 0x0260 + (ch->unit << 7)) &
~0x00003f9f) | (ch->unit + 1));
if (ctlr->chip->cfg2 & PRSX4X) {
struct ata_promise_sx4 *hpktp = ctlr->driver;
mtx_lock(&hpktp->mtx);
ATA_OUTL(ctlr->r_res2, 0xc012c,
(ATA_INL(ctlr->r_res2, 0xc012c) & ~0x00000f9f) | (1 << 11));
DELAY(10);
ATA_OUTL(ctlr->r_res2, 0xc012c,
(ATA_INL(ctlr->r_res2, 0xc012c) & ~0x00000f9f));
mtx_unlock(&hpktp->mtx);
}
}
static int
ata_promise_mio_command(struct ata_device *atadev, u_int8_t command,
u_int64_t lba, u_int16_t count, u_int16_t feature)
{
struct ata_pci_controller *ctlr =
device_get_softc(device_get_parent(atadev->channel->dev));
u_int32_t *wordp = (u_int32_t *)atadev->channel->dma->workspace;
ATA_OUTL(ctlr->r_res2, (atadev->channel->unit + 1) << 2, 0x00000001);
/* if not a DMA read/write fall back to generic ATA handling code */
if (command != ATA_READ_DMA && command != ATA_WRITE_DMA)
return ata_generic_command(atadev, command, lba, count, feature);
if (command == ATA_READ_DMA)
wordp[0] = htole32(0x04 | ((atadev->channel->unit+1)<<16) | (0x00<<24));
if (command == ATA_WRITE_DMA)
wordp[0] = htole32(0x00 | ((atadev->channel->unit+1)<<16) | (0x00<<24));
wordp[1] = atadev->channel->dma->mdmatab;
wordp[2] = 0;
ata_promise_apkt((u_int8_t*)wordp, atadev, command, lba, count, feature);
ATA_OUTL(ctlr->r_res2, 0x0240 + (atadev->channel->unit << 7),
atadev->channel->dma->wdmatab);
return 0;
}
static int
ata_promise_sx4_command(struct ata_device *atadev, u_int8_t command,
u_int64_t lba, u_int16_t count, u_int16_t feature)
{
struct ata_channel *ch = atadev->channel;
struct ata_pci_controller *ctlr =
device_get_softc(device_get_parent(ch->dev));
caddr_t window = rman_get_virtual(ctlr->r_res1);
u_int32_t *wordp;
int i, idx, length = 0;
if (command == ATA_ATA_IDENTIFY) {
ATA_OUTL(ctlr->r_res2, 0x000c0400 + ((ch->unit + 1) << 2), 0x00000001);
return ata_generic_command(atadev, command, lba, count, feature);
}
if (ch->running->flags & ATA_R_CONTROL) {
wordp = (u_int32_t *)
(window + (ch->unit * ATA_PDC_CHN_OFFSET) + ATA_PDC_APKT_OFFSET);
wordp[0] = htole32(0x08 | ((ch->unit + 1)<<16) | (0x00 << 24));
wordp[1] = 0;
wordp[2] = 0;
ata_promise_apkt((u_int8_t *)wordp, atadev, command, lba,count,feature);
ATA_OUTL(ctlr->r_res2, 0x000c0484, 0x00000001);
ATA_OUTL(ctlr->r_res2, 0x000c0400 + ((ch->unit + 1) << 2), 0x00000001);
ATA_OUTL(ctlr->r_res2, 0x000c0240 + (ch->unit << 7),
htole32((ch->unit * ATA_PDC_CHN_OFFSET)+ATA_PDC_APKT_OFFSET));
return 0;
}
if (command != ATA_READ_DMA && command != ATA_WRITE_DMA)
return -1;
wordp = (u_int32_t *)
(window + (ch->unit * ATA_PDC_CHN_OFFSET) + ATA_PDC_HSG_OFFSET);
i = idx = 0;
do {
wordp[idx++] = htole32(ch->dma->dmatab[i].base);
wordp[idx++] = htole32(ch->dma->dmatab[i].count & ~ATA_DMA_EOT);
length += (ch->dma->dmatab[i].count & ~ATA_DMA_EOT);
} while (!(ch->dma->dmatab[i++].count & ATA_DMA_EOT));
wordp[idx - 1] |= htole32(ATA_DMA_EOT);
wordp = (u_int32_t *)
(window + (ch->unit * ATA_PDC_CHN_OFFSET) + ATA_PDC_LSG_OFFSET);
wordp[0] = htole32((ch->unit * ATA_PDC_BUF_OFFSET) + ATA_PDC_BUF_BASE);
wordp[1] = htole32((count * DEV_BSIZE) | ATA_DMA_EOT);
wordp = (u_int32_t *)
(window + (ch->unit * ATA_PDC_CHN_OFFSET) + ATA_PDC_ASG_OFFSET);
wordp[0] = htole32((ch->unit * ATA_PDC_BUF_OFFSET) + ATA_PDC_BUF_BASE);
wordp[1] = htole32((count * DEV_BSIZE) | ATA_DMA_EOT);
wordp = (u_int32_t *)
(window + (ch->unit * ATA_PDC_CHN_OFFSET) + ATA_PDC_HPKT_OFFSET);
if (command == ATA_READ_DMA)
wordp[0] = htole32(0x14 | ((ch->unit + 9)<<16) | ((ch->unit + 5)<<24));
if (command == ATA_WRITE_DMA)
wordp[0] = htole32(0x00 | ((ch->unit + 13) << 16) | (0x00 << 24));
wordp[1] = htole32((ch->unit * ATA_PDC_CHN_OFFSET)+ATA_PDC_HSG_OFFSET);
wordp[2] = htole32((ch->unit * ATA_PDC_CHN_OFFSET)+ATA_PDC_LSG_OFFSET);
wordp[3] = 0;
wordp = (u_int32_t *)
(window + (ch->unit * ATA_PDC_CHN_OFFSET) + ATA_PDC_APKT_OFFSET);
if (command == ATA_READ_DMA) {
wordp[0] = htole32(0x04 | ((ch->unit + 5) << 16) | (0x00 << 24));
wordp[1] = htole32((ch->unit * ATA_PDC_CHN_OFFSET)+ATA_PDC_ASG_OFFSET);
}
if (command == ATA_WRITE_DMA) {
wordp[0] = htole32(0x10 | ((ch->unit + 1)<<16) | ((ch->unit + 13)<<24));
wordp[1] = htole32((ch->unit * ATA_PDC_CHN_OFFSET)+ATA_PDC_ASG_OFFSET);
}
wordp[2] = 0;
ata_promise_apkt((u_int8_t *)wordp, atadev, command, lba, count, feature);
ATA_OUTL(ctlr->r_res2, 0x000c0484, 0x00000001);
if (command == ATA_READ_DMA) {
ATA_OUTL(ctlr->r_res2, 0x000c0400 + ((ch->unit + 5) << 2), 0x00000001);
ATA_OUTL(ctlr->r_res2, 0x000c0400 + ((ch->unit + 9) << 2), 0x00000001);
ATA_OUTL(ctlr->r_res2, 0x000c0240 + (ch->unit << 7),
htole32((ch->unit * ATA_PDC_CHN_OFFSET)+ATA_PDC_APKT_OFFSET));
}
if (command == ATA_WRITE_DMA) {
ATA_OUTL(ctlr->r_res2, 0x000c0400 + ((ch->unit + 1) << 2), 0x00000001);
ATA_OUTL(ctlr->r_res2, 0x000c0400 + ((ch->unit + 13) << 2), 0x00000001);
ata_promise_queue_hpkt(ctlr, htole32((ch->unit * ATA_PDC_CHN_OFFSET) +
ATA_PDC_HPKT_OFFSET));
}
return 0;
}
static int
ata_promise_apkt(u_int8_t *bytep, struct ata_device *atadev, u_int8_t command,
u_int64_t lba, u_int16_t count, u_int16_t feature)
{
int i = 12;
bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_REG | ATA_PDC_WAIT_NBUSY|ATA_DRIVE;
bytep[i++] = ATA_D_IBM | ATA_D_LBA | atadev->unit;
bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_CTL;
bytep[i++] = ATA_A_4BIT;
if ((lba > 268435455 || count > 256) && atadev->param &&
(atadev->param->support.command2 & ATA_SUPPORT_ADDRESS48)) {
atadev->channel->flags |= ATA_48BIT_ACTIVE;
if (command == ATA_READ_DMA)
command = ATA_READ_DMA48;
if (command == ATA_WRITE_DMA)
command = ATA_WRITE_DMA48;
bytep[i++] = ATA_PDC_2B | ATA_PDC_WRITE_REG | ATA_FEATURE;
bytep[i++] = (feature >> 8) & 0xff;
bytep[i++] = feature & 0xff;
bytep[i++] = ATA_PDC_2B | ATA_PDC_WRITE_REG | ATA_COUNT;
bytep[i++] = (count >> 8) & 0xff;
bytep[i++] = count & 0xff;
bytep[i++] = ATA_PDC_2B | ATA_PDC_WRITE_REG | ATA_SECTOR;
bytep[i++] = (lba >> 24) & 0xff;
bytep[i++] = lba & 0xff;
bytep[i++] = ATA_PDC_2B | ATA_PDC_WRITE_REG | ATA_CYL_LSB;
bytep[i++] = (lba >> 32) & 0xff;
bytep[i++] = (lba >> 8) & 0xff;
bytep[i++] = ATA_PDC_2B | ATA_PDC_WRITE_REG | ATA_CYL_MSB;
bytep[i++] = (lba >> 40) & 0xff;
bytep[i++] = (lba >> 16) & 0xff;
bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_REG | ATA_DRIVE;
bytep[i++] = ATA_D_LBA | atadev->unit;
}
else {
atadev->channel->flags &= ~ATA_48BIT_ACTIVE;
bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_REG | ATA_FEATURE;
bytep[i++] = feature;
bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_REG | ATA_COUNT;
bytep[i++] = count;
bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_REG | ATA_SECTOR;
bytep[i++] = lba & 0xff;
bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_REG | ATA_CYL_LSB;
bytep[i++] = (lba >> 8) & 0xff;
bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_REG | ATA_CYL_MSB;
bytep[i++] = (lba >> 16) & 0xff;
bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_REG | ATA_DRIVE;
bytep[i++] = (atadev->flags & ATA_D_USE_CHS ? 0 : ATA_D_LBA) |
ATA_D_IBM | atadev->unit | ((lba >> 24) & 0xf);
}
bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_END | ATA_CMD;
bytep[i++] = command;
return i;
}
static void
ata_promise_queue_hpkt(struct ata_pci_controller *ctlr, u_int32_t hpkt)
{
struct ata_promise_sx4 *hpktp = ctlr->driver;
mtx_lock(&hpktp->mtx);
if (hpktp->tail == hpktp->head && !hpktp->busy) {
ATA_OUTL(ctlr->r_res2, 0x000c0100, hpkt);
hpktp->busy = 1;
}
else
hpktp->array[(hpktp->head++) & (ATA_PDC_MAX_HPKT - 1)] = hpkt;
mtx_unlock(&hpktp->mtx);
}
static void
ata_promise_next_hpkt(struct ata_pci_controller *ctlr)
{
struct ata_promise_sx4 *hpktp = ctlr->driver;
mtx_lock(&hpktp->mtx);
if (hpktp->tail != hpktp->head) {
ATA_OUTL(ctlr->r_res2, 0x000c0100,
hpktp->array[(hpktp->tail++) & (ATA_PDC_MAX_HPKT - 1)]);
}
else
hpktp->busy = 0;
mtx_unlock(&hpktp->mtx);
}
static void
ata_promise_tx2_intr(void *data)
{
@ -1287,102 +1641,31 @@ ata_promise_tx2_intr(void *data)
}
static void
ata_promise_mio_intr(void *data)
ata_promise_old_intr(void *data)
{
struct ata_pci_controller *ctlr = data;
struct ata_channel *ch;
u_int32_t irq_vector;
int unit;
irq_vector = ATA_INL(ctlr->r_res2, 0x0040);
for (unit = 0; unit < ctlr->channels; unit++) {
if (irq_vector & (1 << (unit + 1))) {
if ((ch = ctlr->interrupt[unit].argument)) {
ctlr->interrupt[unit].function(ch);
ATA_IDX_OUTL(ch, ATA_BMCMD_PORT,
(ATA_IDX_INL(ch, ATA_BMCMD_PORT) & ~0x00003f9f) |
(ch->unit + 1));
ATA_IDX_OUTL(ch, ATA_BMDEVSPEC_0, 0x00000001);
/* implement this as a toggle instead to balance load XXX */
for (unit = 0; unit < 2; unit++) {
if (!(ch = ctlr->interrupt[unit].argument))
continue;
if (ATA_INL(ctlr->r_res1, 0x1c) & (ch->unit ? 0x00004000 : 0x00000400)){
if (ch->dma && (ch->dma->flags & ATA_DMA_ACTIVE)) {
int bmstat = ATA_IDX_INB(ch, ATA_BMSTAT_PORT) & ATA_BMSTAT_MASK;
if ((bmstat & (ATA_BMSTAT_ACTIVE | ATA_BMSTAT_INTERRUPT)) !=
ATA_BMSTAT_INTERRUPT)
continue;
ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, bmstat & ~ATA_BMSTAT_ERROR);
DELAY(1);
}
ctlr->interrupt[unit].function(ch);
}
}
}
static void
ata_promise_setmode(struct ata_device *atadev, int mode)
{
device_t parent = device_get_parent(atadev->channel->dev);
struct ata_pci_controller *ctlr = device_get_softc(parent);
int devno = (atadev->channel->unit << 1) + ATA_DEV(atadev->unit);
int error;
u_int32_t timings33[][2] = {
/* PROLD PRNEW mode */
{ 0x004ff329, 0x004fff2f }, /* PIO 0 */
{ 0x004fec25, 0x004ff82a }, /* PIO 1 */
{ 0x004fe823, 0x004ff026 }, /* PIO 2 */
{ 0x004fe622, 0x004fec24 }, /* PIO 3 */
{ 0x004fe421, 0x004fe822 }, /* PIO 4 */
{ 0x004567f3, 0x004acef6 }, /* MWDMA 0 */
{ 0x004467f3, 0x0048cef6 }, /* MWDMA 1 */
{ 0x004367f3, 0x0046cef6 }, /* MWDMA 2 */
{ 0x004367f3, 0x0046cef6 }, /* UDMA 0 */
{ 0x004247f3, 0x00448ef6 }, /* UDMA 1 */
{ 0x004127f3, 0x00436ef6 }, /* UDMA 2 */
{ 0, 0x00424ef6 }, /* UDMA 3 */
{ 0, 0x004127f3 }, /* UDMA 4 */
{ 0, 0x004127f3 } /* UDMA 5 */
};
mode = ata_limit_mode(atadev, mode, ctlr->chip->max_dma);
switch (ctlr->chip->cfg1) {
case PROLD:
case PRNEW:
if (mode > ATA_UDMA2 && (pci_read_config(parent, 0x50, 2) &
(atadev->channel->unit ? 1 << 11 : 1 << 10))) {
ata_prtdev(atadev,
"DMA limited to UDMA33, non-ATA66 cable or device\n");
mode = ATA_UDMA2;
}
if (ATAPI_DEVICE(atadev) && mode > ATA_PIO_MAX)
mode = ata_limit_mode(atadev, mode, ATA_PIO_MAX);
break;
case PRTX:
ATA_IDX_OUTB(atadev->channel, ATA_BMDEVSPEC_0, 0x0b);
if (mode > ATA_UDMA2 &&
ATA_IDX_INB(atadev->channel, ATA_BMDEVSPEC_1) & 0x04) {
ata_prtdev(atadev,
"DMA limited to UDMA33, non-ATA66 cable or device\n");
mode = ATA_UDMA2;
}
break;
case PRMIO:
if (mode > ATA_UDMA2 &&
(ATA_IDX_INL(atadev->channel, ATA_BMCMD_PORT) & 0x01000000)) {
ata_prtdev(atadev,
"DMA limited to UDMA33, non-ATA66 cable or device\n");
mode = ATA_UDMA2;
}
break;
}
error = ata_controlcmd(atadev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode);
if (bootverbose)
ata_prtdev(atadev, "%ssetting %s on %s chip\n",
(error) ? "FAILURE " : "",
ata_mode2str(mode), ctlr->chip->text);
if (!error) {
if (ctlr->chip->cfg1 < PRTX)
pci_write_config(parent, 0x60 + (devno << 2),
timings33[ctlr->chip->cfg1][ata_mode2idx(mode)],4);
atadev->mode = mode;
}
return;
}
static void
ata_promise_new_dmainit(struct ata_channel *ch)
{
@ -1435,31 +1718,80 @@ ata_promise_new_dmastop(struct ata_channel *ch)
}
static void
ata_promise_mio_dmainit(struct ata_channel *ch)
ata_promise_setmode(struct ata_device *atadev, int mode)
{
ata_dmainit(ch);
if (ch->dma) {
ch->dma->start = ata_promise_mio_dmastart;
ch->dma->stop = ata_promise_mio_dmastop;
device_t parent = device_get_parent(atadev->channel->dev);
struct ata_pci_controller *ctlr = device_get_softc(parent);
int devno = (atadev->channel->unit << 1) + ATA_DEV(atadev->unit);
int error;
u_int32_t timings33[][2] = {
/* PROLD PRNEW mode */
{ 0x004ff329, 0x004fff2f }, /* PIO 0 */
{ 0x004fec25, 0x004ff82a }, /* PIO 1 */
{ 0x004fe823, 0x004ff026 }, /* PIO 2 */
{ 0x004fe622, 0x004fec24 }, /* PIO 3 */
{ 0x004fe421, 0x004fe822 }, /* PIO 4 */
{ 0x004567f3, 0x004acef6 }, /* MWDMA 0 */
{ 0x004467f3, 0x0048cef6 }, /* MWDMA 1 */
{ 0x004367f3, 0x0046cef6 }, /* MWDMA 2 */
{ 0x004367f3, 0x0046cef6 }, /* UDMA 0 */
{ 0x004247f3, 0x00448ef6 }, /* UDMA 1 */
{ 0x004127f3, 0x00436ef6 }, /* UDMA 2 */
{ 0, 0x00424ef6 }, /* UDMA 3 */
{ 0, 0x004127f3 }, /* UDMA 4 */
{ 0, 0x004127f3 } /* UDMA 5 */
};
mode = ata_limit_mode(atadev, mode, ctlr->chip->max_dma);
switch (ctlr->chip->cfg1) {
case PROLD:
case PRNEW:
if (mode > ATA_UDMA2 && (pci_read_config(parent, 0x50, 2) &
(atadev->channel->unit ? 1 << 11 : 1 << 10))) {
ata_prtdev(atadev,
"DMA limited to UDMA33, non-ATA66 cable or device\n");
mode = ATA_UDMA2;
}
if (ATAPI_DEVICE(atadev) && mode > ATA_PIO_MAX)
mode = ata_limit_mode(atadev, mode, ATA_PIO_MAX);
break;
case PRTX:
ATA_IDX_OUTB(atadev->channel, ATA_BMDEVSPEC_0, 0x0b);
if (mode > ATA_UDMA2 &&
ATA_IDX_INB(atadev->channel, ATA_BMDEVSPEC_1) & 0x04) {
ata_prtdev(atadev,
"DMA limited to UDMA33, non-ATA66 cable or device\n");
mode = ATA_UDMA2;
}
break;
case PRMIO:
if (mode > ATA_UDMA2 &&
(ATA_INL(ctlr->r_res2,
(ctlr->chip->cfg2 & PRSX4X ? 0x000c0260 : 0x0260) +
(atadev->channel->unit << 7)) & 0x01000000)) {
ata_prtdev(atadev,
"DMA limited to UDMA33, non-ATA66 cable or device\n");
mode = ATA_UDMA2;
}
break;
}
}
static int
ata_promise_mio_dmastart(struct ata_channel *ch)
{
ATA_IDX_OUTL(ch, ATA_BMDTP_PORT, ch->dma->mdmatab);
ATA_IDX_OUTL(ch, ATA_BMCMD_PORT,
(ATA_IDX_INL(ch, ATA_BMCMD_PORT) & ~0x000000c0) |
((ch->dma->flags & ATA_DMA_READ) ? 0x00000080 : 0x000000c0));
return 0;
}
error = ata_controlcmd(atadev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode);
static int
ata_promise_mio_dmastop(struct ata_channel *ch)
{
ATA_IDX_OUTL(ch, ATA_BMCMD_PORT,
ATA_IDX_INL(ch, ATA_BMCMD_PORT) & ~0x00000080);
return 0;
if (bootverbose)
ata_prtdev(atadev, "%ssetting %s on %s chip\n",
(error) ? "FAILURE " : "",
ata_mode2str(mode), ctlr->chip->text);
if (!error) {
if (ctlr->chip->cfg1 < PRTX)
pci_write_config(parent, 0x60 + (devno << 2),
timings33[ctlr->chip->cfg1][ata_mode2idx(mode)],4);
atadev->mode = mode;
}
return;
}
/*
@ -1634,9 +1966,8 @@ ata_sii_chipinit(device_t dev)
ctlr->r_type2 = SYS_RES_MEMORY;
ctlr->r_rid2 = 0x24;
if (!(ctlr->r_res2 =
bus_alloc_resource_any(dev, ctlr->r_type2, &ctlr->r_rid2,
RF_ACTIVE)))
if (!(ctlr->r_res2 = bus_alloc_resource_any(dev, ctlr->r_type2,
&ctlr->r_rid2, RF_ACTIVE)))
return ENXIO;
if (ctlr->chip->cfg2 & SIISETCLK) {
@ -1716,6 +2047,8 @@ ata_sii_mio_allocate(device_t dev, struct ata_channel *ch)
if (ctlr->chip->cfg2 & SIIBUG)
ch->dma->boundary = 8 * 1024;
ata_generic_hw(ch);
return 0;
}

View File

@ -104,8 +104,7 @@ ad_attach(struct ata_device *atadev)
((u_int32_t)atadev->param->lba_size_2 << 16);
/* does this device need oldstyle CHS addressing */
if (!ad_version(atadev->param->version_major) ||
!(atadev->param->atavalid & ATA_FLAG_54_58) || !lbasize)
if (!ad_version(atadev->param->version_major) || !lbasize)
atadev->flags |= ATA_D_USE_CHS;
/* use the 28bit LBA size if valid or bigger than the CHS mapping */
@ -393,8 +392,8 @@ ad_print(struct ad_softc *adp)
(adp->flags & AD_F_TAG_ENABLED) ? "tagged " : "",
ata_mode2str(adp->device->mode));
}
else
ata_prtdev(adp->device,"%lluMB <%.40s> [%lld/%d/%d] at ata%d-%s %s%s\n",
else {
ata_prtdev(adp->device,"%lluMB <%.40s> [%lld/%d/%d] at ata%d-%s %s",
(unsigned long long)(adp->total_secs /
((1024L * 1024L) / DEV_BSIZE)),
adp->device->param->model,
@ -403,8 +402,14 @@ ad_print(struct ad_softc *adp)
adp->heads, adp->sectors,
device_get_unit(adp->device->channel->dev),
(adp->device->unit == ATA_MASTER) ? "master" : "slave",
(adp->flags & AD_F_TAG_ENABLED) ? "tagged " : "",
ata_mode2str(adp->device->mode));
(adp->flags & AD_F_TAG_ENABLED) ? "tagged " : "");
if (adp->device->param->satacapabilities != 0x0000 &&
adp->device->param->satacapabilities != 0xffff)
printf("SATA150\n");
else
printf("%s\n", ata_mode2str(adp->device->mode));
}
}
static int

View File

@ -59,6 +59,7 @@ static MALLOC_DEFINE(M_ATADMA, "ATA DMA", "ATA driver DMA");
/* misc defines */
#define MAXSEGSZ PAGE_SIZE
#define MAXTABSZ PAGE_SIZE
#define MAXWSPCSZ 256
#define MAXCTLDMASZ (2 * (MAXTABSZ + MAXPHYS))
struct ata_dc_cb_args {
@ -121,16 +122,35 @@ ata_dmaalloc(struct ata_channel *ch)
if (bus_dmamap_load(ch->dma->cdmatag, ch->dma->cdmamap, ch->dma->dmatab,
MAXTABSZ, ata_dmasetupc_cb, &ccba, 0) || ccba.error) {
bus_dmamem_free(ch->dma->cdmatag, ch->dma->dmatab,ch->dma->cdmamap);
bus_dmamem_free(ch->dma->cdmatag, ch->dma->dmatab, ch->dma->cdmamap);
goto error;
}
ch->dma->mdmatab = ccba.maddr;
if (bus_dmamap_create(ch->dma->ddmatag, 0, &ch->dma->ddmamap))
goto error;
if (bus_dma_tag_create(ch->dma->dmatag, PAGE_SIZE, PAGE_SIZE,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
NULL, NULL, MAXWSPCSZ, 1, MAXWSPCSZ,
BUS_DMA_ALLOCNOW, NULL, NULL, &ch->dma->wdmatag))
goto error;
if (bus_dmamem_alloc(ch->dma->wdmatag, (void **)&ch->dma->workspace, 0,
&ch->dma->wdmamap))
goto error;
if (bus_dmamap_load(ch->dma->wdmatag, ch->dma->wdmamap, ch->dma->workspace,
MAXWSPCSZ, ata_dmasetupc_cb, &ccba, 0) || ccba.error) {
bus_dmamem_free(ch->dma->wdmatag, ch->dma->workspace, ch->dma->wdmamap);
goto error;
}
ch->dma->wdmatab = ccba.maddr;
return;
error:
ata_printf(ch, -1, "WARNING - DMA tag allocation failed, disabling DMA\n");
ata_printf(ch, -1, "WARNING - DMA allocation failed, disabling DMA\n");
ata_dmafree(ch);
free(ch->dma, M_ATADMA);
ch->dma = NULL;
@ -139,6 +159,17 @@ ata_dmaalloc(struct ata_channel *ch)
static void
ata_dmafree(struct ata_channel *ch)
{
if (ch->dma->wdmatab) {
bus_dmamap_unload(ch->dma->wdmatag, ch->dma->wdmamap);
bus_dmamem_free(ch->dma->wdmatag, ch->dma->workspace, ch->dma->wdmamap);
ch->dma->wdmatab = 0;
ch->dma->wdmamap = NULL;
ch->dma->workspace = NULL;
}
if (ch->dma->wdmatag) {
bus_dma_tag_destroy(ch->dma->wdmatag);
ch->dma->wdmatag = NULL;
}
if (ch->dma->mdmatab) {
bus_dmamap_unload(ch->dma->cdmatag, ch->dma->cdmamap);
bus_dmamem_free(ch->dma->cdmatag, ch->dma->dmatab, ch->dma->cdmamap);

View File

@ -117,6 +117,7 @@ ata_isa_probe(device_t dev)
ch->locking = ata_isa_lock;
ch->device[MASTER].setmode = ata_isa_setmode;
ch->device[SLAVE].setmode = ata_isa_setmode;
ata_generic_hw(ch);
return ata_probe(dev);
}

View File

@ -44,11 +44,11 @@ __FBSDID("$FreeBSD$");
#include <dev/ata/ata-all.h>
/* prototypes */
static int ata_transaction(struct ata_request *);
static void ata_interrupt(void *);
static void ata_reset(struct ata_channel *);
static int ata_generic_transaction(struct ata_request *);
static void ata_generic_interrupt(void *);
static void ata_generic_reset(struct ata_channel *);
static int ata_wait(struct ata_device *, u_int8_t);
static int ata_command(struct ata_device *, u_int8_t, u_int64_t, u_int16_t, u_int16_t);
/*static int ata_command(struct ata_device *, u_int8_t, u_int64_t, u_int16_t, u_int16_t);*/
static void ata_pio_read(struct ata_request *, int);
static void ata_pio_write(struct ata_request *, int);
@ -61,15 +61,18 @@ static int atadebug = 0;
void
ata_generic_hw(struct ata_channel *ch)
{
ch->hw.reset = ata_reset;
ch->hw.transaction = ata_transaction;
ch->hw.interrupt = ata_interrupt;
ch->hw.reset = ata_generic_reset;
ch->hw.transaction = ata_generic_transaction;
ch->hw.interrupt = ata_generic_interrupt;
ch->hw.command = ata_generic_command;
}
/* must be called with ATA channel locked */
static int
ata_transaction(struct ata_request *request)
ata_generic_transaction(struct ata_request *request)
{
struct ata_channel *ch = request->device->channel;
/* safetybelt for HW that went away */
if (!request->device->param || request->device->channel->flags&ATA_HWGONE) {
request->result = ENXIO;
@ -77,12 +80,12 @@ ata_transaction(struct ata_request *request)
}
/* record the request as running */
request->device->channel->running = request;
ch->running = request;
ATA_DEBUG_RQ(request, "transaction");
/* disable ATAPI DMA writes if HW doesn't support it */
if ((request->device->channel->flags & ATA_ATAPI_DMA_RO) &&
if ((ch->flags & ATA_ATAPI_DMA_RO) &&
((request->flags & (ATA_R_ATAPI | ATA_R_DMA | ATA_R_WRITE)) ==
(ATA_R_ATAPI | ATA_R_DMA | ATA_R_WRITE)))
request->flags &= ~ATA_R_DMA;
@ -96,9 +99,9 @@ ata_transaction(struct ata_request *request)
int write = (request->flags & ATA_R_WRITE);
/* issue command */
if (ata_command(request->device, request->u.ata.command,
request->u.ata.lba, request->u.ata.count,
request->u.ata.feature)) {
if (ch->hw.command(request->device, request->u.ata.command,
request->u.ata.lba, request->u.ata.count,
request->u.ata.feature)) {
ata_prtdev(request->device, "error issueing PIO command\n");
request->result = EIO;
break;
@ -122,26 +125,24 @@ ata_transaction(struct ata_request *request)
/* ATA DMA data transfer commands */
case ATA_R_DMA:
/* check sanity, setup SG list and DMA engine */
if (request->device->channel->dma->load(request->device,
request->data,
request->bytecount,
request->flags & ATA_R_READ)) {
if (ch->dma->load(request->device, request->data, request->bytecount,
request->flags & ATA_R_READ)) {
ata_prtdev(request->device, "setting up DMA failed\n");
request->result = EIO;
break;
}
/* issue command */
if (ata_command(request->device, request->u.ata.command,
request->u.ata.lba, request->u.ata.count,
request->u.ata.feature)) {
if (ch->hw.command(request->device, request->u.ata.command,
request->u.ata.lba, request->u.ata.count,
request->u.ata.feature)) {
ata_prtdev(request->device, "error issuing DMA command\n");
request->result = EIO;
break;
}
/* start DMA engine */
if (request->device->channel->dma->start(request->device->channel)) {
if (ch->dma->start(ch)) {
ata_prtdev(request->device, "error starting DMA\n");
request->result = EIO;
break;
@ -154,17 +155,17 @@ ata_transaction(struct ata_request *request)
case ATA_R_ATAPI:
/* is this just a POLL DSC command ? */
if (request->u.atapi.ccb[0] == ATAPI_POLL_DSC) {
ATA_IDX_OUTB(request->device->channel, ATA_DRIVE,
ATA_IDX_OUTB(ch, ATA_DRIVE,
ATA_D_IBM | request->device->unit);
DELAY(10);
if (!(ATA_IDX_INB(request->device->channel, ATA_ALTSTAT)&ATA_S_DSC))
if (!(ATA_IDX_INB(ch, ATA_ALTSTAT)&ATA_S_DSC))
request->result = EBUSY;
break;
}
/* start ATAPI operation */
if (ata_command(request->device, ATA_PACKET_CMD,
request->transfersize << 8, 0, 0)) {
if (ch->hw.command(request->device, ATA_PACKET_CMD,
request->transfersize << 8, 0, 0)) {
ata_prtdev(request->device, "error issuing ATA PACKET command\n");
request->result = EIO;
break;
@ -178,8 +179,8 @@ ata_transaction(struct ata_request *request)
{
int timeout = 5000; /* might be less for fast devices */
while (timeout--) {
int reason = ATA_IDX_INB(request->device->channel, ATA_IREASON);
int status = ATA_IDX_INB(request->device->channel, ATA_STATUS);
int reason = ATA_IDX_INB(ch, ATA_IREASON);
int status = ATA_IDX_INB(ch, ATA_STATUS);
if (((reason & (ATA_I_CMD | ATA_I_IN)) |
(status & (ATA_S_DRQ | ATA_S_BUSY))) == ATAPI_P_CMDOUT)
@ -198,7 +199,7 @@ ata_transaction(struct ata_request *request)
DELAY(10);
/* output actual command block */
ATA_IDX_OUTSW_STRM(request->device->channel, ATA_DATA,
ATA_IDX_OUTSW_STRM(ch, ATA_DATA,
(int16_t *)request->u.atapi.ccb,
(request->device->param->config & ATA_PROTO_MASK) ==
ATA_PROTO_ATAPI_12 ? 6 : 8);
@ -209,16 +210,16 @@ ata_transaction(struct ata_request *request)
case ATA_R_ATAPI|ATA_R_DMA:
/* is this just a POLL DSC command ? */
if (request->u.atapi.ccb[0] == ATAPI_POLL_DSC) {
ATA_IDX_OUTB(request->device->channel, ATA_DRIVE,
ATA_IDX_OUTB(ch, ATA_DRIVE,
ATA_D_IBM | request->device->unit);
DELAY(10);
if (!(ATA_IDX_INB(request->device->channel, ATA_ALTSTAT)&ATA_S_DSC))
if (!(ATA_IDX_INB(ch, ATA_ALTSTAT)&ATA_S_DSC))
request->result = EBUSY;
break;
}
/* check sanity, setup SG list and DMA engine */
if (request->device->channel->dma->load(request->device,
if (ch->dma->load(request->device,
request->data,
request->bytecount,
request->flags & ATA_R_READ)) {
@ -228,7 +229,7 @@ ata_transaction(struct ata_request *request)
}
/* start ATAPI operation */
if (ata_command(request->device, ATA_PACKET_CMD, 0, 0, ATA_F_DMA)) {
if (ch->hw.command(request->device, ATA_PACKET_CMD, 0, 0, ATA_F_DMA)) {
ata_prtdev(request->device, "error issuing ATAPI packet command\n");
request->result = EIO;
break;
@ -238,8 +239,8 @@ ata_transaction(struct ata_request *request)
{
int timeout = 5000; /* might be less for fast devices */
while (timeout--) {
int reason = ATA_IDX_INB(request->device->channel, ATA_IREASON);
int status = ATA_IDX_INB(request->device->channel, ATA_STATUS);
int reason = ATA_IDX_INB(ch, ATA_IREASON);
int status = ATA_IDX_INB(ch, ATA_STATUS);
if (((reason & (ATA_I_CMD | ATA_I_IN)) |
(status & (ATA_S_DRQ | ATA_S_BUSY))) == ATAPI_P_CMDOUT)
@ -257,13 +258,13 @@ ata_transaction(struct ata_request *request)
DELAY(10);
/* output actual command block */
ATA_IDX_OUTSW_STRM(request->device->channel, ATA_DATA,
ATA_IDX_OUTSW_STRM(ch, ATA_DATA,
(int16_t *)request->u.atapi.ccb,
(request->device->param->config & ATA_PROTO_MASK) ==
ATA_PROTO_ATAPI_12 ? 6 : 8);
/* start DMA engine */
if (request->device->channel->dma->start(request->device->channel)) {
if (ch->dma->start(ch)) {
request->result = EIO;
break;
}
@ -273,14 +274,14 @@ ata_transaction(struct ata_request *request)
}
/* request finish here */
if (request->device->channel->dma->flags & ATA_DMA_ACTIVE)
request->device->channel->dma->unload(request->device->channel);
request->device->channel->running = NULL;
if (ch->dma->flags & ATA_DMA_ACTIVE)
ch->dma->unload(ch);
ch->running = NULL;
return ATA_OP_FINISHED;
}
static void
ata_interrupt(void *data)
ata_generic_interrupt(void *data)
{
struct ata_channel *ch = (struct ata_channel *)data;
struct ata_request *request = ch->running;
@ -320,7 +321,7 @@ ata_interrupt(void *data)
/* clear interrupt and get status */
request->status = ATA_IDX_INB(ch, ATA_STATUS);
/* register interrupt */
if (!(request->flags & ATA_R_TIMEOUT))
request->flags |= ATA_R_INTR_SEEN;
@ -391,8 +392,10 @@ ata_interrupt(void *data)
/* ATA DMA data transfer commands */
case ATA_R_DMA:
/* stop DMA engine and get status */
request->dmastat = ch->dma->stop(ch);
if (ch->dma->stop)
request->dmastat = ch->dma->stop(ch);
/* did we get error or data */
if (request->status & ATA_S_ERROR)
@ -498,7 +501,8 @@ ata_interrupt(void *data)
case ATA_R_ATAPI|ATA_R_DMA:
/* stop the engine and get engine status */
request->dmastat = ch->dma->stop(ch);
if (ch->dma->stop)
request->dmastat = ch->dma->stop(ch);
/* did we get error or data */
if (request->status & (ATA_S_ERROR | ATA_S_DWF))
@ -528,12 +532,16 @@ ata_interrupt(void *data)
/* must be called with ATA channel locked */
static void
ata_reset(struct ata_channel *ch)
ata_generic_reset(struct ata_channel *ch)
{
u_int8_t err, lsb, msb, ostat0, ostat1;
u_int8_t stat0 = 0, stat1 = 0;
int mask = 0, timeout;
/* reset host end of channel (if supported) */
if (ch->reset)
ch->reset(ch);
/* do we have any signs of ATA/ATAPI HW being present ? */
ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | ATA_MASTER);
DELAY(10);
@ -565,10 +573,6 @@ ata_reset(struct ata_channel *ch)
ata_printf(ch, -1, "reset tp1 mask=%02x ostat0=%02x ostat1=%02x\n",
mask, ostat0, ostat1);
/* reset host end of channel (if supported) */
if (ch->reset)
ch->reset(ch);
/* reset (both) devices on this channel */
ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | ATA_MASTER);
DELAY(10);
@ -712,9 +716,9 @@ ata_wait(struct ata_device *atadev, u_int8_t mask)
return -1;
}
static int
ata_command(struct ata_device *atadev, u_int8_t command,
u_int64_t lba, u_int16_t count, u_int16_t feature)
int
ata_generic_command(struct ata_device *atadev, u_int8_t command,
u_int64_t lba, u_int16_t count, u_int16_t feature)
{
if (atadebug)
ata_prtdev(atadev, "ata_command: addr=%04lx, command=%02x, "

View File

@ -188,7 +188,8 @@ ata_pci_attach(device_t dev)
device_add_child(dev, "ata", ATA_MASTERDEV(dev) ?
unit : devclass_find_free_unit(ata_devclass, 2));
return bus_generic_attach(dev); }
return bus_generic_attach(dev);
}
static int
ata_pci_detach(device_t dev)
@ -247,55 +248,33 @@ ata_pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
switch (*rid) {
case ATA_IOADDR_RID:
if (ATA_MASTERDEV(dev)) {
myrid = 0;
start = (unit ? ATA_SECONDARY : ATA_PRIMARY);
end = start + ATA_IOSIZE - 1;
count = ATA_IOSIZE;
res = BUS_ALLOC_RESOURCE(device_get_parent(dev), child,
SYS_RES_IOPORT, &myrid,
start, end, count, flags);
}
else {
myrid = 0x10 + 8 * unit;
res = BUS_ALLOC_RESOURCE(device_get_parent(dev), dev,
SYS_RES_IOPORT, &myrid,
start, end, count, flags);
end = start + count - 1;
}
myrid = 0x10 + 8 * unit;
res = BUS_ALLOC_RESOURCE(device_get_parent(dev), dev,
SYS_RES_IOPORT, &myrid,
start, end, count, flags);
break;
case ATA_ALTADDR_RID:
if (ATA_MASTERDEV(dev)) {
myrid = 0;
start = (unit ? ATA_SECONDARY : ATA_PRIMARY) + ATA_ALTOFFSET;
end = start + ATA_ALTIOSIZE - 1;
count = ATA_ALTIOSIZE;
res = BUS_ALLOC_RESOURCE(device_get_parent(dev), child,
SYS_RES_IOPORT, &myrid,
start, end, count, flags);
}
else {
myrid = 0x14 + 8 * unit;
res = BUS_ALLOC_RESOURCE(device_get_parent(dev), dev,
SYS_RES_IOPORT, &myrid,
start, end, count, flags);
if (res) {
start = rman_get_start(res) + 2;
end = start + ATA_ALTIOSIZE - 1;
count = ATA_ALTIOSIZE;
BUS_RELEASE_RESOURCE(device_get_parent(dev), dev,
SYS_RES_IOPORT, myrid, res);
res = BUS_ALLOC_RESOURCE(device_get_parent(dev), dev,
SYS_RES_IOPORT, &myrid,
start, end, count, flags);
}
start = (unit ? ATA_SECONDARY : ATA_PRIMARY) + ATA_ALTOFFSET-2;
count = 4;
end = start + count - 1;
}
myrid = 0x14 + 8 * unit;
res = BUS_ALLOC_RESOURCE(device_get_parent(dev), dev,
SYS_RES_IOPORT, &myrid,
start, end, count, flags);
break;
}
return res;
}
if (type == SYS_RES_IRQ && *rid == ATA_IRQ_RID) {
if (ATA_MASTERDEV(dev)) {
if (ATA_MASTERDEV(dev)) {
#ifdef __alpha__
return alpha_platform_alloc_ide_intr(unit);
#else
@ -321,21 +300,13 @@ ata_pci_release_resource(device_t dev, device_t child, int type, int rid,
if (type == SYS_RES_IOPORT) {
switch (rid) {
case ATA_IOADDR_RID:
if (ATA_MASTERDEV(dev))
return BUS_RELEASE_RESOURCE(device_get_parent(dev), child,
SYS_RES_IOPORT, 0x0, r);
else
return BUS_RELEASE_RESOURCE(device_get_parent(dev), dev,
SYS_RES_IOPORT, 0x10 + 8 * unit, r);
return BUS_RELEASE_RESOURCE(device_get_parent(dev), dev,
SYS_RES_IOPORT, 0x10 + 8 * unit, r);
break;
case ATA_ALTADDR_RID:
if (ATA_MASTERDEV(dev))
return BUS_RELEASE_RESOURCE(device_get_parent(dev), child,
SYS_RES_IOPORT, 0x0, r);
else
return BUS_RELEASE_RESOURCE(device_get_parent(dev), dev,
SYS_RES_IOPORT, 0x14 + 8 * unit, r);
return BUS_RELEASE_RESOURCE(device_get_parent(dev), dev,
SYS_RES_IOPORT, 0x14 + 8 * unit, r);
break;
default:
return ENOENT;
@ -352,7 +323,7 @@ ata_pci_release_resource(device_t dev, device_t child, int type, int rid,
return BUS_RELEASE_RESOURCE(device_get_parent(dev), child,
SYS_RES_IRQ, rid, r);
#endif
}
}
else
return 0;
}
@ -431,7 +402,7 @@ ata_pci_allocate(device_t dev, struct ata_channel *ch)
ch->r_io[i].offset = i;
}
ch->r_io[ATA_ALTSTAT].res = altio;
ch->r_io[ATA_ALTSTAT].offset = 0;
ch->r_io[ATA_ALTSTAT].offset = 2;
ch->r_io[ATA_IDX_ADDR].res = io;
if (ctlr->r_res1) {
@ -449,6 +420,9 @@ ata_pci_allocate(device_t dev, struct ata_channel *ch)
else
ctlr->dmainit(ch);
}
ata_generic_hw(ch);
return 0;
}
@ -546,8 +520,6 @@ ata_pcisub_probe(device_t dev)
ch->device[MASTER].setmode = ctlr->setmode;
ch->device[SLAVE].setmode = ctlr->setmode;
ch->locking = ctlr->locking;
if (ch->reset)
ch->reset(ch);
return ata_probe(dev);
}

View File

@ -59,7 +59,8 @@ struct ata_pci_controller {
struct {
void (*function)(void *);
void *argument;
} interrupt[4]; /* SOS max ch# for now XXX */
} interrupt[8]; /* SOS max ch# for now XXX */
void *driver;
};
#define ATA_MASTERDEV(dev) ((pci_get_progif(dev) & 0x80) && \
@ -119,6 +120,10 @@ struct ata_pci_controller {
#define ATA_I82801DB_1 0x24ca8086
#define ATA_I82801EB 0x24db8086
#define ATA_I82801EB_1 0x24d18086
#define ATA_I82801EB_2 0x24df8086
#define ATA_I6300ESB 0x25a28086
#define ATA_I6300ESB_1 0x25a38086
#define ATA_I6300ESB_2 0x25b08086
#define ATA_NATIONAL_ID 0x100b
#define ATA_SC1100 0x0502100b
@ -154,6 +159,7 @@ struct ata_pci_controller {
#define ATA_PDC20619 0x6629105a
#define ATA_PDC20620 0x6620105a
#define ATA_PDC20621 0x6621105a
#define ATA_PDC20622 0x6622105a
#define ATA_SERVERWORKS_ID 0x1166
#define ATA_ROSB4_ISA 0x02001166
@ -253,7 +259,7 @@ struct ata_pci_controller {
#define PRTX 2
#define PRMIO 3
#define PRTX4 0x01
#define PRSX4K 0x02
#define PRSX4X 0x02
#define PRSX6K 0x04
#define PRSATA 0x08
#define PRDUAL 0x10

View File

@ -264,7 +264,8 @@ ata_completed(void *context, int dummy)
ata_prtdev(request->device,
"FAILURE - %s timed out\n",
ata_cmd2str(request));
request->result = EIO;
if (!request->result)
request->result = EIO;
}
}
}
@ -479,7 +480,7 @@ ata_fail_requests(struct ata_channel *ch, struct ata_device *device)
mtx_unlock(&ch->queue_mtx);
/* if we have a request "in flight" fail it as well */
if ((!device || request->device == device) && (request = ch->running)) {
if ((request = ch->running) && (!device || request->device == device)) {
untimeout((timeout_t *)ata_timeout, request, request->timeout_handle);
ATA_UNLOCK_CH(request->device->channel);
request->device->channel->locking(request->device->channel,