Reshape the dma code to be a bit more flexible so it can cope with
new HW that has new and different demands.

Fix a few nits in the former commit in this cleanup crusade.

Sponsored by:	pair.com
This commit is contained in:
Søren Schmidt 2005-05-03 07:55:07 +00:00
parent af06505ae3
commit eeda55ce8e
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=145818
8 changed files with 287 additions and 327 deletions
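
For orientation before the per-file hunks, a minimal sketch of how a chipset driver is expected to plug into the reshaped DMA hooks; everything named "mychip" below is hypothetical, the callback shapes follow the ata-all.h hunk in this diff, and the usual dev/ata headers (ata-all.h, sys/bus.h) are assumed.

static void
ata_mychip_dmareset(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);

    /* quiesce the engine; a real implementation also clears HW status */
    ch->dma->flags &= ~ATA_DMA_ACTIVE;
}

static void
ata_mychip_dmainit(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);

    ata_dmainit(dev);
    if (ch->dma) {
        /* start/stop stay optional for HW that does not need them;
           reset is the new hook used on channel reset */
        ch->dma->reset = ata_mychip_dmareset;
    }
}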

View File

@ -298,7 +298,7 @@ ata_interrupt(void *data)
mtx_lock(&ch->state_mtx);
do {
/* do we have a running request */
if (ch->state & ATA_TIMEOUT || !(request = ch->running))
if (!(request = ch->running) || (request->flags & ATA_R_TIMEOUT))
break;
ATA_DEBUG_RQ(request, "interrupt");
@ -744,6 +744,46 @@ ata_default_registers(device_t dev)
ch->r_io[ATA_ALTSTAT].offset = ch->r_io[ATA_CONTROL].offset;
}
u_int8_t
ata_modify_if_48bit(struct ata_request *request)
{
struct ata_device *atadev = device_get_softc(request->dev);
u_int8_t command = request->u.ata.command;
if ((request->u.ata.lba >= ATA_MAX_28BIT_LBA ||
request->u.ata.count > 256) &&
atadev->param.support.command2 & ATA_SUPPORT_ADDRESS48) {
/* translate command into 48bit version */
switch (command) {
case ATA_READ:
command = ATA_READ48; break;
case ATA_READ_MUL:
command = ATA_READ_MUL48; break;
case ATA_READ_DMA:
command = ATA_READ_DMA48; break;
case ATA_READ_DMA_QUEUED:
command = ATA_READ_DMA_QUEUED48; break;
case ATA_WRITE:
command = ATA_WRITE48; break;
case ATA_WRITE_MUL:
command = ATA_WRITE_MUL48; break;
case ATA_WRITE_DMA:
command = ATA_WRITE_DMA48; break;
case ATA_WRITE_DMA_QUEUED:
command = ATA_WRITE_DMA_QUEUED48; break;
case ATA_FLUSHCACHE:
command = ATA_FLUSHCACHE48; break;
default:
return command;
}
atadev->flags |= ATA_D_48BIT_ACTIVE;
}
else
atadev->flags &= ~ATA_D_48BIT_ACTIVE;
return command;
}
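
The helper above gives every command routine one place to do the 28-to-48-bit opcode translation and records the decision in the per-device ATA_D_48BIT_ACTIVE flag. A hedged sketch of the expected caller pattern follows; the real users are ata_generic_command and ata_promise_apkt later in this diff, and the "mychip" name plus the elided register writes are illustrative only.

static int
ata_mychip_command(struct ata_request *request)
{
    struct ata_device *atadev = device_get_softc(request->dev);
    u_int8_t command = ata_modify_if_48bit(request);

    if (atadev->flags & ATA_D_48BIT_ACTIVE) {
        /* program the 48bit taskfile: high bytes first, then low bytes */
    }
    else {
        /* program the classic 28bit taskfile */
    }
    /* finally write 'command' to the command register */
    return 0;
}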
void
ata_udelay(int interval)
{

View File

@ -336,6 +336,7 @@ struct ata_device {
#define ATA_D_USE_CHS 0x0001
#define ATA_D_MEDIA_CHANGED 0x0002
#define ATA_D_ENC_PRESENT 0x0004
#define ATA_D_48BIT_ACTIVE 0x0008
};
/* structure for holding DMA Physical Region Descriptors (PRD) entries */
@ -347,6 +348,7 @@ struct ata_dma_prdentry {
/* structure used by the setprd function */
struct ata_dmasetprd_args {
void *dmatab;
int nsegs;
int error;
};
@ -374,20 +376,21 @@ struct ata_dma {
#define ATA_DMA_LOADED 0x02 /* DMA tables etc loaded */
#define ATA_DMA_ACTIVE 0x04 /* DMA transfer in progress */
void (*alloc)(device_t);
void (*free)(device_t);
void (*alloc)(device_t dev);
void (*free)(device_t dev);
void (*setprd)(void *xsc, bus_dma_segment_t *segs, int nsegs, int error);
int (*load)(device_t, caddr_t data, int32_t count,int dir);
int (*unload)(device_t);
int (*start)(device_t);
int (*stop)(device_t);
int (*load)(device_t dev, caddr_t data, int32_t count, int dir, void *addr, int *nsegs);
int (*unload)(device_t dev);
int (*start)(device_t dev);
int (*stop)(device_t dev);
void (*reset)(device_t dev);
};
/* structure holding lowlevel functions */
struct ata_lowlevel {
int (*begin_transaction)(struct ata_request *request);
int (*end_transaction)(struct ata_request *request);
int (*command)(device_t dev, u_int8_t command, u_int64_t lba, u_int16_t count, u_int16_t feature);
int (*command)(struct ata_request *request);
};
/* structure holding resources for an ATA channel */
@ -409,7 +412,6 @@ struct ata_channel {
#define ATA_NO_SLAVE 0x01
#define ATA_USE_16BIT 0x02
#define ATA_ATAPI_DMA_RO 0x04
#define ATA_48BIT_ACTIVE 0x08
int devices; /* what is present */
#define ATA_ATA_MASTER 0x01
@ -422,7 +424,6 @@ struct ata_channel {
#define ATA_IDLE 0x0000
#define ATA_ACTIVE 0x0001
#define ATA_STALL_QUEUE 0x0002
#define ATA_TIMEOUT 0x0004
struct mtx queue_mtx; /* queue lock */
TAILQ_HEAD(, ata_request) ata_queue; /* head of ATA queue */
@ -452,6 +453,7 @@ int ata_suspend(device_t dev);
int ata_resume(device_t dev);
int ata_identify(device_t dev);
void ata_default_registers(device_t dev);
u_int8_t ata_modify_if_48bit(struct ata_request *request);
void ata_udelay(int interval);
char *ata_mode2str(int mode);
int ata_pmode(struct ata_params *ap);
@ -473,7 +475,7 @@ char *ata_cmd2str(struct ata_request *request);
/* ata-lowlevel.c: */
void ata_generic_hw(device_t dev);
void ata_generic_reset(device_t dev);
int ata_generic_command(device_t dev, u_int8_t command, u_int64_t lba, u_int16_t count, u_int16_t feature);
int ata_generic_command(struct ata_request *request);
/* macros for alloc/free of struct ata_request */
extern uma_zone_t ata_request_zone;

View File

@ -91,11 +91,11 @@ static void ata_promise_mio_intr(void *);
static void ata_promise_sx4_intr(void *);
static void ata_promise_mio_dmainit(device_t);
static void ata_promise_mio_reset(device_t);
static int ata_promise_mio_command(device_t dev, u_int8_t command, u_int64_t lba, u_int16_t count, u_int16_t feature);
static int ata_promise_sx4_command(device_t dev, u_int8_t command, u_int64_t lba, u_int16_t count, u_int16_t feature);
static int ata_promise_apkt(u_int8_t *bytep, device_t dev, u_int8_t command, u_int64_t lba, u_int16_t count, u_int16_t feature);
static void ata_promise_queue_hpkt(struct ata_pci_controller *ctlr, u_int32_t hpkt);
static void ata_promise_next_hpkt(struct ata_pci_controller *ctlr);
static int ata_promise_mio_command(struct ata_request *);
static int ata_promise_sx4_command(struct ata_request *);
static int ata_promise_apkt(u_int8_t *, struct ata_request *);
static void ata_promise_queue_hpkt(struct ata_pci_controller *, u_int32_t);
static void ata_promise_next_hpkt(struct ata_pci_controller *);
static void ata_promise_tx2_intr(void *);
static void ata_promise_old_intr(void *);
static void ata_promise_new_dmainit(device_t);
@ -119,7 +119,7 @@ static int ata_via_allocate(device_t dev);
static void ata_via_reset(device_t);
static void ata_via_southbridge_fixup(device_t);
static void ata_via_family_setmode(device_t, int);
static void ata_print_cable(device_t dev, u_int8_t *who);
static void ata_print_cable(device_t, u_int8_t *);
static int ata_atapi(device_t);
static int ata_check_80pin(device_t, int);
static struct ata_chip_id *ata_find_chip(device_t, struct ata_chip_id *, int);
@ -226,7 +226,7 @@ ata_sata_connect(struct ata_channel *ch)
ata_udelay(10000);
}
if (timeout >= 100) {
if (1 | bootverbose)
if (bootverbose)
device_printf(ch->dev, "SATA connect status=%08x\n", status);
return 0;
}
@ -235,9 +235,9 @@ ata_sata_connect(struct ata_channel *ch)
ATA_IDX_OUTL(ch, ATA_SERROR, ATA_IDX_INL(ch, ATA_SERROR));
/* find out what type of device we got; poll for the spec'd 31 seconds */
ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM);
DELAY(10);
for (timeout = 0; timeout < 3100; timeout++) {
/* XXX SOS 10 secs for now as I have little patience */
ch->devices = 0;
for (timeout = 0; timeout < 1000; timeout++) {
if (ATA_IDX_INB(ch, ATA_STATUS) & ATA_S_BUSY)
DELAY(10000);
else
@ -245,12 +245,17 @@ ata_sata_connect(struct ata_channel *ch)
}
if (1 | bootverbose)
device_printf(ch->dev, "SATA connect ready time=%dms\n", timeout * 10);
if ((ATA_IDX_INB(ch, ATA_CYL_LSB) == ATAPI_MAGIC_LSB) &&
(ATA_IDX_INB(ch, ATA_CYL_MSB) == ATAPI_MAGIC_MSB))
ch->devices = ATA_ATAPI_MASTER;
else
ch->devices = ATA_ATA_MASTER;
if (timeout < 1000) {
if ((ATA_IDX_INB(ch, ATA_CYL_LSB) == ATAPI_MAGIC_LSB) &&
(ATA_IDX_INB(ch, ATA_CYL_MSB) == ATAPI_MAGIC_MSB))
ch->devices = ATA_ATAPI_MASTER;
else /*if ((ATA_IDX_INB(ch, ATA_COUNT) == 0x01) &&
(ATA_IDX_INB(ch, ATA_CYL_LSB) == 0x01)) */
ch->devices = ATA_ATA_MASTER;
}
if (1 | bootverbose)
device_printf(ch->dev, "sata_connect devices=0x%b\n",
ch->devices, "\20\3ATAPI_MASTER\1ATA_MASTER");
return 1;
}
@ -259,8 +264,7 @@ ata_sata_phy_enable(struct ata_channel *ch)
{
int loop, retry;
if ((ATA_IDX_INL(ch, ATA_SCONTROL) &
ATA_SC_DET_MASK) == ATA_SC_DET_IDLE) {
if ((ATA_IDX_INL(ch, ATA_SCONTROL) & ATA_SC_DET_MASK) == ATA_SC_DET_IDLE) {
ata_sata_connect(ch);
return;
}
@ -969,7 +973,6 @@ ata_highpoint_setmode(device_t dev, int mode)
*/
error = ata_controlcmd(dev, ATA_SETFEATURES, ATA_SF_SETXFER, 0,
ata_limit_mode(dev, mode, ATA_UDMA5));
if (bootverbose)
device_printf(dev, "%ssetting %s on HighPoint chip\n",
(error) ? "FAILURE " : "", ata_mode2str(mode));
@ -1630,12 +1633,7 @@ u_int32_t addr;
struct ata_promise_sx4 {
struct mtx mtx;
#if 0
u_int32_t array[ATA_PDC_MAX_HPKT];
int head, tail;
#else
TAILQ_HEAD(, host_packet) queue;
#endif
int busy;
};
@ -2001,25 +1999,6 @@ ata_promise_sx4_intr(void *data)
}
}
static int
ata_promise_mio_dmastart(device_t dev)
{
struct ata_channel *ch = device_get_softc(dev);
ch->flags |= ATA_DMA_ACTIVE;
return 0;
}
static int
ata_promise_mio_dmastop(device_t dev)
{
struct ata_channel *ch = device_get_softc(dev);
ch->flags &= ~ATA_DMA_ACTIVE;
/* get status XXX SOS */
return 0;
}
static void
ata_promise_mio_dmainit(device_t dev)
{
@ -2027,8 +2006,7 @@ ata_promise_mio_dmainit(device_t dev)
ata_dmainit(dev);
if (ch->dma) {
ch->dma->start = ata_promise_mio_dmastart;
ch->dma->stop = ata_promise_mio_dmastop;
/* note start and stop are not used here */
}
}
@ -2132,18 +2110,18 @@ ata_promise_mio_reset(device_t dev)
}
static int
ata_promise_mio_command(device_t dev, u_int8_t command,
u_int64_t lba, u_int16_t count, u_int16_t feature)
ata_promise_mio_command(struct ata_request *request)
{
struct ata_pci_controller *ctlr = device_get_softc(GRANDPARENT(dev));
struct ata_channel *ch = device_get_softc(device_get_parent(dev));
struct ata_pci_controller *ctlr=device_get_softc(GRANDPARENT(request->dev));
struct ata_channel *ch = device_get_softc(device_get_parent(request->dev));
u_int32_t *wordp = (u_int32_t *)ch->dma->work;
ATA_OUTL(ctlr->r_res2, (ch->unit + 1) << 2, 0x00000001);
switch (command) {
/* XXX SOS add ATAPI commands support later */
switch (request->u.ata.command) {
default:
return ata_generic_command(dev, command, lba, count, feature);
return ata_generic_command(request);
case ATA_READ_DMA:
wordp[0] = htole32(0x04 | ((ch->unit + 1) << 16) | (0x00 << 24));
@ -2155,25 +2133,25 @@ ata_promise_mio_command(device_t dev, u_int8_t command,
}
wordp[1] = htole32(ch->dma->sg_bus);
wordp[2] = 0;
ata_promise_apkt((u_int8_t*)wordp, dev, command, lba, count, feature);
ata_promise_apkt((u_int8_t*)wordp, request);
ATA_OUTL(ctlr->r_res2, 0x0240 + (ch->unit << 7), ch->dma->work_bus);
return 0;
}
static int
ata_promise_sx4_command(device_t dev, u_int8_t command,
u_int64_t lba, u_int16_t count, u_int16_t feature)
ata_promise_sx4_command(struct ata_request *request)
{
device_t gparent = GRANDPARENT(dev);
device_t gparent = GRANDPARENT(request->dev);
struct ata_pci_controller *ctlr = device_get_softc(gparent);
struct ata_channel *ch = device_get_softc(device_get_parent(dev));
struct ata_channel *ch = device_get_softc(device_get_parent(request->dev));
struct ata_dma_prdentry *prd = ch->dma->sg;
caddr_t window = rman_get_virtual(ctlr->r_res1);
u_int32_t *wordp;
int i, idx, length = 0;
switch (command) {
/* XXX SOS add ATAPI commands support later */
switch (request->u.ata.command) {
default:
return -1;
@ -2184,7 +2162,7 @@ ata_promise_sx4_command(device_t dev, u_int8_t command,
case ATA_WRITE:
case ATA_WRITE_MUL:
ATA_OUTL(ctlr->r_res2, 0x000c0400 + ((ch->unit + 1) << 2), 0x00000001);
return ata_generic_command(dev, command, lba, count, feature);
return ata_generic_command(request);
case ATA_SETFEATURES:
case ATA_FLUSHCACHE:
@ -2195,7 +2173,7 @@ ata_promise_sx4_command(device_t dev, u_int8_t command,
wordp[0] = htole32(0x08 | ((ch->unit + 1)<<16) | (0x00 << 24));
wordp[1] = 0;
wordp[2] = 0;
ata_promise_apkt((u_int8_t *)wordp, dev, command, lba,count,feature);
ata_promise_apkt((u_int8_t *)wordp, request);
ATA_OUTL(ctlr->r_res2, 0x000c0484, 0x00000001);
ATA_OUTL(ctlr->r_res2, 0x000c0400 + ((ch->unit + 1) << 2), 0x00000001);
ATA_OUTL(ctlr->r_res2, 0x000c0240 + (ch->unit << 7),
@ -2208,27 +2186,26 @@ ata_promise_sx4_command(device_t dev, u_int8_t command,
(window + (ch->unit * ATA_PDC_CHN_OFFSET) + ATA_PDC_HSG_OFFSET);
i = idx = 0;
do {
wordp[idx++] = htole32(prd[i].addr);
wordp[idx++] = htole32(prd[i].count & ~ATA_DMA_EOT);
wordp[idx++] = prd[i].addr;
wordp[idx++] = prd[i].count;
length += (prd[i].count & ~ATA_DMA_EOT);
} while (!(prd[i++].count & ATA_DMA_EOT));
wordp[idx - 1] |= htole32(ATA_DMA_EOT);
wordp = (u_int32_t *)
(window + (ch->unit * ATA_PDC_CHN_OFFSET) + ATA_PDC_LSG_OFFSET);
wordp[0] = htole32((ch->unit * ATA_PDC_BUF_OFFSET) + ATA_PDC_BUF_BASE);
wordp[1] = htole32((count * DEV_BSIZE) | ATA_DMA_EOT);
wordp[1] = htole32(request->bytecount | ATA_DMA_EOT);
wordp = (u_int32_t *)
(window + (ch->unit * ATA_PDC_CHN_OFFSET) + ATA_PDC_ASG_OFFSET);
wordp[0] = htole32((ch->unit * ATA_PDC_BUF_OFFSET) + ATA_PDC_BUF_BASE);
wordp[1] = htole32((count * DEV_BSIZE) | ATA_DMA_EOT);
wordp[1] = htole32(request->bytecount | ATA_DMA_EOT);
wordp = (u_int32_t *)
(window + (ch->unit * ATA_PDC_CHN_OFFSET) + ATA_PDC_HPKT_OFFSET);
if (command == ATA_READ_DMA)
if (request->flags & ATA_R_READ)
wordp[0] = htole32(0x14 | ((ch->unit+9)<<16) | ((ch->unit+5)<<24));
if (command == ATA_WRITE_DMA)
if (request->flags & ATA_R_WRITE)
wordp[0] = htole32(0x00 | ((ch->unit+13)<<16) | (0x00<<24));
wordp[1] = htole32((ch->unit * ATA_PDC_CHN_OFFSET)+ATA_PDC_HSG_OFFSET);
wordp[2] = htole32((ch->unit * ATA_PDC_CHN_OFFSET)+ATA_PDC_LSG_OFFSET);
@ -2236,22 +2213,22 @@ ata_promise_sx4_command(device_t dev, u_int8_t command,
wordp = (u_int32_t *)
(window + (ch->unit * ATA_PDC_CHN_OFFSET) + ATA_PDC_APKT_OFFSET);
if (command == ATA_READ_DMA)
if (request->flags & ATA_R_READ)
wordp[0] = htole32(0x04 | ((ch->unit+5)<<16) | (0x00<<24));
if (command == ATA_WRITE_DMA)
if (request->flags & ATA_R_WRITE)
wordp[0] = htole32(0x10 | ((ch->unit+1)<<16) | ((ch->unit+13)<<24));
wordp[1] = htole32((ch->unit * ATA_PDC_CHN_OFFSET)+ATA_PDC_ASG_OFFSET);
wordp[2] = 0;
ata_promise_apkt((u_int8_t *)wordp, dev, command, lba,count,feature);
ata_promise_apkt((u_int8_t *)wordp, request);
ATA_OUTL(ctlr->r_res2, 0x000c0484, 0x00000001);
if (command == ATA_READ_DMA) {
if (request->flags & ATA_R_READ) {
ATA_OUTL(ctlr->r_res2, 0x000c0400 + ((ch->unit+5)<<2), 0x00000001);
ATA_OUTL(ctlr->r_res2, 0x000c0400 + ((ch->unit+9)<<2), 0x00000001);
ATA_OUTL(ctlr->r_res2, 0x000c0240 + (ch->unit << 7),
htole32((ch->unit * ATA_PDC_CHN_OFFSET) + ATA_PDC_APKT_OFFSET));
}
if (command == ATA_WRITE_DMA) {
if (request->flags & ATA_R_WRITE) {
ATA_OUTL(ctlr->r_res2, 0x000c0400 + ((ch->unit+1)<<2), 0x00000001);
ATA_OUTL(ctlr->r_res2, 0x000c0400 + ((ch->unit+13)<<2), 0x00000001);
ata_promise_queue_hpkt(ctlr,
@ -2262,11 +2239,10 @@ ata_promise_sx4_command(device_t dev, u_int8_t command,
}
static int
ata_promise_apkt(u_int8_t *bytep, device_t dev, u_int8_t command,
u_int64_t lba, u_int16_t count, u_int16_t feature)
ata_promise_apkt(u_int8_t *bytep, struct ata_request *request)
{
struct ata_channel *ch = device_get_softc(device_get_parent(dev));
struct ata_device *atadev = device_get_softc(dev);
struct ata_device *atadev = device_get_softc(request->dev);
u_int8_t command;
int i = 12;
bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_REG | ATA_PDC_WAIT_NBUSY|ATA_DRIVE;
@ -2274,46 +2250,41 @@ ata_promise_apkt(u_int8_t *bytep, device_t dev, u_int8_t command,
bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_CTL;
bytep[i++] = ATA_A_4BIT;
if ((lba >= ATA_MAX_28BIT_LBA || count > 256) &&
(atadev->param.support.command2 & ATA_SUPPORT_ADDRESS48)) {
ch->flags |= ATA_48BIT_ACTIVE;
if (command == ATA_READ_DMA)
command = ATA_READ_DMA48;
if (command == ATA_WRITE_DMA)
command = ATA_WRITE_DMA48;
command = ata_modify_if_48bit(request);
if (atadev->flags & ATA_D_48BIT_ACTIVE) {
bytep[i++] = ATA_PDC_2B | ATA_PDC_WRITE_REG | ATA_FEATURE;
bytep[i++] = (feature >> 8) & 0xff;
bytep[i++] = feature & 0xff;
bytep[i++] = request->u.ata.feature >> 8;
bytep[i++] = request->u.ata.feature;
bytep[i++] = ATA_PDC_2B | ATA_PDC_WRITE_REG | ATA_COUNT;
bytep[i++] = (count >> 8) & 0xff;
bytep[i++] = count & 0xff;
bytep[i++] = request->u.ata.count >> 8;
bytep[i++] = request->u.ata.count;
bytep[i++] = ATA_PDC_2B | ATA_PDC_WRITE_REG | ATA_SECTOR;
bytep[i++] = (lba >> 24) & 0xff;
bytep[i++] = lba & 0xff;
bytep[i++] = request->u.ata.lba >> 24;
bytep[i++] = request->u.ata.lba;
bytep[i++] = ATA_PDC_2B | ATA_PDC_WRITE_REG | ATA_CYL_LSB;
bytep[i++] = (lba >> 32) & 0xff;
bytep[i++] = (lba >> 8) & 0xff;
bytep[i++] = request->u.ata.lba >> 32;
bytep[i++] = request->u.ata.lba >> 8;
bytep[i++] = ATA_PDC_2B | ATA_PDC_WRITE_REG | ATA_CYL_MSB;
bytep[i++] = (lba >> 40) & 0xff;
bytep[i++] = (lba >> 16) & 0xff;
bytep[i++] = request->u.ata.lba >> 40;
bytep[i++] = request->u.ata.lba >> 16;
bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_REG | ATA_DRIVE;
bytep[i++] = ATA_D_LBA | atadev->unit;
}
else {
ch->flags &= ~ATA_48BIT_ACTIVE;
bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_REG | ATA_FEATURE;
bytep[i++] = feature;
bytep[i++] = request->u.ata.feature;
bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_REG | ATA_COUNT;
bytep[i++] = count;
bytep[i++] = request->u.ata.count;
bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_REG | ATA_SECTOR;
bytep[i++] = lba & 0xff;
bytep[i++] = request->u.ata.lba;
bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_REG | ATA_CYL_LSB;
bytep[i++] = (lba >> 8) & 0xff;
bytep[i++] = request->u.ata.lba >> 8;
bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_REG | ATA_CYL_MSB;
bytep[i++] = (lba >> 16) & 0xff;
bytep[i++] = request->u.ata.lba >> 16;
bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_REG | ATA_DRIVE;
bytep[i++] = (atadev->flags & ATA_D_USE_CHS ? 0 : ATA_D_LBA) |
ATA_D_IBM | atadev->unit | ((lba >> 24) & 0xf);
ATA_D_IBM | atadev->unit | ((request->u.ata.lba >> 24)&0xf);
}
bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_END | ATA_COMMAND;
bytep[i++] = command;
@ -2326,14 +2297,6 @@ ata_promise_queue_hpkt(struct ata_pci_controller *ctlr, u_int32_t hpkt)
struct ata_promise_sx4 *hpktp = device_get_ivars(ctlr->dev);
mtx_lock(&hpktp->mtx);
#if 0
if (hpktp->tail == hpktp->head && !hpktp->busy) {
ATA_OUTL(ctlr->r_res2, 0x000c0100, hpkt);
hpktp->busy = 1;
}
else
hpktp->array[(hpktp->head++) & (ATA_PDC_MAX_HPKT - 1)] = hpkt;
#else
if (hpktp->busy) {
struct host_packet *hp =
malloc(sizeof(struct host_packet), M_TEMP, M_NOWAIT | M_ZERO);
@ -2344,7 +2307,6 @@ ata_promise_queue_hpkt(struct ata_pci_controller *ctlr, u_int32_t hpkt)
hpktp->busy = 1;
ATA_OUTL(ctlr->r_res2, 0x000c0100, hpkt);
}
#endif
mtx_unlock(&hpktp->mtx);
}
@ -2355,18 +2317,11 @@ ata_promise_next_hpkt(struct ata_pci_controller *ctlr)
struct host_packet *hp;
mtx_lock(&hpktp->mtx);
#if 0
if (hpktp->tail != hpktp->head) {
ATA_OUTL(ctlr->r_res2, 0x000c0100,
hpktp->array[(hpktp->tail++) & (ATA_PDC_MAX_HPKT - 1)]);
}
#else
if ((hp = TAILQ_FIRST(&hpktp->queue))) {
TAILQ_REMOVE(&hpktp->queue, hp, chain);
ATA_OUTL(ctlr->r_res2, 0x000c0100, hp->addr);
free(hp, M_TEMP);
}
#endif
else
hpktp->busy = 0;
mtx_unlock(&hpktp->mtx);
@ -2428,10 +2383,11 @@ ata_promise_old_intr(void *data)
static int
ata_promise_new_dmastart(device_t dev)
{
struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
struct ata_channel *ch = device_get_softc(dev);
struct ata_pci_controller *ctlr = device_get_softc(GRANDPARENT(dev));
struct ata_channel *ch = device_get_softc(device_get_parent(dev));
struct ata_device *atadev = device_get_softc(dev);
if (ch->flags & ATA_48BIT_ACTIVE) {
if (atadev->flags & ATA_D_48BIT_ACTIVE) {
ATA_OUTB(ctlr->r_res1, 0x11,
ATA_INB(ctlr->r_res1, 0x11) | (ch->unit ? 0x08 : 0x02));
ATA_OUTL(ctlr->r_res1, 0x20,
@ -2451,11 +2407,12 @@ ata_promise_new_dmastart(device_t dev)
static int
ata_promise_new_dmastop(device_t dev)
{
struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
struct ata_channel *ch = device_get_softc(dev);
struct ata_pci_controller *ctlr = device_get_softc(GRANDPARENT(dev));
struct ata_channel *ch = device_get_softc(device_get_parent(dev));
struct ata_device *atadev = device_get_softc(dev);
int error;
if (ch->flags & ATA_48BIT_ACTIVE) {
if (atadev->flags & ATA_D_48BIT_ACTIVE) {
ATA_OUTB(ctlr->r_res1, 0x11,
ATA_INB(ctlr->r_res1, 0x11) & ~(ch->unit ? 0x08 : 0x02));
ATA_OUTL(ctlr->r_res1, 0x20, 0);
@ -2463,11 +2420,22 @@ ata_promise_new_dmastop(device_t dev)
error = ATA_IDX_INB(ch, ATA_BMSTAT_PORT);
ATA_IDX_OUTB(ch, ATA_BMCMD_PORT,
ATA_IDX_INB(ch, ATA_BMCMD_PORT) & ~ATA_BMCMD_START_STOP);
ch->flags &= ~ATA_DMA_ACTIVE;
ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, ATA_BMSTAT_INTERRUPT | ATA_BMSTAT_ERROR);
ch->flags &= ~ATA_DMA_ACTIVE;
return error;
}
static void
ata_promise_new_dmareset(device_t dev)
{
struct ata_channel *ch = device_get_softc(dev);
ATA_IDX_OUTB(ch, ATA_BMCMD_PORT,
ATA_IDX_INB(ch, ATA_BMCMD_PORT) & ~ATA_BMCMD_START_STOP);
ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, ATA_BMSTAT_INTERRUPT | ATA_BMSTAT_ERROR);
ch->flags &= ~ATA_DMA_ACTIVE;
}
static void
ata_promise_new_dmainit(device_t dev)
{
@ -2477,6 +2445,7 @@ ata_promise_new_dmainit(device_t dev)
if (ch->dma) {
ch->dma->start = ata_promise_new_dmastart;
ch->dma->stop = ata_promise_new_dmastop;
ch->dma->reset = ata_promise_new_dmareset;
}
}

View File

@ -50,7 +50,7 @@ __FBSDID("$FreeBSD$");
static void ata_dmaalloc(device_t);
static void ata_dmafree(device_t);
static void ata_dmasetprd(void *, bus_dma_segment_t *, int, int);
static int ata_dmaload(device_t, caddr_t, int32_t, int);
static int ata_dmaload(device_t, caddr_t, int32_t, int, void *, int *);
static int ata_dmaunload(device_t);
/* local vars */
@ -213,10 +213,12 @@ ata_dmasetprd(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
prd[i].count = htole32(segs[i].ds_len);
}
prd[i - 1].count |= htole32(ATA_DMA_EOT);
args->nsegs = nsegs;
}
static int
ata_dmaload(device_t dev, caddr_t data, int32_t count, int dir)
ata_dmaload(device_t dev, caddr_t data, int32_t count, int dir,
void *addr, int *entries)
{
struct ata_channel *ch = device_get_softc(dev);
struct ata_dmasetprd_args cba;
@ -240,12 +242,14 @@ ata_dmaload(device_t dev, caddr_t data, int32_t count, int dir)
return -1;
}
cba.dmatab = ch->dma->sg;
cba.dmatab = addr;
if (bus_dmamap_load(ch->dma->data_tag, ch->dma->data_map, data, count,
ch->dma->setprd, &cba, 0) || cba.error)
return -1;
*entries = cba.nsegs;
bus_dmamap_sync(ch->dma->sg_tag, ch->dma->sg_map, BUS_DMASYNC_PREWRITE);
bus_dmamap_sync(ch->dma->data_tag, ch->dma->data_map,
@ -260,14 +264,18 @@ int
ata_dmaunload(device_t dev)
{
struct ata_channel *ch = device_get_softc(dev);
bus_dmamap_sync(ch->dma->sg_tag, ch->dma->sg_map, BUS_DMASYNC_POSTWRITE);
bus_dmamap_sync(ch->dma->data_tag, ch->dma->data_map,
(ch->dma->flags & ATA_DMA_READ) != 0 ?
BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(ch->dma->data_tag, ch->dma->data_map);
if (ch->dma->flags & ATA_DMA_LOADED) {
bus_dmamap_sync(ch->dma->sg_tag, ch->dma->sg_map,
BUS_DMASYNC_POSTWRITE);
ch->dma->cur_iosize = 0;
ch->dma->flags &= ~ATA_DMA_LOADED;
bus_dmamap_sync(ch->dma->data_tag, ch->dma->data_map,
(ch->dma->flags & ATA_DMA_READ) != 0 ?
BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(ch->dma->data_tag, ch->dma->data_map);
ch->dma->cur_iosize = 0;
ch->dma->flags &= ~ATA_DMA_LOADED;
}
return 0;
}
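
With the extra addr/entries arguments, load() no longer assumes that the generic PRD table hanging off ch->dma->sg is the only destination; a controller that keeps its scatter/gather list elsewhere, or needs the segment count for its packet format, can pass its own pointer and read back how many entries the setprd callback built. A hedged sketch of such a caller, with "mychip"/"my_table" as hypothetical names and error handling kept illustrative:

static int
ata_mychip_load_sg(struct ata_request *request, void *my_table)
{
    struct ata_channel *ch =
        device_get_softc(device_get_parent(request->dev));
    int nsegs;

    if (ch->dma->load(ch->dev, request->data, request->bytecount,
                      request->flags & ATA_R_READ, my_table, &nsegs))
        return EIO;

    /* the setprd callback filled 'nsegs' entries into my_table */
    return 0;
}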

View File

@ -74,6 +74,7 @@ ata_begin_transaction(struct ata_request *request)
{
struct ata_channel *ch = device_get_softc(device_get_parent(request->dev));
struct ata_device *atadev = device_get_softc(request->dev);
int dummy;
ATA_DEBUG_RQ(request, "begin transaction");
@ -92,9 +93,7 @@ ata_begin_transaction(struct ata_request *request)
int write = (request->flags & ATA_R_WRITE);
/* issue command */
if (ch->hw.command(request->dev, request->u.ata.command,
request->u.ata.lba, request->u.ata.count,
request->u.ata.feature)) {
if (ch->hw.command(request)) {
device_printf(request->dev, "error issueing %s command\n",
ata_cmd2str(request));
request->result = EIO;
@ -115,8 +114,7 @@ ata_begin_transaction(struct ata_request *request)
/* if write command output the data */
if (write) {
if (ata_wait(ch, atadev,
(ATA_S_READY | ATA_S_DSC | ATA_S_DRQ)) < 0) {
if (ata_wait(ch, atadev, (ATA_S_READY | ATA_S_DRQ)) < 0) {
device_printf(request->dev,"timeout waiting for write DRQ");
request->result = EIO;
goto begin_finished;
@ -130,16 +128,14 @@ ata_begin_transaction(struct ata_request *request)
case ATA_R_DMA:
/* check sanity, setup SG list and DMA engine */
if (ch->dma->load(ch->dev, request->data, request->bytecount,
request->flags & ATA_R_READ)) {
request->flags & ATA_R_READ, ch->dma->sg, &dummy)) {
device_printf(request->dev, "setting up DMA failed\n");
request->result = EIO;
goto begin_finished;
}
/* issue command */
if (ch->hw.command(request->dev, request->u.ata.command,
request->u.ata.lba, request->u.ata.count,
request->u.ata.feature)) {
if (ch->hw.command(request)) {
device_printf(request->dev, "error issueing %s command\n",
ata_cmd2str(request));
request->result = EIO;
@ -147,7 +143,7 @@ ata_begin_transaction(struct ata_request *request)
}
/* start DMA engine */
if (ch->dma->start(ch->dev)) {
if (ch->dma->start && ch->dma->start(request->dev)) {
device_printf(request->dev, "error starting DMA\n");
request->result = EIO;
goto begin_finished;
@ -166,44 +162,11 @@ ata_begin_transaction(struct ata_request *request)
}
/* start ATAPI operation */
if (ch->hw.command(request->dev, ATA_PACKET_CMD,
request->transfersize << 8, 0, 0)) {
if (ch->hw.command(request)) {
device_printf(request->dev, "error issuing ATA PACKET command\n");
request->result = EIO;
goto begin_finished;
}
/* command interrupt device ? just return and wait for interrupt */
if ((atadev->param.config & ATA_DRQ_MASK) == ATA_DRQ_INTR)
goto begin_continue;
/* wait for ready to write ATAPI command block */
{
int timeout = 5000; /* might be less for fast devices */
while (timeout--) {
int reason = ATA_IDX_INB(ch, ATA_IREASON);
int status = ATA_IDX_INB(ch, ATA_STATUS);
if (((reason & (ATA_I_CMD | ATA_I_IN)) |
(status & (ATA_S_DRQ | ATA_S_BUSY))) == ATAPI_P_CMDOUT)
break;
DELAY(20);
}
if (timeout <= 0) {
device_printf(request->dev,"timeout waiting for ATAPI ready\n");
request->result = EIO;
goto begin_finished;
}
}
/* this seems to be needed for some (slow) devices */
DELAY(10);
/* output actual command block */
ATA_IDX_OUTSW_STRM(ch, ATA_DATA,
(int16_t *)request->u.atapi.ccb,
(atadev->param.config & ATA_PROTO_MASK) ==
ATA_PROTO_ATAPI_12 ? 6 : 8);
goto begin_continue;
/* ATAPI DMA commands */
@ -219,49 +182,21 @@ ata_begin_transaction(struct ata_request *request)
/* check sanity, setup SG list and DMA engine */
if (ch->dma->load(ch->dev, request->data, request->bytecount,
request->flags & ATA_R_READ)) {
request->flags & ATA_R_READ, ch->dma->sg, &dummy)) {
device_printf(request->dev, "setting up DMA failed\n");
request->result = EIO;
goto begin_finished;
}
/* start ATAPI operation */
if (ch->hw.command(request->dev, ATA_PACKET_CMD, 0, 0, ATA_F_DMA)) {
device_printf(request->dev, "error issuing ATAPI packet command\n");
if (ch->hw.command(request)) {
device_printf(request->dev, "error issuing ATA PACKET command\n");
request->result = EIO;
goto begin_finished;
}
/* wait for ready to write ATAPI command block */
{
int timeout = 5000; /* might be less for fast devices */
while (timeout--) {
int reason = ATA_IDX_INB(ch, ATA_IREASON);
int status = ATA_IDX_INB(ch, ATA_STATUS);
if (((reason & (ATA_I_CMD | ATA_I_IN)) |
(status & (ATA_S_DRQ | ATA_S_BUSY))) == ATAPI_P_CMDOUT)
break;
DELAY(20);
}
if (timeout <= 0) {
device_printf(request->dev,"timeout waiting for ATAPI ready\n");
request->result = EIO;
goto begin_finished;
}
}
/* this seems to be needed for some (slow) devices */
DELAY(10);
/* output actual command block */
ATA_IDX_OUTSW_STRM(ch, ATA_DATA,
(int16_t *)request->u.atapi.ccb,
(atadev->param.config & ATA_PROTO_MASK) ==
ATA_PROTO_ATAPI_12 ? 6 : 8);
/* start DMA engine */
if (ch->dma->start(ch->dev)) {
if (ch->dma->start && ch->dma->start(request->dev)) {
request->result = EIO;
goto begin_finished;
}
@ -305,7 +240,7 @@ ata_end_transaction(struct ata_request *request)
/* on control commands read back registers to the request struct */
if (request->flags & ATA_R_CONTROL) {
if (ch->flags & ATA_48BIT_ACTIVE) {
if (atadev->flags & ATA_D_48BIT_ACTIVE) {
ATA_IDX_OUTB(ch, ATA_CONTROL, ATA_A_4BIT | ATA_A_HOB);
request->u.ata.count = (ATA_IDX_INB(ch, ATA_COUNT) << 8);
request->u.ata.lba =
@ -340,8 +275,7 @@ ata_end_transaction(struct ata_request *request)
/* if read data get it */
if (request->flags & ATA_R_READ) {
if (ata_wait(ch, atadev,
(ATA_S_READY | ATA_S_DSC | ATA_S_DRQ)) < 0) {
if (ata_wait(ch, atadev, (ATA_S_READY | ATA_S_DRQ)) < 0) {
device_printf(request->dev, "timeout waiting for read DRQ");
request->result = EIO;
goto end_finished;
@ -364,8 +298,7 @@ ata_end_transaction(struct ata_request *request)
if (request->flags & ATA_R_WRITE) {
/* if we get an error here we are done with the HW */
if (ata_wait(ch, atadev,
(ATA_S_READY | ATA_S_DSC | ATA_S_DRQ)) < 0) {
if (ata_wait(ch, atadev, (ATA_S_READY | ATA_S_DRQ)) < 0) {
device_printf(request->dev,
"timeout waiting for write DRQ");
request->status = ATA_IDX_INB(ch, ATA_STATUS);
@ -390,7 +323,7 @@ ata_end_transaction(struct ata_request *request)
/* stop DMA engine and get status */
if (ch->dma->stop)
request->dmastat = ch->dma->stop(ch->dev);
request->dmastat = ch->dma->stop(request->dev);
/* did we get error or data */
if (request->status & ATA_S_ERROR)
@ -500,9 +433,9 @@ ata_end_transaction(struct ata_request *request)
/* ATAPI DMA commands */
case ATA_R_ATAPI|ATA_R_DMA:
/* stop the engine and get engine status */
/* stop DMA engine and get status */
if (ch->dma->stop)
request->dmastat = ch->dma->stop(ch->dev);
request->dmastat = ch->dma->stop(request->dev);
/* did we get error or data */
if (request->status & (ATA_S_ERROR | ATA_S_DWF))
@ -716,97 +649,112 @@ ata_wait(struct ata_channel *ch, struct ata_device *atadev, u_int8_t mask)
}
int
ata_generic_command(device_t dev, u_int8_t command,
u_int64_t lba, u_int16_t count, u_int16_t feature)
ata_generic_command(struct ata_request *request)
{
struct ata_channel *ch = device_get_softc(device_get_parent(dev));
struct ata_device *atadev = device_get_softc(dev);
struct ata_channel *ch = device_get_softc(device_get_parent(request->dev));
struct ata_device *atadev = device_get_softc(request->dev);
/* select device */
ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | ATA_D_LBA | atadev->unit);
/* ready to issue command ? */
if (ata_wait(ch, atadev, 0) < 0) {
device_printf(dev, "timeout sending command=%02x\n", command);
device_printf(request->dev, "timeout waiting to issue command\n");
return -1;
}
/* enable interrupt */
ATA_IDX_OUTB(ch, ATA_CONTROL, ATA_A_4BIT);
/* only use 48bit addressing if needed (avoid bugs and overhead) */
if ((lba >= ATA_MAX_28BIT_LBA || count > 256) &&
atadev->param.support.command2 & ATA_SUPPORT_ADDRESS48) {
if (request->flags & ATA_R_ATAPI) {
int timeout = 5000;
/* translate command into 48bit version */
switch (command) {
case ATA_READ:
command = ATA_READ48; break;
case ATA_READ_MUL:
command = ATA_READ_MUL48; break;
case ATA_READ_DMA:
command = ATA_READ_DMA48; break;
case ATA_READ_DMA_QUEUED:
command = ATA_READ_DMA_QUEUED48; break;
case ATA_WRITE:
command = ATA_WRITE48; break;
case ATA_WRITE_MUL:
command = ATA_WRITE_MUL48; break;
case ATA_WRITE_DMA:
command = ATA_WRITE_DMA48; break;
case ATA_WRITE_DMA_QUEUED:
command = ATA_WRITE_DMA_QUEUED48; break;
case ATA_FLUSHCACHE:
command = ATA_FLUSHCACHE48; break;
default:
device_printf(dev,"can't translate cmd to 48bit version\n");
return -1;
}
ATA_IDX_OUTB(ch, ATA_FEATURE, (feature>>8) & 0xff);
ATA_IDX_OUTB(ch, ATA_FEATURE, feature & 0xff);
ATA_IDX_OUTB(ch, ATA_COUNT, (count>>8) & 0xff);
ATA_IDX_OUTB(ch, ATA_COUNT, count & 0xff);
ATA_IDX_OUTB(ch, ATA_SECTOR, (lba>>24) & 0xff);
ATA_IDX_OUTB(ch, ATA_SECTOR, lba & 0xff);
ATA_IDX_OUTB(ch, ATA_CYL_LSB, (lba>>32) & 0xff);
ATA_IDX_OUTB(ch, ATA_CYL_LSB, (lba>>8) & 0xff);
ATA_IDX_OUTB(ch, ATA_CYL_MSB, (lba>>40) & 0xff);
ATA_IDX_OUTB(ch, ATA_CYL_MSB, (lba>>16) & 0xff);
ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_LBA | atadev->unit);
ch->flags |= ATA_48BIT_ACTIVE;
/* issue packet command to controller */
ATA_IDX_OUTB(ch, ATA_CYL_LSB, request->transfersize);
ATA_IDX_OUTB(ch, ATA_CYL_MSB, request->transfersize >> 8);
ATA_IDX_OUTB(ch, ATA_COMMAND, ATA_PACKET_CMD);
/* command interrupt device ? just return and wait for interrupt */
if ((atadev->param.config & ATA_DRQ_MASK) == ATA_DRQ_INTR)
return 0;
/* wait for ready to write ATAPI command block */
while (timeout--) {
int reason = ATA_IDX_INB(ch, ATA_IREASON);
int status = ATA_IDX_INB(ch, ATA_STATUS);
if (((reason & (ATA_I_CMD | ATA_I_IN)) |
(status & (ATA_S_DRQ | ATA_S_BUSY))) == ATAPI_P_CMDOUT)
break;
DELAY(20);
}
if (timeout <= 0) {
device_printf(request->dev,"timeout waiting for ATAPI ready\n");
request->result = EIO;
return -1;
}
/* this seems to be needed for some (slow) devices */
DELAY(10);
/* output command block */
ATA_IDX_OUTSW_STRM(ch, ATA_DATA, (int16_t *)request->u.atapi.ccb,
(atadev->param.config & ATA_PROTO_MASK) ==
ATA_PROTO_ATAPI_12 ? 6 : 8);
}
else {
ATA_IDX_OUTB(ch, ATA_FEATURE, feature);
ATA_IDX_OUTB(ch, ATA_COUNT, count);
if (atadev->flags & ATA_D_USE_CHS) {
int heads, sectors;
u_int8_t command = ata_modify_if_48bit(request);
if (atadev->param.atavalid & ATA_FLAG_54_58) {
heads = atadev->param.current_heads;
sectors = atadev->param.current_sectors;
if (atadev->flags & ATA_D_48BIT_ACTIVE) {
ATA_IDX_OUTB(ch, ATA_FEATURE, request->u.ata.feature >> 8);
ATA_IDX_OUTB(ch, ATA_FEATURE, request->u.ata.feature);
ATA_IDX_OUTB(ch, ATA_COUNT, request->u.ata.count >> 8);
ATA_IDX_OUTB(ch, ATA_COUNT, request->u.ata.count);
ATA_IDX_OUTB(ch, ATA_SECTOR, request->u.ata.lba >> 24);
ATA_IDX_OUTB(ch, ATA_SECTOR, request->u.ata.lba);
ATA_IDX_OUTB(ch, ATA_CYL_LSB, request->u.ata.lba >> 32);
ATA_IDX_OUTB(ch, ATA_CYL_LSB, request->u.ata.lba >> 8);
ATA_IDX_OUTB(ch, ATA_CYL_MSB, request->u.ata.lba >> 40);
ATA_IDX_OUTB(ch, ATA_CYL_MSB, request->u.ata.lba >> 16);
ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_LBA | atadev->unit);
}
else {
command = request->u.ata.command;
ATA_IDX_OUTB(ch, ATA_FEATURE, request->u.ata.feature);
ATA_IDX_OUTB(ch, ATA_COUNT, request->u.ata.count);
if (atadev->flags & ATA_D_USE_CHS) {
int heads, sectors;
if (atadev->param.atavalid & ATA_FLAG_54_58) {
heads = atadev->param.current_heads;
sectors = atadev->param.current_sectors;
}
else {
heads = atadev->param.heads;
sectors = atadev->param.sectors;
}
ATA_IDX_OUTB(ch, ATA_SECTOR, (request->u.ata.lba % sectors)+1);
ATA_IDX_OUTB(ch, ATA_CYL_LSB,
(request->u.ata.lba / (sectors * heads)));
ATA_IDX_OUTB(ch, ATA_CYL_MSB,
(request->u.ata.lba / (sectors * heads)) >> 8);
ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | atadev->unit |
(((request->u.ata.lba% (sectors * heads)) /
sectors) & 0xf));
}
else {
heads = atadev->param.heads;
sectors = atadev->param.sectors;
ATA_IDX_OUTB(ch, ATA_SECTOR, request->u.ata.lba);
ATA_IDX_OUTB(ch, ATA_CYL_LSB, request->u.ata.lba >> 8);
ATA_IDX_OUTB(ch, ATA_CYL_MSB, request->u.ata.lba >> 16);
ATA_IDX_OUTB(ch, ATA_DRIVE,
ATA_D_IBM | ATA_D_LBA | atadev->unit |
((request->u.ata.lba >> 24) & 0x0f));
}
ATA_IDX_OUTB(ch, ATA_SECTOR, (lba % sectors) + 1);
ATA_IDX_OUTB(ch, ATA_CYL_LSB, (lba / (sectors * heads)));
ATA_IDX_OUTB(ch, ATA_CYL_MSB, (lba / (sectors * heads)) >> 8);
ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | atadev->unit |
(((lba % (sectors * heads)) / sectors) & 0xf));
}
else {
ATA_IDX_OUTB(ch, ATA_SECTOR, lba & 0xff);
ATA_IDX_OUTB(ch, ATA_CYL_LSB, (lba>>8) & 0xff);
ATA_IDX_OUTB(ch, ATA_CYL_MSB, (lba>>16) & 0xff);
ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | ATA_D_LBA | atadev->unit |
((lba>>24) & 0x0f));
}
ch->flags &= ~ATA_48BIT_ACTIVE;
}
/* issue command to controller */
ATA_IDX_OUTB(ch, ATA_COMMAND, command);
/* issue command to controller */
ATA_IDX_OUTB(ch, ATA_COMMAND, command);
}
return 0;
}

View File

@ -419,7 +419,7 @@ ata_pci_allocate(device_t dev)
static int
ata_pci_dmastart(device_t dev)
{
struct ata_channel *ch = device_get_softc(dev);
struct ata_channel *ch = device_get_softc(device_get_parent(dev));
ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, (ATA_IDX_INB(ch, ATA_BMSTAT_PORT) |
(ATA_BMSTAT_INTERRUPT | ATA_BMSTAT_ERROR)));
@ -435,7 +435,7 @@ ata_pci_dmastart(device_t dev)
static int
ata_pci_dmastop(device_t dev)
{
struct ata_channel *ch = device_get_softc(dev);
struct ata_channel *ch = device_get_softc(device_get_parent(dev));
int error;
ATA_IDX_OUTB(ch, ATA_BMCMD_PORT,
@ -446,6 +446,18 @@ ata_pci_dmastop(device_t dev)
return error;
}
static void
ata_pci_dmareset(device_t dev)
{
struct ata_channel *ch = device_get_softc(dev);
ATA_IDX_OUTB(ch, ATA_BMCMD_PORT,
ATA_IDX_INB(ch, ATA_BMCMD_PORT) & ~ATA_BMCMD_START_STOP);
ch->dma->flags &= ~ATA_DMA_ACTIVE;
ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, ATA_BMSTAT_INTERRUPT | ATA_BMSTAT_ERROR);
ch->dma->unload(dev);
}
static void
ata_pci_dmainit(device_t dev)
{
@ -455,6 +467,7 @@ ata_pci_dmainit(device_t dev)
if (ch->dma) {
ch->dma->start = ata_pci_dmastart;
ch->dma->stop = ata_pci_dmastop;
ch->dma->reset = ata_pci_dmareset;
}
}
@ -568,12 +581,11 @@ ata_pcichannel_reset(device_t dev)
struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
struct ata_channel *ch = device_get_softc(dev);
/* if DMA functionality present stop it */
/* if DMA engine present reset it */
if (ch->dma) {
if (ch->dma->flags & ATA_DMA_ACTIVE)
ch->dma->stop(dev);
if (ch->dma->flags & ATA_DMA_LOADED)
ch->dma->unload(dev);
if (ch->dma->reset)
ch->dma->reset(dev);
ch->dma->unload(dev);
}
/* reset the controller HW */

View File

@ -148,6 +148,7 @@ ata_atapicmd(device_t dev, u_int8_t *ccb, caddr_t data,
request->transfersize = min(request->bytecount, 65534);
request->flags = flags | ATA_R_ATAPI;
request->timeout = timeout;
request->retries = 0;
ata_queue_request(request);
error = request->result;
ata_free_request(request);
@ -454,8 +455,6 @@ ata_timeout(struct ata_request *request)
*/
if (ch->state == ATA_ACTIVE || ch->state == ATA_STALL_QUEUE) {
request->flags |= ATA_R_TIMEOUT;
ch->state |= ATA_TIMEOUT;
ch->running = NULL;
mtx_unlock(&ch->state_mtx);
ATA_LOCKING(ch->dev, ATA_LF_UNLOCK);
ata_finish(request);

View File

@ -53,6 +53,7 @@ __FBSDID("$FreeBSD$");
#include <geom/geom.h>
#include <dev/ata/ata-all.h>
#include <dev/ata/atapi-cd.h>
#include <dev/ata/ata-commands.h>
#include <ata_if.h>
/* prototypes */
@ -128,6 +129,7 @@ acd_attach(device_t dev)
cdp->block_size = 2048;
device_set_ivars(dev, cdp);
ATA_SETMODE(device_get_parent(dev), dev);
ata_controlcmd(dev, ATA_DEVICE_RESET, 0, 0, 0);
acd_get_cap(dev);
g_post_event(acd_geom_attach, dev, M_WAITOK, NULL);
@ -1000,7 +1002,7 @@ acd_read_toc(device_t dev)
#ifdef ACD_DEBUG
if (cdp->disk_size && cdp->toc.hdr.ending_track) {
device_printd(dev, "(%d sectors (%d bytes)), %d tracks ",
device_printf(dev, "(%d sectors (%d bytes)), %d tracks ",
cdp->disk_size, cdp->block_size,
cdp->toc.hdr.ending_track-cdp->toc.hdr.starting_track+1);
if (cdp->toc.tab[0].control & 4)
@ -1249,9 +1251,6 @@ acd_send_cue(device_t dev, struct cdr_cuesheet *cuesheet)
0, 0, 0, 0, 0, 0, 0 };
int8_t *buffer;
int32_t error;
#ifdef ACD_DEBUG
int i;
#endif
if ((error = acd_mode_sense(dev, ATAPI_CDROM_WRITE_PARAMETERS_PAGE,
(caddr_t)&param, sizeof(param))))
@ -1277,18 +1276,8 @@ acd_send_cue(device_t dev, struct cdr_cuesheet *cuesheet)
if (!(buffer = malloc(cuesheet->len, M_ACD, M_NOWAIT)))
return ENOMEM;
if (!(error = copyin(cuesheet->entries, buffer, cuesheet->len))) {
#ifdef ACD_DEBUG
printf("acd: cuesheet lenght = %d\n", cuesheet->len);
for (i=0; i<cuesheet->len; i++)
if (i%8)
printf(" %02x", buffer[i]);
else
printf("\n%02x", buffer[i]);
printf("\n");
#endif
if (!(error = copyin(cuesheet->entries, buffer, cuesheet->len)))
error = ata_atapicmd(dev, ccb, buffer, cuesheet->len, 0, 30);
}
free(buffer, M_ACD);
return error;
}
@ -1613,9 +1602,6 @@ acd_mode_sense(device_t dev, int page, caddr_t pagebuf, int pagesize)
int error;
error = ata_atapicmd(dev, ccb, pagebuf, pagesize, ATA_R_READ, 10);
#ifdef ACD_DEBUG
atapi_dump("acd: mode sense ", pagebuf, pagesize);
#endif
return error;
}
@ -1625,10 +1611,6 @@ acd_mode_select(device_t dev, caddr_t pagebuf, int pagesize)
int8_t ccb[16] = { ATAPI_MODE_SELECT_BIG, 0x10, 0, 0, 0, 0, 0,
pagesize>>8, pagesize, 0, 0, 0, 0, 0, 0, 0 };
#ifdef ACD_DEBUG
device_printf(dev, "modeselect pagesize=%d\n", pagesize);
atapi_dump("mode select ", pagebuf, pagesize);
#endif
return ata_atapicmd(dev, ccb, pagebuf, pagesize, 0, 30);
}