Change the operation parameter of bus_dmamap_sync() from an
enum to an int and redefine the BUS_DMASYNC_* constants as flags. This allows us to specify several operations in one call to bus_dmamap_sync() as in NetBSD.
This commit is contained in:
parent
e40db2c46e
commit
141bacb048
@ -787,7 +787,7 @@ _bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
|
||||
}
|
||||
|
||||
void
|
||||
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
|
||||
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, int op)
|
||||
{
|
||||
struct bounce_page *bpage;
|
||||
|
||||
@ -798,28 +798,22 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
|
||||
* want to add support for invalidating
|
||||
* the caches on broken hardware
|
||||
*/
|
||||
switch (op) {
|
||||
case BUS_DMASYNC_PREWRITE:
|
||||
if (op & BUS_DMASYNC_PREWRITE) {
|
||||
while (bpage != NULL) {
|
||||
bcopy((void *)bpage->datavaddr,
|
||||
(void *)bpage->vaddr,
|
||||
bpage->datacount);
|
||||
bpage = STAILQ_NEXT(bpage, links);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
case BUS_DMASYNC_POSTREAD:
|
||||
if (op & BUS_DMASYNC_POSTREAD) {
|
||||
while (bpage != NULL) {
|
||||
bcopy((void *)bpage->vaddr,
|
||||
(void *)bpage->datavaddr,
|
||||
bpage->datacount);
|
||||
bpage = STAILQ_NEXT(bpage, links);
|
||||
}
|
||||
break;
|
||||
case BUS_DMASYNC_PREREAD:
|
||||
case BUS_DMASYNC_POSTWRITE:
|
||||
/* No-ops */
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -480,16 +480,12 @@ struct mbuf;
|
||||
struct uio;
|
||||
|
||||
/*
|
||||
* bus_dmasync_op_t
|
||||
*
|
||||
* Operations performed by bus_dmamap_sync().
|
||||
*/
|
||||
typedef enum {
|
||||
BUS_DMASYNC_PREREAD,
|
||||
BUS_DMASYNC_POSTREAD,
|
||||
BUS_DMASYNC_PREWRITE,
|
||||
BUS_DMASYNC_POSTWRITE
|
||||
} bus_dmasync_op_t;
|
||||
#define BUS_DMASYNC_PREREAD 1
|
||||
#define BUS_DMASYNC_POSTREAD 2
|
||||
#define BUS_DMASYNC_PREWRITE 4
|
||||
#define BUS_DMASYNC_POSTWRITE 8
|
||||
|
||||
/*
|
||||
* bus_dma_tag_t
|
||||
@ -625,7 +621,7 @@ int bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
|
||||
/*
|
||||
 * Perform a synchronization operation on the given map.
|
||||
*/
|
||||
void _bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_dmasync_op_t);
|
||||
void _bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, int);
|
||||
#define bus_dmamap_sync(dmat, dmamap, op) \
|
||||
if ((dmamap) != NULL) \
|
||||
_bus_dmamap_sync(dmat, dmamap, op)
|
||||
|
@ -784,39 +784,32 @@ _bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
|
||||
}
|
||||
|
||||
void
|
||||
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
|
||||
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, int op)
|
||||
{
|
||||
struct bounce_page *bpage;
|
||||
|
||||
if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
|
||||
|
||||
/*
|
||||
* Handle data bouncing. We might also
|
||||
* want to add support for invalidating
|
||||
* the caches on broken hardware
|
||||
*/
|
||||
switch (op) {
|
||||
case BUS_DMASYNC_PREWRITE:
|
||||
if (op & BUS_DMASYNC_PREWRITE) {
|
||||
while (bpage != NULL) {
|
||||
bcopy((void *)bpage->datavaddr,
|
||||
(void *)bpage->vaddr,
|
||||
bpage->datacount);
|
||||
bpage = STAILQ_NEXT(bpage, links);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
case BUS_DMASYNC_POSTREAD:
|
||||
if (op & BUS_DMASYNC_POSTREAD) {
|
||||
while (bpage != NULL) {
|
||||
bcopy((void *)bpage->vaddr,
|
||||
(void *)bpage->datavaddr,
|
||||
bpage->datacount);
|
||||
bpage = STAILQ_NEXT(bpage, links);
|
||||
}
|
||||
break;
|
||||
case BUS_DMASYNC_PREREAD:
|
||||
case BUS_DMASYNC_POSTWRITE:
|
||||
/* No-ops */
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -89,16 +89,12 @@ struct mbuf;
|
||||
struct uio;
|
||||
|
||||
/*
|
||||
* bus_dmasync_op_t
|
||||
*
|
||||
* Operations performed by bus_dmamap_sync().
|
||||
* Operations performed by bus_dmamap_sync().
|
||||
*/
|
||||
typedef enum {
|
||||
BUS_DMASYNC_PREREAD,
|
||||
BUS_DMASYNC_POSTREAD,
|
||||
BUS_DMASYNC_PREWRITE,
|
||||
BUS_DMASYNC_POSTWRITE
|
||||
} bus_dmasync_op_t;
|
||||
#define BUS_DMASYNC_PREREAD 1
|
||||
#define BUS_DMASYNC_POSTREAD 2
|
||||
#define BUS_DMASYNC_PREWRITE 4
|
||||
#define BUS_DMASYNC_POSTWRITE 8
|
||||
|
||||
/*
|
||||
* bus_dma_tag_t
|
||||
@ -234,7 +230,7 @@ int bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
|
||||
/*
|
||||
 * Perform a synchronization operation on the given map.
|
||||
*/
|
||||
void _bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_dmasync_op_t);
|
||||
void _bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, int);
|
||||
#define bus_dmamap_sync(dmat, dmamap, op) \
|
||||
if ((dmamap) != NULL) \
|
||||
_bus_dmamap_sync(dmat, dmamap, op)
|
||||
|
@ -784,39 +784,32 @@ _bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
|
||||
}
|
||||
|
||||
void
|
||||
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
|
||||
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, int op)
|
||||
{
|
||||
struct bounce_page *bpage;
|
||||
|
||||
if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
|
||||
|
||||
/*
|
||||
* Handle data bouncing. We might also
|
||||
* want to add support for invalidating
|
||||
* the caches on broken hardware
|
||||
*/
|
||||
switch (op) {
|
||||
case BUS_DMASYNC_PREWRITE:
|
||||
if (op & BUS_DMASYNC_PREWRITE) {
|
||||
while (bpage != NULL) {
|
||||
bcopy((void *)bpage->datavaddr,
|
||||
(void *)bpage->vaddr,
|
||||
bpage->datacount);
|
||||
bpage = STAILQ_NEXT(bpage, links);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
case BUS_DMASYNC_POSTREAD:
|
||||
if (op & BUS_DMASYNC_POSTREAD) {
|
||||
while (bpage != NULL) {
|
||||
bcopy((void *)bpage->vaddr,
|
||||
(void *)bpage->datavaddr,
|
||||
bpage->datacount);
|
||||
bpage = STAILQ_NEXT(bpage, links);
|
||||
}
|
||||
break;
|
||||
case BUS_DMASYNC_PREREAD:
|
||||
case BUS_DMASYNC_POSTWRITE:
|
||||
/* No-ops */
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -89,16 +89,12 @@ struct mbuf;
|
||||
struct uio;
|
||||
|
||||
/*
|
||||
* bus_dmasync_op_t
|
||||
*
|
||||
* Operations performed by bus_dmamap_sync().
|
||||
* Operations performed by bus_dmamap_sync().
|
||||
*/
|
||||
typedef enum {
|
||||
BUS_DMASYNC_PREREAD,
|
||||
BUS_DMASYNC_POSTREAD,
|
||||
BUS_DMASYNC_PREWRITE,
|
||||
BUS_DMASYNC_POSTWRITE
|
||||
} bus_dmasync_op_t;
|
||||
#define BUS_DMASYNC_PREREAD 1
|
||||
#define BUS_DMASYNC_POSTREAD 2
|
||||
#define BUS_DMASYNC_PREWRITE 4
|
||||
#define BUS_DMASYNC_POSTWRITE 8
|
||||
|
||||
/*
|
||||
* bus_dma_tag_t
|
||||
@ -234,7 +230,7 @@ int bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
|
||||
/*
|
||||
 * Perform a synchronization operation on the given map.
|
||||
*/
|
||||
void _bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_dmasync_op_t);
|
||||
void _bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, int);
|
||||
#define bus_dmamap_sync(dmat, dmamap, op) \
|
||||
if ((dmamap) != NULL) \
|
||||
_bus_dmamap_sync(dmat, dmamap, op)
|
||||
|
@ -765,7 +765,7 @@ _bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
|
||||
}
|
||||
|
||||
void
|
||||
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
|
||||
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, int op)
|
||||
{
|
||||
struct bounce_page *bpage;
|
||||
|
||||
@ -776,28 +776,22 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
|
||||
* want to add support for invalidating
|
||||
* the caches on broken hardware
|
||||
*/
|
||||
switch (op) {
|
||||
case BUS_DMASYNC_PREWRITE:
|
||||
if (op & BUS_DMASYNC_PREWRITE) {
|
||||
while (bpage != NULL) {
|
||||
bcopy((void *)bpage->datavaddr,
|
||||
(void *)bpage->vaddr,
|
||||
bpage->datacount);
|
||||
bpage = STAILQ_NEXT(bpage, links);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
case BUS_DMASYNC_POSTREAD:
|
||||
if (op & BUS_DMASYNC_POSTREAD) {
|
||||
while (bpage != NULL) {
|
||||
bcopy((void *)bpage->vaddr,
|
||||
(void *)bpage->datavaddr,
|
||||
bpage->datacount);
|
||||
bpage = STAILQ_NEXT(bpage, links);
|
||||
}
|
||||
break;
|
||||
case BUS_DMASYNC_PREREAD:
|
||||
case BUS_DMASYNC_POSTWRITE:
|
||||
/* No-ops */
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -521,7 +521,7 @@ bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
|
||||
}
|
||||
|
||||
void
|
||||
bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
|
||||
bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, int op)
|
||||
{
|
||||
|
||||
return;
|
||||
|
@ -905,12 +905,10 @@ memsetw(void *d, int val, size_t size)
|
||||
struct mbuf;
|
||||
struct uio;
|
||||
|
||||
typedef enum {
|
||||
BUS_DMASYNC_PREREAD,
|
||||
BUS_DMASYNC_POSTREAD,
|
||||
BUS_DMASYNC_PREWRITE,
|
||||
BUS_DMASYNC_POSTWRITE,
|
||||
} bus_dmasync_op_t;
|
||||
#define BUS_DMASYNC_PREREAD 1
|
||||
#define BUS_DMASYNC_POSTREAD 2
|
||||
#define BUS_DMASYNC_PREWRITE 4
|
||||
#define BUS_DMASYNC_POSTWRITE 8
|
||||
|
||||
/*
|
||||
* A function that returns 1 if the address cannot be accessed by
|
||||
@ -977,7 +975,7 @@ struct bus_dma_tag {
|
||||
bus_dmamap_t, struct uio *, bus_dmamap_callback2_t *, void *, int);
|
||||
void (*dt_dmamap_unload)(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
|
||||
void (*dt_dmamap_sync)(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
|
||||
bus_dmasync_op_t);
|
||||
int);
|
||||
|
||||
/*
|
||||
* DMA memory utility functions.
|
||||
@ -1086,8 +1084,7 @@ sparc64_dmamap_unload(bus_dma_tag_t pt, bus_dma_tag_t dt, bus_dmamap_t p)
|
||||
sparc64_dmamap_unload((t), (t), (p))
|
||||
|
||||
static __inline void
|
||||
sparc64_dmamap_sync(bus_dma_tag_t pt, bus_dma_tag_t dt, bus_dmamap_t m,
|
||||
bus_dmasync_op_t op)
|
||||
sparc64_dmamap_sync(bus_dma_tag_t pt, bus_dma_tag_t dt, bus_dmamap_t m, int op)
|
||||
{
|
||||
bus_dma_tag_t lt;
|
||||
|
||||
|
@ -111,8 +111,7 @@ static int psycho_dmamap_load_mbuf(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
|
||||
static int psycho_dmamap_load_uio(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
|
||||
struct uio *, bus_dmamap_callback2_t *, void *, int);
|
||||
static void psycho_dmamap_unload(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
|
||||
static void psycho_dmamap_sync(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
|
||||
bus_dmasync_op_t);
|
||||
static void psycho_dmamap_sync(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t, int);
|
||||
static int psycho_dmamem_alloc_size(bus_dma_tag_t, bus_dma_tag_t, void **, int,
|
||||
bus_dmamap_t *, bus_size_t size);
|
||||
static int psycho_dmamem_alloc(bus_dma_tag_t, bus_dma_tag_t, void **, int,
|
||||
@ -1430,7 +1429,7 @@ psycho_dmamap_unload(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map)
|
||||
|
||||
static void
|
||||
psycho_dmamap_sync(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map,
|
||||
bus_dmasync_op_t op)
|
||||
int op)
|
||||
{
|
||||
struct psycho_softc *sc;
|
||||
|
||||
|
@ -242,8 +242,7 @@ static int sbus_dmamap_load_mbuf(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
|
||||
static int sbus_dmamap_load_uio(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
|
||||
struct uio *, bus_dmamap_callback2_t *, void *, int);
|
||||
static void sbus_dmamap_unload(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
|
||||
static void sbus_dmamap_sync(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
|
||||
bus_dmasync_op_t);
|
||||
static void sbus_dmamap_sync(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t, int);
|
||||
static int sbus_dmamem_alloc_size(bus_dma_tag_t, bus_dma_tag_t, void **, int,
|
||||
bus_dmamap_t *, bus_size_t size);
|
||||
static int sbus_dmamem_alloc(bus_dma_tag_t, bus_dma_tag_t, void **, int,
|
||||
@ -986,7 +985,7 @@ sbus_dmamap_unload(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map)
|
||||
|
||||
static void
|
||||
sbus_dmamap_sync(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map,
|
||||
bus_dmasync_op_t op)
|
||||
    int op)
|
||||
{
|
||||
struct sbus_softc *sc = (struct sbus_softc *)pdmat->dt_cookie;
|
||||
|
||||
|
@ -169,8 +169,7 @@ static int nexus_dmamap_load_mbuf(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
|
||||
static int nexus_dmamap_load_uio(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
|
||||
struct uio *, bus_dmamap_callback2_t *, void *, int);
|
||||
static void nexus_dmamap_unload(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
|
||||
static void nexus_dmamap_sync(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
|
||||
bus_dmasync_op_t);
|
||||
static void nexus_dmamap_sync(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t, int);
|
||||
static int nexus_dmamem_alloc_size(bus_dma_tag_t, bus_dma_tag_t, void **, int,
|
||||
bus_dmamap_t *, u_long size);
|
||||
static int nexus_dmamem_alloc(bus_dma_tag_t, bus_dma_tag_t, void **, int,
|
||||
@ -555,7 +554,7 @@ nexus_dmamap_unload(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map)
|
||||
*/
|
||||
static void
|
||||
nexus_dmamap_sync(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map,
|
||||
bus_dmasync_op_t op)
|
||||
int op)
|
||||
{
|
||||
|
||||
/*
|
||||
@ -563,7 +562,7 @@ nexus_dmamap_sync(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map,
|
||||
*
|
||||
* Actually a #Sync is expensive. We should optimize.
|
||||
*/
|
||||
if ((op == BUS_DMASYNC_PREREAD) || (op == BUS_DMASYNC_PREWRITE)) {
|
||||
if ((op & BUS_DMASYNC_PREREAD) || (op & BUS_DMASYNC_PREWRITE)) {
|
||||
/*
|
||||
* Don't really need to do anything, but flush any pending
|
||||
* writes anyway.
|
||||
@ -572,12 +571,12 @@ nexus_dmamap_sync(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map,
|
||||
}
|
||||
#if 0
|
||||
/* Should not be needed. */
|
||||
if (op == BUS_DMASYNC_POSTREAD) {
|
||||
if (op & BUS_DMASYNC_POSTREAD) {
|
||||
ecache_flush((vm_offset_t)map->buf,
|
||||
(vm_offset_t)map->buf + map->buflen - 1);
|
||||
}
|
||||
#endif
|
||||
if (op == BUS_DMASYNC_POSTWRITE) {
|
||||
if (op & BUS_DMASYNC_POSTWRITE) {
|
||||
/* Nothing to do. Handled by the bus controller. */
|
||||
}
|
||||
}
|
||||
|
@ -1011,18 +1011,16 @@ iommu_dvmamap_unload(bus_dma_tag_t pt, bus_dma_tag_t dt, struct iommu_state *is,
|
||||
|
||||
void
|
||||
iommu_dvmamap_sync(bus_dma_tag_t pt, bus_dma_tag_t dt, struct iommu_state *is,
|
||||
bus_dmamap_t map, bus_dmasync_op_t op)
|
||||
bus_dmamap_t map, int op)
|
||||
{
|
||||
struct bus_dmamap_res *r;
|
||||
vm_offset_t va;
|
||||
vm_size_t len;
|
||||
|
||||
switch (op) {
|
||||
case BUS_DMASYNC_PREREAD:
|
||||
/* XXX This is probably bogus. */
|
||||
if (op & BUS_DMASYNC_PREREAD)
|
||||
membar(Sync);
|
||||
break;
|
||||
case BUS_DMASYNC_POSTREAD:
|
||||
case BUS_DMASYNC_PREWRITE:
|
||||
if ((op & BUS_DMASYNC_POSTREAD) || (op & BUS_DMASYNC_PREWRITE)) {
|
||||
SLIST_FOREACH(r, &map->dm_reslist, dr_link) {
|
||||
va = (vm_offset_t)BDR_START(r);
|
||||
len = r->dr_used;
|
||||
@ -1034,14 +1032,8 @@ iommu_dvmamap_sync(bus_dma_tag_t pt, bus_dma_tag_t dt, struct iommu_state *is,
|
||||
va += IO_PAGE_SIZE;
|
||||
}
|
||||
}
|
||||
if (op == BUS_DMASYNC_PREWRITE)
|
||||
if (op & BUS_DMASYNC_PREWRITE)
|
||||
membar(Sync);
|
||||
break;
|
||||
case BUS_DMASYNC_POSTWRITE:
|
||||
/* Nothing to do. */
|
||||
break;
|
||||
default:
|
||||
panic("iommu_dvmamap_sync: bogus op %d", op);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -89,16 +89,12 @@ struct mbuf;
|
||||
struct uio;
|
||||
|
||||
/*
|
||||
* bus_dmasync_op_t
|
||||
*
|
||||
* Operations performed by bus_dmamap_sync().
|
||||
* Operations performed by bus_dmamap_sync().
|
||||
*/
|
||||
typedef enum {
|
||||
BUS_DMASYNC_PREREAD,
|
||||
BUS_DMASYNC_POSTREAD,
|
||||
BUS_DMASYNC_PREWRITE,
|
||||
BUS_DMASYNC_POSTWRITE
|
||||
} bus_dmasync_op_t;
|
||||
#define BUS_DMASYNC_PREREAD 1
|
||||
#define BUS_DMASYNC_POSTREAD 2
|
||||
#define BUS_DMASYNC_PREWRITE 4
|
||||
#define BUS_DMASYNC_POSTWRITE 8
|
||||
|
||||
/*
|
||||
* bus_dma_tag_t
|
||||
@ -234,7 +230,7 @@ int bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
|
||||
/*
|
||||
 * Perform a synchronization operation on the given map.
|
||||
*/
|
||||
void _bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_dmasync_op_t);
|
||||
void _bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, int);
|
||||
#define bus_dmamap_sync(dmat, dmamap, op) \
|
||||
if ((dmamap) != NULL) \
|
||||
_bus_dmamap_sync(dmat, dmamap, op)
|
||||
|
Loading…
Reference in New Issue
Block a user