Further cleanup of the sparc64 busdma implementation:
- Move prototypes for sparc64-specific helper functions from bus.h to bus_private.h.
- Move the method pointers from struct bus_dma_tag into a separate structure; this saves some memory, and allows a single method table to be used for each busdma backend, so that the bus drivers no longer need to be changed if the method tables have to be modified.
- Remove the hierarchical tag method lookup. It was never really useful, since the layering is fixed, and the current implementations do not need to call into parent implementations anyway. Each tag now inherits its method table pointer and cookie from the parent (or the root tag), and the method wrapper macros use the tag's method table directly.
- Add a method table to the non-IOMMU backend, remove unnecessary prototypes, and remove the extra parent tag argument.
- Rename sparc64_dmamem_alloc_map() and sparc64_dmamem_free_map() to sparc64_dma_alloc_map() and sparc64_dma_free_map(), move them to a better place, and use them for all map allocations and deallocations.
- Add a method table to the IOMMU backend, staticize its functions, and remove the extra parent tag argument.
- Change the psycho and sbus drivers to just set the cookie and the method table in the root tag.
- Miscellaneous small fixes.
parent 5f1d061608
commit ac5eed50ca
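For orientation before the diff, here is a minimal sketch of the pattern this change introduces. It is not the committed code: the types are stubbed so the fragment is self-contained, only two of the methods are shown, and tag_inherit() is a hypothetical helper standing in for logic that lives inline in bus_dma_tag_create(). Each busdma backend now exports one shared struct bus_dma_methods, every tag carries a dt_mt pointer and a dt_cookie inherited from its parent (or the root tag), and the bus_dmamap_*()/bus_dmamem_*() wrappers expand to a direct call through that table instead of walking up the tag hierarchy.

/* Sketch only: simplified from the sparc64 busdma headers touched below. */
typedef struct bus_dma_tag *bus_dma_tag_t;
typedef struct bus_dmamap *bus_dmamap_t;

struct bus_dma_methods {
    /* One table per backend, e.g. nexus_dma_methods and iommu_dma_methods. */
    int  (*dm_dmamap_create)(bus_dma_tag_t, int, bus_dmamap_t *);
    void (*dm_dmamap_unload)(bus_dma_tag_t, bus_dmamap_t);
    /* ... the remaining map and mem methods ... */
};

struct bus_dma_tag {
    void *dt_cookie;                /* backend state, e.g. an iommu_state */
    struct bus_dma_methods *dt_mt;  /* shared, per-backend method table */
    bus_dma_tag_t dt_parent;
    /* ... DMA constraints, reference and map counts ... */
};

/* The wrappers dispatch directly through the tag's method table ... */
#define bus_dmamap_create(t, f, p) ((t)->dt_mt->dm_dmamap_create((t), (f), (p)))
#define bus_dmamap_unload(t, p)    ((t)->dt_mt->dm_dmamap_unload((t), (p)))
/*
 * ... instead of searching the parent chain for the first tag that fills in
 * the method pointer, which is what the removed inline helpers did.
 */

extern bus_dma_tag_t sparc64_root_dma_tag;

/*
 * Hypothetical helper: a new tag takes cookie and method table from its
 * parent, or from the root tag registered by the bus driver at attach time.
 */
static void
tag_inherit(bus_dma_tag_t newtag, bus_dma_tag_t parent)
{
    bus_dma_tag_t impptag;

    impptag = (parent != NULL) ? parent : sparc64_root_dma_tag;
    newtag->dt_cookie = impptag->dt_cookie;
    newtag->dt_mt = impptag->dt_mt;
}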
@@ -131,15 +131,6 @@ struct bus_space_tag {
    bus_size_t, bus_size_t, int);
};

/*
 * Helpers
 */
int sparc64_bus_mem_map(bus_space_tag_t, bus_space_handle_t, bus_size_t,
    int, vm_offset_t, void **);
int sparc64_bus_mem_unmap(void *, bus_size_t);
bus_space_handle_t sparc64_fake_bustag(int, bus_addr_t,
    struct bus_space_tag *);

/*
 * Bus space function prototypes.
 */
@@ -939,10 +930,29 @@ typedef void bus_dmamap_callback_t(void *, bus_dma_segment_t *, int, int);
typedef void bus_dmamap_callback2_t(void *, bus_dma_segment_t *, int, bus_size_t, int);

/*
 * bus_dma_tag_t
 * Method table for a bus_dma_tag.
 */
struct bus_dma_methods {
    int (*dm_dmamap_create)(bus_dma_tag_t, int, bus_dmamap_t *);
    int (*dm_dmamap_destroy)(bus_dma_tag_t, bus_dmamap_t);
    int (*dm_dmamap_load)(bus_dma_tag_t, bus_dmamap_t, void *,
        bus_size_t, bus_dmamap_callback_t *, void *, int);
    int (*dm_dmamap_load_mbuf)(bus_dma_tag_t, bus_dmamap_t,
        struct mbuf *, bus_dmamap_callback2_t *, void *, int);
    int (*dm_dmamap_load_uio)(bus_dma_tag_t, bus_dmamap_t, struct uio *,
        bus_dmamap_callback2_t *, void *, int);
    void (*dm_dmamap_unload)(bus_dma_tag_t, bus_dmamap_t);
    void (*dm_dmamap_sync)(bus_dma_tag_t, bus_dmamap_t,
        bus_dmasync_op_t);
    int (*dm_dmamem_alloc)(bus_dma_tag_t, void **, int, bus_dmamap_t *);
    void (*dm_dmamem_free)(bus_dma_tag_t, void *, bus_dmamap_t);
};

/*
 * bus_dma_tag_t
 *
 * A machine-dependent opaque type describing the implementation of
 * DMA for a given bus.
 * A machine-dependent opaque type describing the implementation of
 * DMA for a given bus.
 */
struct bus_dma_tag {
    void *dt_cookie; /* cookie used in the guts */
@@ -960,162 +970,32 @@ struct bus_dma_tag {
    int dt_ref_count;
    int dt_map_count;

    /*
     * DMA mapping methods.
     */
    int (*dt_dmamap_create)(bus_dma_tag_t, bus_dma_tag_t, int,
        bus_dmamap_t *);
    int (*dt_dmamap_destroy)(bus_dma_tag_t, bus_dma_tag_t,
        bus_dmamap_t);
    int (*dt_dmamap_load)(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
        void *, bus_size_t, bus_dmamap_callback_t *, void *, int);
    int (*dt_dmamap_load_mbuf)(bus_dma_tag_t, bus_dma_tag_t,
        bus_dmamap_t, struct mbuf *, bus_dmamap_callback2_t *, void *, int);
    int (*dt_dmamap_load_uio)(bus_dma_tag_t, bus_dma_tag_t,
        bus_dmamap_t, struct uio *, bus_dmamap_callback2_t *, void *, int);
    void (*dt_dmamap_unload)(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
    void (*dt_dmamap_sync)(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
        bus_dmasync_op_t);

    /*
     * DMA memory utility functions.
     */
    int (*dt_dmamem_alloc)(bus_dma_tag_t, bus_dma_tag_t, void **, int,
        bus_dmamap_t *);
    void (*dt_dmamem_free)(bus_dma_tag_t, bus_dma_tag_t, void *,
        bus_dmamap_t);
    struct bus_dma_methods *dt_mt;
};

/*
 * XXX: This is a kluge. It would be better to handle dma tags in a hierarchical
 * way, and have a BUS_GET_DMA_TAG(); however, since this is not currently the
 * case, save a root tag in the relevant bus attach function and use that.
 * Keep the hierarchical structure, it might become needed in the future.
 */
extern bus_dma_tag_t sparc64_root_dma_tag;

int bus_dma_tag_create(bus_dma_tag_t, bus_size_t, bus_size_t, bus_addr_t,
    bus_addr_t, bus_dma_filter_t *, void *, bus_size_t, int, bus_size_t,
    int, bus_dma_tag_t *);

int bus_dma_tag_destroy(bus_dma_tag_t);

int sparc64_dmamem_alloc_map(bus_dma_tag_t dmat, bus_dmamap_t *mapp);
void sparc64_dmamem_free_map(bus_dma_tag_t dmat, bus_dmamap_t map);

static __inline int
sparc64_dmamap_create(bus_dma_tag_t pt, bus_dma_tag_t dt, int f,
    bus_dmamap_t *p)
{
    bus_dma_tag_t lt;

    for (lt = pt; lt->dt_dmamap_create == NULL; lt = lt->dt_parent)
        ;
    return ((*lt->dt_dmamap_create)(lt, dt, f, p));
}
#define bus_dmamap_create(t, f, p) \
    sparc64_dmamap_create((t), (t), (f), (p))

static __inline int
sparc64_dmamap_destroy(bus_dma_tag_t pt, bus_dma_tag_t dt, bus_dmamap_t p)
{
    bus_dma_tag_t lt;

    for (lt = pt; lt->dt_dmamap_destroy == NULL; lt = lt->dt_parent)
        ;
    return ((*lt->dt_dmamap_destroy)(lt, dt, p));
}
    ((t)->dt_mt->dm_dmamap_create((t), (f), (p)))
#define bus_dmamap_destroy(t, p) \
    sparc64_dmamap_destroy((t), (t), (p))

static __inline int
sparc64_dmamap_load(bus_dma_tag_t pt, bus_dma_tag_t dt, bus_dmamap_t m,
    void *p, bus_size_t s, bus_dmamap_callback_t *cb, void *cba, int f)
{
    bus_dma_tag_t lt;

    for (lt = pt; lt->dt_dmamap_load == NULL; lt = lt->dt_parent)
        ;
    return ((*lt->dt_dmamap_load)(lt, dt, m, p, s, cb, cba, f));
}
    ((t)->dt_mt->dm_dmamap_destroy((t), (p)))
#define bus_dmamap_load(t, m, p, s, cb, cba, f) \
    sparc64_dmamap_load((t), (t), (m), (p), (s), (cb), (cba), (f))

static __inline int
sparc64_dmamap_load_mbuf(bus_dma_tag_t pt, bus_dma_tag_t dt, bus_dmamap_t m,
    struct mbuf *mb, bus_dmamap_callback2_t *cb, void *cba, int f)
{
    bus_dma_tag_t lt;

    for (lt = pt; lt->dt_dmamap_load_mbuf == NULL; lt = lt->dt_parent)
        ;
    return ((*lt->dt_dmamap_load_mbuf)(lt, dt, m, mb, cb, cba, f));
}
#define bus_dmamap_load_mbuf(t, m, mb, cb, cba, f) \
    sparc64_dmamap_load_mbuf((t), (t), (m), (mb), (cb), (cba), (f))

static __inline int
sparc64_dmamap_load_uio(bus_dma_tag_t pt, bus_dma_tag_t dt, bus_dmamap_t m,
    struct uio *ui, bus_dmamap_callback2_t *cb, void *cba, int f)
{
    bus_dma_tag_t lt;

    for (lt = pt; lt->dt_dmamap_load_uio == NULL; lt = lt->dt_parent)
        ;
    return ((*lt->dt_dmamap_load_uio)(lt, dt, m, ui, cb, cba, f));
}
#define bus_dmamap_load_uio(t, m, ui, cb, cba, f) \
    sparc64_dmamap_load_uio((t), (t), (m), (ui), (cb), (cba), (f))

static __inline void
sparc64_dmamap_unload(bus_dma_tag_t pt, bus_dma_tag_t dt, bus_dmamap_t p)
{
    bus_dma_tag_t lt;

    for (lt = pt; lt->dt_dmamap_unload == NULL; lt = lt->dt_parent)
        ;
    (*lt->dt_dmamap_unload)(lt, dt, p);
}
    ((t)->dt_mt->dm_dmamap_load((t), (m), (p), (s), (cb), (cba), (f)))
#define bus_dmamap_load_mbuf(t, m, mb, cb, cba, f) \
    ((t)->dt_mt->dm_dmamap_load_mbuf((t), (m), (mb), (cb), (cba), (f)))
#define bus_dmamap_load_uio(t, m, ui, cb, cba, f) \
    ((t)->dt_mt->dm_dmamap_load_uio((t), (m), (ui), (cb), (cba), (f)))
#define bus_dmamap_unload(t, p) \
    sparc64_dmamap_unload((t), (t), (p))

static __inline void
sparc64_dmamap_sync(bus_dma_tag_t pt, bus_dma_tag_t dt, bus_dmamap_t m,
    bus_dmasync_op_t op)
{
    bus_dma_tag_t lt;

    for (lt = pt; lt->dt_dmamap_sync == NULL; lt = lt->dt_parent)
        ;
    (*lt->dt_dmamap_sync)(lt, dt, m, op);
}
    ((t)->dt_mt->dm_dmamap_unload((t), (p)))
#define bus_dmamap_sync(t, m, op) \
    sparc64_dmamap_sync((t), (t), (m), (op))

static __inline int
sparc64_dmamem_alloc(bus_dma_tag_t pt, bus_dma_tag_t dt, void **v, int f,
    bus_dmamap_t *m)
{
    bus_dma_tag_t lt;

    for (lt = pt; lt->dt_dmamem_alloc == NULL; lt = lt->dt_parent)
        ;
    return ((*lt->dt_dmamem_alloc)(lt, dt, v, f, m));
}
    ((t)->dt_mt->dm_dmamap_sync((t), (m), (op)))
#define bus_dmamem_alloc(t, v, f, m) \
    sparc64_dmamem_alloc((t), (t), (v), (f), (m))

static __inline void
sparc64_dmamem_free(bus_dma_tag_t pt, bus_dma_tag_t dt, void *v,
    bus_dmamap_t m)
{
    bus_dma_tag_t lt;

    for (lt = pt; lt->dt_dmamem_free == NULL; lt = lt->dt_parent)
        ;
    (*lt->dt_dmamem_free)(lt, dt, v, m);
}
    ((t)->dt_mt->dm_dmamem_alloc((t), (v), (f), (m)))
#define bus_dmamem_free(t, v, m) \
    sparc64_dmamem_free((t), (t), (v), (m))
    ((t)->dt_mt->dm_dmamem_free((t), (v), (m)))

#endif /* !_MACHINE_BUS_H_ */
@@ -33,6 +33,14 @@

#include <sys/queue.h>

/*
 * Helpers
 */
int sparc64_bus_mem_map(bus_space_tag_t, bus_space_handle_t, bus_size_t,
    int, vm_offset_t, void **);
int sparc64_bus_mem_unmap(void *, bus_size_t);
bus_space_handle_t sparc64_fake_bustag(int, bus_addr_t, struct bus_space_tag *);

/*
 * This is more or less arbitrary, except for the stack space consumed by
 * the segments array. Choose more than ((BUS_SPACE_MAXSIZE / PAGE_SIZE) + 1),
@@ -53,10 +61,14 @@ struct bus_dmamap {
    int dm_loaded;
};

static __inline void
sparc64_dmamap_init(struct bus_dmamap *m)
{
    SLIST_INIT(&m->dm_reslist);
}
int sparc64_dma_alloc_map(bus_dma_tag_t dmat, bus_dmamap_t *mapp);
void sparc64_dma_free_map(bus_dma_tag_t dmat, bus_dmamap_t map);

/*
 * XXX: This is a kluge. It would be better to handle dma tags in a hierarchical
 * way, and have a BUS_GET_DMA_TAG(); however, since this is not currently the
 * case, save a root tag in the relevant bus attach function and use that.
 */
extern bus_dma_tag_t sparc64_root_dma_tag;

#endif /* !_MACHINE_BUS_PRIVATE_H_ */
@@ -81,18 +81,6 @@ void iommu_enter(struct iommu_state *, vm_offset_t, vm_paddr_t, int);
void iommu_remove(struct iommu_state *, vm_offset_t, size_t);
void iommu_decode_fault(struct iommu_state *, vm_offset_t);

int iommu_dvmamap_create(bus_dma_tag_t, bus_dma_tag_t, int, bus_dmamap_t *);
int iommu_dvmamap_destroy(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
int iommu_dvmamap_load(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t, void *,
    bus_size_t, bus_dmamap_callback_t *, void *, int);
int iommu_dvmamap_load_mbuf(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
    struct mbuf *, bus_dmamap_callback2_t *, void *, int);
int iommu_dvmamap_load_uio(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
    struct uio *, bus_dmamap_callback2_t *, void *, int);
void iommu_dvmamap_unload(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
void iommu_dvmamap_sync(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t, int);
int iommu_dvmamem_alloc(bus_dma_tag_t, bus_dma_tag_t, void **, int,
    bus_dmamap_t *);
void iommu_dvmamem_free(bus_dma_tag_t, bus_dma_tag_t, void *, bus_dmamap_t);
extern struct bus_dma_methods iommu_dma_methods;

#endif /* !_MACHINE_IOMMUVAR_H_ */
@@ -49,6 +49,7 @@
#include <ofw/ofw_pci.h>

#include <machine/bus.h>
#include <machine/bus_private.h>
#include <machine/iommureg.h>
#include <machine/bus_common.h>
#include <machine/frame.h>
@@ -546,15 +547,7 @@ psycho_attach(device_t dev)
        panic("psycho_attach: bus_dma_tag_create failed");
    /* Customize the tag. */
    sc->sc_dmat->dt_cookie = sc->sc_is;
    sc->sc_dmat->dt_dmamap_create = iommu_dvmamap_create;
    sc->sc_dmat->dt_dmamap_destroy = iommu_dvmamap_destroy;
    sc->sc_dmat->dt_dmamap_load = iommu_dvmamap_load;
    sc->sc_dmat->dt_dmamap_load_mbuf = iommu_dvmamap_load_mbuf;
    sc->sc_dmat->dt_dmamap_load_uio = iommu_dvmamap_load_uio;
    sc->sc_dmat->dt_dmamap_unload = iommu_dvmamap_unload;
    sc->sc_dmat->dt_dmamap_sync = iommu_dvmamap_sync;
    sc->sc_dmat->dt_dmamem_alloc = iommu_dvmamem_alloc;
    sc->sc_dmat->dt_dmamem_free = iommu_dvmamem_free;
    sc->sc_dmat->dt_mt = &iommu_dma_methods;
    /* XXX: register as root dma tag (kludge). */
    sparc64_root_dma_tag = sc->sc_dmat;
@@ -117,6 +117,7 @@
#include <ofw/openfirm.h>

#include <machine/bus.h>
#include <machine/bus_private.h>
#include <machine/iommureg.h>
#include <machine/bus_common.h>
#include <machine/frame.h>
@@ -396,15 +397,7 @@ sbus_probe(device_t dev)
        panic("bus_dma_tag_create failed");
    /* Customize the tag. */
    sc->sc_cdmatag->dt_cookie = &sc->sc_is;
    sc->sc_cdmatag->dt_dmamap_create = iommu_dvmamap_create;
    sc->sc_cdmatag->dt_dmamap_destroy = iommu_dvmamap_destroy;
    sc->sc_cdmatag->dt_dmamap_load = iommu_dvmamap_load;
    sc->sc_cdmatag->dt_dmamap_load_mbuf = iommu_dvmamap_load_mbuf;
    sc->sc_cdmatag->dt_dmamap_load_uio = iommu_dvmamap_load_uio;
    sc->sc_cdmatag->dt_dmamap_unload = iommu_dvmamap_unload;
    sc->sc_cdmatag->dt_dmamap_sync = iommu_dvmamap_sync;
    sc->sc_cdmatag->dt_dmamem_alloc = iommu_dvmamem_alloc;
    sc->sc_cdmatag->dt_dmamem_free = iommu_dvmamem_free;
    sc->sc_cdmatag->dt_mt = &iommu_dma_methods;
    /* XXX: register as root dma tag (kludge). */
    sparc64_root_dma_tag = sc->sc_cdmatag;
@@ -154,28 +154,6 @@ int bus_stream_asi[] = {
    0
};

/*
 * busdma support code.
 * Note: there is no support for bounce buffers yet.
 */

static int nexus_dmamap_create(bus_dma_tag_t, bus_dma_tag_t, int,
    bus_dmamap_t *);
static int nexus_dmamap_destroy(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
static int nexus_dmamap_load(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
    void *, bus_size_t, bus_dmamap_callback_t *, void *, int);
static int nexus_dmamap_load_mbuf(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
    struct mbuf *, bus_dmamap_callback2_t *, void *, int);
static int nexus_dmamap_load_uio(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
    struct uio *, bus_dmamap_callback2_t *, void *, int);
static void nexus_dmamap_unload(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
static void nexus_dmamap_sync(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
    bus_dmasync_op_t);
static int nexus_dmamem_alloc(bus_dma_tag_t, bus_dma_tag_t, void **, int,
    bus_dmamap_t *);
static void nexus_dmamem_free(bus_dma_tag_t, bus_dma_tag_t, void *,
    bus_dmamap_t);

/*
 * Since there is now way for a device to obtain a dma tag from its parent
 * we use this kluge to handle different the different supported bus systems.
@@ -193,7 +171,7 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
    int nsegments, bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{

    bus_dma_tag_t impptag;
    bus_dma_tag_t newtag;

    /* Return a NULL tag on failure */
@@ -203,7 +181,15 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    if (newtag == NULL)
        return (ENOMEM);

    newtag->dt_parent = parent != NULL ? parent : sparc64_root_dma_tag;
    impptag = parent != NULL ? parent : sparc64_root_dma_tag;
    /*
     * The method table pointer and the cookie need to be taken over from
     * the parent or the root tag.
     */
    newtag->dt_cookie = impptag->dt_cookie;
    newtag->dt_mt = impptag->dt_mt;

    newtag->dt_parent = parent;
    newtag->dt_alignment = alignment;
    newtag->dt_boundary = boundary;
    newtag->dt_lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
@@ -218,16 +204,6 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    newtag->dt_ref_count = 1; /* Count ourselves */
    newtag->dt_map_count = 0;

    newtag->dt_dmamap_create = NULL;
    newtag->dt_dmamap_destroy = NULL;
    newtag->dt_dmamap_load = NULL;
    newtag->dt_dmamap_load_mbuf = NULL;
    newtag->dt_dmamap_load_uio = NULL;
    newtag->dt_dmamap_unload = NULL;
    newtag->dt_dmamap_sync = NULL;
    newtag->dt_dmamem_alloc = NULL;
    newtag->dt_dmamem_free = NULL;

    /* Take into account any restrictions imposed by our parent tag */
    if (parent != NULL) {
        newtag->dt_lowaddr = ulmin(parent->dt_lowaddr,
@@ -240,8 +216,8 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
         */
        newtag->dt_boundary = ulmin(parent->dt_boundary,
            newtag->dt_boundary);
        atomic_add_int(&parent->dt_ref_count, 1);
    }
    atomic_add_int(&newtag->dt_parent->dt_ref_count, 1);

    *dmat = newtag;
    return (0);
@@ -273,34 +249,40 @@ bus_dma_tag_destroy(bus_dma_tag_t dmat)
    return (0);
}

/*
 * Common function for DMA map creation. May be called by bus-specific
 * DMA map creation functions.
 */
static int
nexus_dmamap_create(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, int flags,
    bus_dmamap_t *mapp)
/* Allocate/free a tag, and do the necessary management work. */
int
sparc64_dma_alloc_map(bus_dma_tag_t dmat, bus_dmamap_t *mapp)
{

    *mapp = malloc(sizeof(**mapp), M_DEVBUF, M_NOWAIT | M_ZERO);
    if (*mapp != NULL) {
        ddmat->dt_map_count++;
        sparc64_dmamap_init(*mapp);
        return (0);
    } else
    if (*mapp == NULL)
        return (ENOMEM);

    SLIST_INIT(&(*mapp)->dm_reslist);
    dmat->dt_map_count++;
    return (0);
}

/*
 * Common function for DMA map destruction. May be called by bus-specific
 * DMA map destruction functions.
 */
static int
nexus_dmamap_destroy(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map)
void
sparc64_dma_free_map(bus_dma_tag_t dmat, bus_dmamap_t map)
{

    free(map, M_DEVBUF);
    ddmat->dt_map_count--;
    dmat->dt_map_count--;
}

static int
nexus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{

    return (sparc64_dma_alloc_map(dmat, mapp));
}

static int
nexus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

    sparc64_dma_free_map(dmat, map);
    return (0);
}

@@ -311,7 +293,7 @@ nexus_dmamap_destroy(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map)
 * first indicates if this is the first invocation of this function.
 */
static int
_nexus_dmamap_load_buffer(bus_dma_tag_t ddmat, bus_dma_segment_t segs[],
_nexus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t segs[],
    void *buf, bus_size_t buflen, struct thread *td, int flags,
    bus_addr_t *lastaddrp, int *segp, int first)
{
@@ -327,7 +309,7 @@ _nexus_dmamap_load_buffer(bus_dma_tag_t ddmat, bus_dma_segment_t segs[],
        pmap = NULL;

    lastaddr = *lastaddrp;
    bmask = ~(ddmat->dt_boundary - 1);
    bmask = ~(dmat->dt_boundary - 1);

    for (seg = *segp; buflen > 0 ; ) {
        /*
@@ -348,8 +330,8 @@ _nexus_dmamap_load_buffer(bus_dma_tag_t ddmat, bus_dma_segment_t segs[],
        /*
         * Make sure we don't cross any boundaries.
         */
        if (ddmat->dt_boundary > 0) {
            baddr = (curaddr + ddmat->dt_boundary) & bmask;
        if (dmat->dt_boundary > 0) {
            baddr = (curaddr + dmat->dt_boundary) & bmask;
            if (sgsize > (baddr - curaddr))
                sgsize = (baddr - curaddr);
        }
@@ -364,12 +346,12 @@ _nexus_dmamap_load_buffer(bus_dma_tag_t ddmat, bus_dma_segment_t segs[],
            first = 0;
        } else {
            if (curaddr == lastaddr &&
                (segs[seg].ds_len + sgsize) <= ddmat->dt_maxsegsz &&
                (ddmat->dt_boundary == 0 ||
                (segs[seg].ds_len + sgsize) <= dmat->dt_maxsegsz &&
                (dmat->dt_boundary == 0 ||
                (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
                segs[seg].ds_len += sgsize;
            else {
                if (++seg >= ddmat->dt_nsegments)
                if (++seg >= dmat->dt_nsegments)
                    break;
                segs[seg].ds_addr = curaddr;
                segs[seg].ds_len = sgsize;
@@ -401,19 +383,19 @@ _nexus_dmamap_load_buffer(bus_dma_tag_t ddmat, bus_dma_segment_t segs[],
 * bypass DVMA.
 */
static int
nexus_dmamap_load(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map,
    void *buf, bus_size_t buflen, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
nexus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, bus_dmamap_callback_t *callback, void *callback_arg,
    int flags)
{
#ifdef __GNUC__
    bus_dma_segment_t dm_segments[ddmat->dt_nsegments];
    bus_dma_segment_t dm_segments[dmat->dt_nsegments];
#else
    bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
    bus_addr_t lastaddr;
    int error, nsegs;

    error = _nexus_dmamap_load_buffer(ddmat, dm_segments, buf, buflen,
    error = _nexus_dmamap_load_buffer(dmat, dm_segments, buf, buflen,
        NULL, flags, &lastaddr, &nsegs, 1);

    if (error == 0) {
@@ -429,12 +411,11 @@ nexus_dmamap_load(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map,
 * Like nexus_dmamap_load(), but for mbufs.
 */
static int
nexus_dmamap_load_mbuf(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat,
    bus_dmamap_t map, struct mbuf *m0, bus_dmamap_callback2_t *callback,
    void *callback_arg, int flags)
nexus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
{
#ifdef __GNUC__
    bus_dma_segment_t dm_segments[ddmat->dt_nsegments];
    bus_dma_segment_t dm_segments[dmat->dt_nsegments];
#else
    bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
@@ -444,14 +425,14 @@ nexus_dmamap_load_mbuf(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat,

    nsegs = 0;
    error = 0;
    if (m0->m_pkthdr.len <= ddmat->dt_maxsize) {
    if (m0->m_pkthdr.len <= dmat->dt_maxsize) {
        int first = 1;
        bus_addr_t lastaddr = 0;
        struct mbuf *m;

        for (m = m0; m != NULL && error == 0; m = m->m_next) {
            if (m->m_len > 0) {
                error = _nexus_dmamap_load_buffer(ddmat,
                error = _nexus_dmamap_load_buffer(dmat,
                    dm_segments, m->m_data, m->m_len, NULL,
                    flags, &lastaddr, &nsegs, first);
                first = 0;
@@ -476,13 +457,12 @@ nexus_dmamap_load_mbuf(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat,
 * Like nexus_dmamap_load(), but for uios.
 */
static int
nexus_dmamap_load_uio(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat,
    bus_dmamap_t map, struct uio *uio, bus_dmamap_callback2_t *callback,
    void *callback_arg, int flags)
nexus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
{
    bus_addr_t lastaddr;
#ifdef __GNUC__
    bus_dma_segment_t dm_segments[ddmat->dt_nsegments];
    bus_dma_segment_t dm_segments[dmat->dt_nsegments];
#else
    bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
@@ -513,7 +493,7 @@ nexus_dmamap_load_uio(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat,
        caddr_t addr = (caddr_t) iov[i].iov_base;

        if (minlen > 0) {
            error = _nexus_dmamap_load_buffer(ddmat, dm_segments,
            error = _nexus_dmamap_load_buffer(dmat, dm_segments,
                addr, minlen, td, flags, &lastaddr, &nsegs, first);
            first = 0;

@@ -537,7 +517,7 @@ nexus_dmamap_load_uio(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat,
 * bus-specific DMA map unload functions.
 */
static void
nexus_dmamap_unload(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map)
nexus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{

    map->dm_loaded = 0;
@@ -548,8 +528,7 @@ nexus_dmamap_unload(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map)
 * by bus-specific DMA map synchronization functions.
 */
static void
nexus_dmamap_sync(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map,
    bus_dmasync_op_t op)
nexus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{

    /*
@@ -576,46 +555,17 @@ nexus_dmamap_sync(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map,
        }
    }

/*
 * Helper functions for buses that use their private dmamem_alloc/dmamem_free
 * versions.
 * These differ from the dmamap_alloc() functions in that they create a tag
 * that is specifically for use with dmamem_alloc'ed memory.
 * These are primitive now, but I expect that some fields of the map will need
 * to be filled soon.
 */
int
sparc64_dmamem_alloc_map(bus_dma_tag_t dmat, bus_dmamap_t *mapp)
{

    *mapp = malloc(sizeof(**mapp), M_DEVBUF, M_NOWAIT | M_ZERO);
    if (*mapp == NULL)
        return (ENOMEM);

    dmat->dt_map_count++;
    sparc64_dmamap_init(*mapp);
    return (0);
}

void
sparc64_dmamem_free_map(bus_dma_tag_t dmat, bus_dmamap_t map)
{

    free(map, M_DEVBUF);
    dmat->dt_map_count--;
}

/*
 * Common function for DMA-safe memory allocation. May be called
 * by bus-specific DMA memory allocation functions.
 */
static int
nexus_dmamem_alloc(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, void **vaddr,
    int flags, bus_dmamap_t *mapp)
nexus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
    bus_dmamap_t *mapp)
{

    if ((ddmat->dt_maxsize <= PAGE_SIZE)) {
        *vaddr = malloc(ddmat->dt_maxsize, M_DEVBUF,
    if ((dmat->dt_maxsize <= PAGE_SIZE)) {
        *vaddr = malloc(dmat->dt_maxsize, M_DEVBUF,
            (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
    } else {
        /*
@@ -624,17 +574,15 @@ nexus_dmamem_alloc(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, void **vaddr,
         * allocations yet though.
         */
        mtx_lock(&Giant);
        *vaddr = contigmalloc(ddmat->dt_maxsize, M_DEVBUF,
        *vaddr = contigmalloc(dmat->dt_maxsize, M_DEVBUF,
            (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK,
            0ul, ddmat->dt_lowaddr,
            ddmat->dt_alignment ? ddmat->dt_alignment : 1UL,
            ddmat->dt_boundary);
            0ul, dmat->dt_lowaddr,
            dmat->dt_alignment ? dmat->dt_alignment : 1UL,
            dmat->dt_boundary);
        mtx_unlock(&Giant);
    }
    if (*vaddr == NULL) {
        free(*mapp, M_DEVBUF);
    if (*vaddr == NULL)
        return (ENOMEM);
    }
    return (0);
}

@@ -643,20 +591,30 @@ nexus_dmamem_alloc(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, void **vaddr,
 * bus-specific DMA memory free functions.
 */
static void
nexus_dmamem_free(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, void *vaddr,
    bus_dmamap_t map)
nexus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{

    sparc64_dmamem_free_map(ddmat, map);
    if ((ddmat->dt_maxsize <= PAGE_SIZE))
    if ((dmat->dt_maxsize <= PAGE_SIZE))
        free(vaddr, M_DEVBUF);
    else {
        mtx_lock(&Giant);
        contigfree(vaddr, ddmat->dt_maxsize, M_DEVBUF);
        contigfree(vaddr, dmat->dt_maxsize, M_DEVBUF);
        mtx_unlock(&Giant);
    }
}

struct bus_dma_methods nexus_dma_methods = {
    nexus_dmamap_create,
    nexus_dmamap_destroy,
    nexus_dmamap_load,
    nexus_dmamap_load_mbuf,
    nexus_dmamap_load_uio,
    nexus_dmamap_unload,
    nexus_dmamap_sync,
    nexus_dmamem_alloc,
    nexus_dmamem_free,
};

struct bus_dma_tag nexus_dmatag = {
    NULL,
    NULL,
@@ -672,16 +630,7 @@ struct bus_dma_tag nexus_dmatag = {
    0,
    0,
    0,
    nexus_dmamap_create,
    nexus_dmamap_destroy,
    nexus_dmamap_load,
    nexus_dmamap_load_mbuf,
    nexus_dmamap_load_uio,
    nexus_dmamap_unload,
    nexus_dmamap_sync,

    nexus_dmamem_alloc,
    nexus_dmamem_free,
    &nexus_dma_methods,
};

/*
@@ -667,23 +667,23 @@ iommu_dvma_vallocseg(bus_dma_tag_t dt, struct iommu_state *is, bus_dmamap_t map,
    return (0);
}

int
iommu_dvmamem_alloc(bus_dma_tag_t pt, bus_dma_tag_t dt, void **vaddr,
    int flags, bus_dmamap_t *mapp)
static int
iommu_dvmamem_alloc(bus_dma_tag_t dt, void **vaddr, int flags,
    bus_dmamap_t *mapp)
{
    struct iommu_state *is = pt->dt_cookie;
    struct iommu_state *is = dt->dt_cookie;
    int error;

    /*
     * XXX: This will break for 32 bit transfers on machines with more than
     * 16G (1 << 34 bytes) of memory.
     */
    if ((error = sparc64_dmamem_alloc_map(dt, mapp)) != 0)
    if ((error = sparc64_dma_alloc_map(dt, mapp)) != 0)
        return (error);
    if ((*vaddr = malloc(dt->dt_maxsize, M_IOMMU,
        (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL) {
        error = ENOMEM;
        sparc64_dmamem_free_map(dt, *mapp);
        sparc64_dma_free_map(dt, *mapp);
        return (error);
    }
    iommu_map_insq(*mapp);
@@ -695,26 +695,24 @@ iommu_dvmamem_alloc(bus_dma_tag_t pt, bus_dma_tag_t dt, void **vaddr,
    return (0);
}

void
iommu_dvmamem_free(bus_dma_tag_t pt, bus_dma_tag_t dt, void *vaddr,
    bus_dmamap_t map)
static void
iommu_dvmamem_free(bus_dma_tag_t dt, void *vaddr, bus_dmamap_t map)
{
    struct iommu_state *is = pt->dt_cookie;
    struct iommu_state *is = dt->dt_cookie;

    iommu_dvma_vfree(is, map);
    sparc64_dmamem_free_map(dt, map);
    sparc64_dma_free_map(dt, map);
    free(vaddr, M_IOMMU);
}

int
iommu_dvmamap_create(bus_dma_tag_t pt, bus_dma_tag_t dt, int flags,
    bus_dmamap_t *mapp)
static int
iommu_dvmamap_create(bus_dma_tag_t dt, int flags, bus_dmamap_t *mapp)
{
    struct iommu_state *is = pt->dt_cookie;
    struct iommu_state *is = dt->dt_cookie;
    bus_size_t totsz, presz, currsz;
    int error, i, maxpre;

    if ((error = sparc64_dmamap_create(pt->dt_parent, dt, flags, mapp)) != 0)
    if ((error = sparc64_dma_alloc_map(dt, mapp)) != 0)
        return (error);
    KASSERT(SLIST_EMPTY(&(*mapp)->dm_reslist),
        ("iommu_dvmamap_create: hierarchy botched"));
@@ -751,13 +749,14 @@ iommu_dvmamap_create(bus_dma_tag_t pt, bus_dma_tag_t dt, int flags,
    return (0);
}

int
iommu_dvmamap_destroy(bus_dma_tag_t pt, bus_dma_tag_t dt, bus_dmamap_t map)
static int
iommu_dvmamap_destroy(bus_dma_tag_t dt, bus_dmamap_t map)
{
    struct iommu_state *is = pt->dt_cookie;
    struct iommu_state *is = dt->dt_cookie;

    iommu_dvma_vfree(is, map);
    return (sparc64_dmamap_destroy(pt->dt_parent, dt, map));
    sparc64_dma_free_map(dt, map);
    return (0);
}

/*
@@ -842,12 +841,12 @@ iommu_dvmamap_load_buffer(bus_dma_tag_t dt, struct iommu_state *is,
    return (0);
}

int
iommu_dvmamap_load(bus_dma_tag_t pt, bus_dma_tag_t dt, bus_dmamap_t map,
    void *buf, bus_size_t buflen, bus_dmamap_callback_t *cb, void *cba,
static int
iommu_dvmamap_load(bus_dma_tag_t dt, bus_dmamap_t map, void *buf,
    bus_size_t buflen, bus_dmamap_callback_t *cb, void *cba,
    int flags)
{
    struct iommu_state *is = pt->dt_cookie;
    struct iommu_state *is = dt->dt_cookie;
#ifdef __GNUC__
    bus_dma_segment_t sgs[dt->dt_nsegments];
#else
@@ -878,11 +877,11 @@ iommu_dvmamap_load(bus_dma_tag_t pt, bus_dma_tag_t dt, bus_dmamap_t map,
    return (error);
}

int
iommu_dvmamap_load_mbuf(bus_dma_tag_t pt, bus_dma_tag_t dt, bus_dmamap_t map,
    struct mbuf *m0, bus_dmamap_callback2_t *cb, void *cba, int flags)
static int
iommu_dvmamap_load_mbuf(bus_dma_tag_t dt, bus_dmamap_t map, struct mbuf *m0,
    bus_dmamap_callback2_t *cb, void *cba, int flags)
{
    struct iommu_state *is = pt->dt_cookie;
    struct iommu_state *is = dt->dt_cookie;
#ifdef __GNUC__
    bus_dma_segment_t sgs[dt->dt_nsegments];
#else
@@ -923,11 +922,11 @@ iommu_dvmamap_load_mbuf(bus_dma_tag_t pt, bus_dma_tag_t dt, bus_dmamap_t map,
    return (error);
}

int
iommu_dvmamap_load_uio(bus_dma_tag_t pt, bus_dma_tag_t dt, bus_dmamap_t map,
    struct uio *uio, bus_dmamap_callback2_t *cb, void *cba, int flags)
static int
iommu_dvmamap_load_uio(bus_dma_tag_t dt, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *cb, void *cba, int flags)
{
    struct iommu_state *is = pt->dt_cookie;
    struct iommu_state *is = dt->dt_cookie;
#ifdef __GNUC__
    bus_dma_segment_t sgs[dt->dt_nsegments];
#else
@@ -982,23 +981,22 @@ iommu_dvmamap_load_uio(bus_dma_tag_t pt, bus_dma_tag_t dt, bus_dmamap_t map,
    return (error);
}

void
iommu_dvmamap_unload(bus_dma_tag_t pt, bus_dma_tag_t dt, bus_dmamap_t map)
static void
iommu_dvmamap_unload(bus_dma_tag_t dt, bus_dmamap_t map)
{
    struct iommu_state *is = pt->dt_cookie;
    struct iommu_state *is = dt->dt_cookie;

    if (map->dm_loaded == 0)
        return;
    iommu_dvmamap_vunload(is, map);
    iommu_map_insq(map);
    sparc64_dmamap_unload(pt->dt_parent, dt, map);
    map->dm_loaded = 0;
}

void
iommu_dvmamap_sync(bus_dma_tag_t pt, bus_dma_tag_t dt, bus_dmamap_t map,
    bus_dmasync_op_t op)
static void
iommu_dvmamap_sync(bus_dma_tag_t dt, bus_dmamap_t map, bus_dmasync_op_t op)
{
    struct iommu_state *is = pt->dt_cookie;
    struct iommu_state *is = dt->dt_cookie;
    struct bus_dmamap_res *r;
    vm_offset_t va;
    vm_size_t len;
@@ -1055,3 +1053,15 @@ iommu_diag(struct iommu_state *is, vm_offset_t va)
}

#endif /* IOMMU_DIAG */

struct bus_dma_methods iommu_dma_methods = {
    iommu_dvmamap_create,
    iommu_dvmamap_destroy,
    iommu_dvmamap_load,
    iommu_dvmamap_load_mbuf,
    iommu_dvmamap_load_uio,
    iommu_dvmamap_unload,
    iommu_dvmamap_sync,
    iommu_dvmamem_alloc,
    iommu_dvmamem_free,
};