Sync bounce pages support with the alpha backend. More precisely:

	o use a mutex to protect the bounce pages structure.
	o use a SYSINIT function to initialize the bounce pages structures
	  and thus avoid a race condition in alloc_bounce_pages().
	o add support for the BUS_DMA_NOWAIT flag in bus_dmamap_load()
	  (a driver-side usage sketch follows this list).
	o remove obsolete splhigh()/splx() calls.
	o remove the printf() about incorrect locking in busdma_swi() and sync
	  busdma_swi() with the alpha backend's version.
	o use __FBSDID.
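
For context, a minimal, hypothetical driver-side sketch of what the new
BUS_DMA_NOWAIT flag changes: with the flag set, bus_dmamap_load() fails with
ENOMEM when bounce pages cannot be reserved, instead of queueing the map and
returning EINPROGRESS.  The mydev_* names and the softc layout below are
illustrative assumptions, not part of this commit.

/*
 * Hypothetical driver-side sketch (not part of this commit); the
 * mydev_* names and softc layout are illustrative only.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <machine/bus.h>

struct mydev_softc {
	bus_dma_tag_t	dma_tag;	/* from bus_dma_tag_create() */
	bus_dmamap_t	dma_map;	/* from bus_dmamap_create() */
	bus_addr_t	dma_busaddr;	/* filled in by the callback */
};

/* Called once the mapping (and any bounce pages) are available. */
static void
mydev_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct mydev_softc *sc = arg;

	if (error == 0)
		sc->dma_busaddr = segs[0].ds_addr;
}

static int
mydev_start_dma(struct mydev_softc *sc, void *buf, bus_size_t len)
{
	int error;

	/*
	 * With BUS_DMA_NOWAIT the load fails with ENOMEM when bounce
	 * pages cannot be reserved; without it, the map is queued on
	 * bounce_map_waitinglist and EINPROGRESS is returned, with the
	 * callback run later from busdma_swi().
	 */
	error = bus_dmamap_load(sc->dma_tag, sc->dma_map, buf, len,
	    mydev_dma_callback, sc, BUS_DMA_NOWAIT);
	if (error != 0 && error != EINPROGRESS)
		return (error);		/* e.g. ENOMEM: caller may retry */
	return (0);
}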
Author: mux
Date:   2003-07-01 18:08:05 +00:00
Parent: a627a459f8
Commit: 7f5998c707


@@ -22,10 +22,11 @@
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
@@ -75,6 +76,7 @@ struct bounce_page {
int busdma_swi_pending;
static struct mtx bounce_lock;
static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;
static int reserved_bpages;
@@ -100,6 +102,7 @@ static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;
static void init_bounce_pages(void *dummy);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map);
static vm_offset_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
@@ -514,24 +517,27 @@ bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
/* Reserve Necessary Bounce Pages */
if (map->pagesneeded != 0) {
int s;
s = splhigh();
if (reserve_bounce_pages(dmat, map) != 0) {
/* Queue us for resources */
map->dmat = dmat;
map->buf = buf;
map->buflen = buflen;
map->callback = callback;
map->callback_arg = callback_arg;
STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
splx(s);
return (EINPROGRESS);
mtx_lock(&bounce_lock);
if (flags & BUS_DMA_NOWAIT) {
if (reserve_bounce_pages(dmat, map, 0) != 0) {
mtx_unlock(&bounce_lock);
return (ENOMEM);
}
} else {
if (reserve_bounce_pages(dmat, map, 1) != 0) {
/* Queue us for resources */
map->dmat = dmat;
map->buf = buf;
map->buflen = buflen;
map->callback = callback;
map->callback_arg = callback_arg;
STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
map, links);
mtx_unlock(&bounce_lock);
return (EINPROGRESS);
}
}
splx(s);
mtx_unlock(&bounce_lock);
}
vaddr = (vm_offset_t)buf;
@@ -846,21 +852,29 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
}
}
static void
init_bounce_pages(void *dummy __unused)
{
free_bpages = 0;
reserved_bpages = 0;
active_bpages = 0;
total_bpages = 0;
STAILQ_INIT(&bounce_page_list);
STAILQ_INIT(&bounce_map_waitinglist);
STAILQ_INIT(&bounce_map_callbacklist);
mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);
static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
int count;
count = 0;
if (total_bpages == 0) {
STAILQ_INIT(&bounce_page_list);
STAILQ_INIT(&bounce_map_waitinglist);
STAILQ_INIT(&bounce_map_callbacklist);
}
while (numpages > 0) {
struct bounce_page *bpage;
int s;
bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
M_NOWAIT | M_ZERO);
@@ -879,11 +893,11 @@ alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
break;
}
bpage->busaddr = pmap_kextract(bpage->vaddr);
s = splhigh();
mtx_lock(&bounce_lock);
STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
total_bpages++;
free_bpages++;
splx(s);
mtx_unlock(&bounce_lock);
count++;
numpages--;
}
@@ -891,11 +905,14 @@ alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
}
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
int pages;
mtx_assert(&bounce_lock, MA_OWNED);
pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
return (map->pagesneeded - (map->pagesreserved + pages));
free_bpages -= pages;
reserved_bpages += pages;
map->pagesreserved += pages;
@@ -908,7 +925,6 @@ static vm_offset_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
bus_size_t size)
{
int s;
struct bounce_page *bpage;
if (map->pagesneeded == 0)
@@ -919,7 +935,7 @@ add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
panic("add_bounce_page: map doesn't need any pages");
map->pagesreserved--;
s = splhigh();
mtx_lock(&bounce_lock);
bpage = STAILQ_FIRST(&bounce_page_list);
if (bpage == NULL)
panic("add_bounce_page: free page list is empty");
@@ -927,7 +943,7 @@ add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
STAILQ_REMOVE_HEAD(&bounce_page_list, links);
reserved_bpages--;
active_bpages++;
splx(s);
mtx_unlock(&bounce_lock);
bpage->datavaddr = vaddr;
bpage->datacount = size;
@@ -938,18 +954,17 @@ add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
int s;
struct bus_dmamap *map;
bpage->datavaddr = 0;
bpage->datacount = 0;
s = splhigh();
mtx_lock(&bounce_lock);
STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
free_bpages++;
active_bpages--;
if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
if (reserve_bounce_pages(map->dmat, map) == 0) {
if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
map, links);
@@ -957,7 +972,7 @@ free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
swi_sched(vm_ih, 0);
}
}
splx(s);
mtx_unlock(&bounce_lock);
}
void
@@ -966,15 +981,16 @@ busdma_swi(void)
bus_dma_tag_t dmat;
struct bus_dmamap *map;
printf("WARNING: busdma_swi is not properly locked!\n");
mtx_lock(&Giant);
mtx_lock(&bounce_lock);
while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
mtx_unlock(&bounce_lock);
dmat = map->dmat;
(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
map->callback, map->callback_arg, /*flags*/0);
(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
mtx_lock(&bounce_lock);
}
mtx_unlock(&Giant);
mtx_unlock(&bounce_lock);
}
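
For completeness, a hedged sketch of the tag setup that makes the
(dmat->lockfunc)(dmat->lockfuncarg, ...) calls in busdma_swi() above
meaningful.  It assumes the lockfunc/lockfuncarg arguments of
bus_dma_tag_create(9) and the stock busdma_lock_mutex() helper; the mydev_*
names are again illustrative, not part of this commit.

/*
 * Hypothetical tag creation (not part of this commit).  Passing
 * busdma_lock_mutex with a driver mutex as lockfuncarg means the
 * deferred callback invoked from busdma_swi() runs with that mutex
 * held, via the BUS_DMA_LOCK/BUS_DMA_UNLOCK calls above.
 */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <machine/bus.h>

struct mydev_softc {
	struct mtx	sc_mtx;		/* illustrative driver lock */
	bus_dma_tag_t	dma_tag;
};

static int
mydev_create_dma_tag(struct mydev_softc *sc)
{

	return (bus_dma_tag_create(NULL,	/* parent */
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    MAXBSIZE, 1, MAXBSIZE,		/* maxsize, nsegments, maxsegsz */
	    0,					/* flags */
	    busdma_lock_mutex, &sc->sc_mtx,	/* lockfunc, lockfuncarg */
	    &sc->dma_tag));
}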