Bounce buffers. From John Dyson with help from me.

This commit is contained in:
dg 1994-03-23 09:16:04 +00:00
parent 41365ddbb4
commit 7b347f30ed
9 changed files with 750 additions and 10 deletions

View File

@ -35,7 +35,7 @@
* SUCH DAMAGE.
*
* from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
* $Id: machdep.c,v 1.38 1994/03/07 11:47:31 davidg Exp $
* $Id: machdep.c,v 1.39 1994/03/19 23:58:58 wollman Exp $
*/
#include "npx.h"
@ -117,7 +117,13 @@ int bufpages = BUFPAGES;
#else
int bufpages = 0;
#endif
#ifdef BOUNCEPAGES
int bouncepages = BOUNCEPAGES;
#else
int bouncepages = 0;
#endif
extern int freebufspace;
extern char *bouncememory;
int _udatasel, _ucodesel;
@ -244,6 +250,18 @@ cpu_startup()
valloc(swbuf, struct buf, nswbuf);
valloc(buf, struct buf, nbuf);
#ifndef NOBOUNCE
/*
* If there is more than 16MB of memory, allocate some bounce buffers
*/
if (Maxmem > 4096) {
if (bouncepages == 0)
bouncepages = 96; /* largest physio size + extra */
v = (caddr_t)((vm_offset_t)((vm_offset_t)v + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1));
valloc(bouncememory, char, bouncepages * PAGE_SIZE);
}
#endif
/*
* End of first pass, size has been calculated so allocate memory
*/
@ -294,6 +312,13 @@ cpu_startup()
printf("using %d buffers containing %d bytes of memory\n",
nbuf, bufpages * CLBYTES);
#ifndef NOBOUNCE
/*
* init bounce buffers
*/
vm_bounce_init();
#endif
/*
* Set up CPU-specific registers, cache, etc.
*/

View File

@ -37,7 +37,7 @@
*
* from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91
* Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
* $Id: vm_machdep.c,v 1.12 1994/03/07 11:38:36 davidg Exp $
* $Id: vm_machdep.c,v 1.13 1994/03/21 09:35:10 davidg Exp $
*/
#include "npx.h"
@ -53,6 +53,338 @@
#include "vm/vm.h"
#include "vm/vm_kern.h"
#ifndef NOBOUNCE
caddr_t bouncememory;
vm_offset_t bouncepa, bouncepaend;
int bouncepages;
vm_map_t bounce_map;
int bmwait, bmfreeing;
int bounceallocarraysize;
unsigned *bounceallocarray;
int bouncefree;
#define SIXTEENMEG (4096*4096)
#define MAXBKVA 512
/* special list that can be used at interrupt time for eventual kva free */
struct kvasfree {
vm_offset_t addr;
vm_offset_t size;
} kvaf[MAXBKVA];
int kvasfreecnt;
/*
* vm_bounce_page_find:
* Allocate `count` physically contiguous bounce buffer pages and return
* the physical address of the first one.  Only count == 1 is implemented
* now; larger requests panic.  Blocks (tsleep on &bounceallocarray) until
* a page becomes free; vm_bounce_page_free() does the wakeup.
*/
vm_offset_t
vm_bounce_page_find(count)
int count;
{
int bit;
int s,i;
if (count != 1)
panic("vm_bounce_page_find -- no support for > 1 page yet!!!");
s = splbio();
retry:
/* scan the allocation bitmap, one word (8*sizeof(unsigned) pages) at a time */
for (i = 0; i < bounceallocarraysize; i++) {
if (bounceallocarray[i] != 0xffffffff) {
/* ffs() on the complement finds the first clear (free) bit */
if (bit = ffs(~bounceallocarray[i])) {
bounceallocarray[i] |= 1 << (bit - 1) ;
bouncefree -= count;
splx(s);
/* physical address = pool base + (global bit index) pages */
return bouncepa + (i * 8 * sizeof(unsigned) + (bit - 1)) * NBPG;
}
}
}
/*
* nothing free: sleep until a page is released, then rescan.
* NOTE(review): tsleep drops the priority level; the rescan after
* wakeup relies on splbio having been re-raised by tsleep's return
* path -- confirm against this kernel's tsleep semantics.
*/
tsleep((caddr_t) &bounceallocarray, PRIBIO, "bncwai", 0);
goto retry;
}
/*
* vm_bounce_page_free:
* Release `count` bounce buffer pages starting at physical address `pa`
* back to the pool, clear the corresponding allocation bit, and wake any
* sleepers in vm_bounce_page_find().  Only count == 1 is implemented.
* NOTE(review): no spl protection here -- presumably all callers already
* run at splbio (biodone path); verify.
*/
void
vm_bounce_page_free(pa, count)
vm_offset_t pa;
int count;
{
int allocindex;
int index;
int bit;
if (count != 1)
panic("vm_bounce_page_free -- no support for > 1 page yet!!!\n");
/* page index within the bounce pool; must lie inside [0, bouncepages) */
index = (pa - bouncepa) / NBPG;
if ((index < 0) || (index >= bouncepages))
panic("vm_bounce_page_free -- bad index\n");
/* locate the bitmap word and bit for this page, then clear it */
allocindex = index / (8 * sizeof(unsigned));
bit = index % (8 * sizeof(unsigned));
bounceallocarray[allocindex] &= ~(1 << bit);
bouncefree += count;
/* wake anyone blocked in vm_bounce_page_find() */
wakeup((caddr_t) &bounceallocarray);
}
/*
* vm_bounce_kva:
* Allocate `count` pages of pageable kernel virtual address space from
* the bounce_map submap.  Before allocating, drain the deferred-free
* list (kvaf[]) that vm_bounce_free() fills at interrupt time: unmap and
* free each saved range.  `bmfreeing` guards against reentrant draining;
* if the list grew while we were draining, restart from where we left
* off.  If the submap is exhausted, sleep on bounce_map until
* vm_bounce_free() signals space and retry.
*/
vm_offset_t
vm_bounce_kva(count)
int count;
{
int tofree;
int i;
int startfree;
vm_offset_t kva;
int s = splbio();
startfree = 0;
more:
if (!bmfreeing && (tofree = kvasfreecnt)) {
bmfreeing = 1;
more1:
/* NOTE(review): the more1 label appears to be unused */
for (i = startfree; i < kvasfreecnt; i++) {
/* tear down the mapping, then return the kva to the submap */
pmap_remove(kernel_pmap,
kvaf[i].addr, kvaf[i].addr + kvaf[i].size);
kmem_free_wakeup(bounce_map, kvaf[i].addr,
kvaf[i].size);
}
/* list grew underneath us (interrupt added entries): go again */
if (kvasfreecnt != tofree) {
startfree = i;
bmfreeing = 0;
goto more;
}
kvasfreecnt = 0;
bmfreeing = 0;
}
/* no kva available: wait for vm_bounce_free() to wake us, then retry */
if (!(kva = kmem_alloc_pageable(bounce_map, count * NBPG))) {
bmwait = 1;
tsleep((caddr_t) bounce_map, PRIBIO, "bmwait", 0);
goto more;
}
splx(s);
return kva;
}
/*
* vm_bounce_init:
* One-time initialization of the bounce buffer system, called from
* cpu_startup() after `bouncememory` has been allocated.  Sizes and
* zeroes the page allocation bitmap, carves a kva submap (bounce_map)
* out of kernel_map for replacement mappings, and records the physical
* range [bouncepa, bouncepaend) backing the bounce pool.  A no-op when
* no bounce pages were configured.
*/
void
vm_bounce_init()
{
vm_offset_t minaddr, maxaddr;
if (bouncepages == 0)
return;
/* one bit per bounce page, rounded up to whole unsigned words */
bounceallocarraysize = (bouncepages + (8*sizeof(unsigned))-1) / (8 * sizeof(unsigned));
bounceallocarray = malloc(bounceallocarraysize * sizeof(unsigned), M_TEMP, M_NOWAIT);
if (!bounceallocarray)
panic("Cannot allocate bounce resource array\n");
/*
* NOTE(review): bzero sizes with sizeof(long) but the array was
* allocated with sizeof(unsigned) -- identical on i386 (both 4
* bytes), but the mismatch is worth confirming/cleaning up.
*/
bzero(bounceallocarray, bounceallocarraysize * sizeof(long));
bounce_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, MAXBKVA * NBPG, FALSE);
/* bouncememory is virtually contiguous; record its physical base/end */
bouncepa = pmap_extract(kernel_pmap, (vm_offset_t) bouncememory);
bouncepaend = bouncepa + bouncepages * NBPG;
bouncefree = bouncepages;
kvasfreecnt = 0;
}
/*
* vm_bounce_alloc:
* Prepare a struct buf for DMA by an ISA device that can only address
* the low 16MB.  Called before the disk sort in the driver strategy
* routines.  If any page backing bp->b_un.b_addr lies at or above 16MB,
* build a replacement kva range in which high pages are remapped to
* bounce pool pages (copying the data in for writes) and low pages are
* mapped through unchanged.  The original kva is saved in b_savekva and
* B_BOUNCE is set so vm_bounce_free() can undo this at biodone time.
* No-op if bounce buffering is not configured or no page is high.
*/
void
vm_bounce_alloc(bp)
struct buf *bp;
{
int countvmpg;
vm_offset_t vastart, vaend;
vm_offset_t vapstart, vapend;
vm_offset_t va, kva;
vm_offset_t pa;
int dobounceflag = 0;
int bounceindex;
int i;
int s;
if (bouncepages == 0)
return;
/* page-align the buffer's kva range and count the pages it spans */
vastart = (vm_offset_t) bp->b_un.b_addr;
vaend = (vm_offset_t) bp->b_un.b_addr + bp->b_bcount;
vapstart = i386_trunc_page(vastart);
vapend = i386_round_page(vaend);
countvmpg = (vapend - vapstart) / NBPG;
/*
* if any page is above 16MB, then go into bounce-buffer mode
*/
va = vapstart;
for (i = 0; i < countvmpg; i++) {
pa = pmap_extract(kernel_pmap, va);
if (pa >= SIXTEENMEG)
++dobounceflag;
va += NBPG;
}
if (dobounceflag == 0)
return;
if (bouncepages < dobounceflag)
panic("Not enough bounce buffers!!!");
/*
* allocate a replacement kva for b_addr
*/
kva = vm_bounce_kva(countvmpg);
va = vapstart;
for (i = 0; i < countvmpg; i++) {
pa = pmap_extract(kernel_pmap, va);
if (pa >= SIXTEENMEG) {
/*
* high page: allocate a replacement page from the bounce pool
* (vm_bounce_page_find may sleep until one is free)
*/
vm_offset_t bpa = vm_bounce_page_find(1);
pmap_enter(kernel_pmap, kva + (NBPG * i), bpa, VM_PROT_DEFAULT,
TRUE);
/*
* if we are writing, copy the data into the bounce page now;
* for reads vm_bounce_free() copies it back after I/O completes
*/
if ((bp->b_flags & B_READ) == 0)
bcopy((caddr_t) va, (caddr_t) kva + (NBPG * i), NBPG);
} else {
/*
* low page: map the original physical page into the new kva
*/
pmap_enter(kernel_pmap, kva + (NBPG * i), pa, VM_PROT_DEFAULT,
TRUE);
}
va += NBPG;
}
/*
* flag the buffer as being bounced
*/
bp->b_flags |= B_BOUNCE;
/*
* save the original buffer kva
*/
bp->b_savekva = bp->b_un.b_addr;
/*
* put our new kva into the buffer (offset by original offset);
* the page-offset bits of the old address are OR'ed onto the
* page-aligned replacement kva
*/
bp->b_un.b_addr = (caddr_t) (((vm_offset_t) kva) |
((vm_offset_t) bp->b_savekva & (NBPG - 1)));
return;
}
/*
* vm_bounce_free:
* Undo vm_bounce_alloc() at biodone time.  Walk every page of the
* replacement kva range: pages whose physical address falls inside the
* bounce pool are bounce pages -- for reads, copy their contents back
* into the original buffer, then return them to the pool.  The
* replacement kva itself is queued on kvaf[] for deferred freeing by
* vm_bounce_kva() (this runs at interrupt time, so it cannot free the
* kva directly), and anyone sleeping for bounce kva is woken.  Finally
* the original kva is restored in the buf and B_BOUNCE is cleared.
* No-op if the buffer was never bounced.
*/
void
vm_bounce_free(bp)
struct buf *bp;
{
int i;
vm_offset_t origkva, bouncekva;
vm_offset_t vastart, vaend;
vm_offset_t vapstart, vapend;
int countbounce = 0;
vm_offset_t firstbouncepa = 0;
int firstbounceindex;
int countvmpg;
vm_offset_t bcount;
int s;
/*
* if this isn't a bounced buffer, then just return
*/
if ((bp->b_flags & B_BOUNCE) == 0)
return;
origkva = (vm_offset_t) bp->b_savekva;
bouncekva = (vm_offset_t) bp->b_un.b_addr;
vastart = bouncekva;
vaend = bouncekva + bp->b_bcount;
bcount = bp->b_bcount;
vapstart = i386_trunc_page(vastart);
vapend = i386_round_page(vaend);
countvmpg = (vapend - vapstart) / NBPG;
/*
* check every page in the kva space for b_addr
*/
for (i = 0; i < countvmpg; i++) {
vm_offset_t mybouncepa;
vm_offset_t copycount;
/* bytes from bouncekva to the end of its page (partial first page ok) */
copycount = i386_round_page(bouncekva + 1) - bouncekva;
mybouncepa = pmap_extract(kernel_pmap, i386_trunc_page(bouncekva));
/*
* if this is a bounced pa, then process as one
*/
if ((mybouncepa >= bouncepa) && (mybouncepa < bouncepaend)) {
/* clamp to the bytes actually remaining in the transfer */
if (copycount > bcount)
copycount = bcount;
/*
* if this is a read, then copy from bounce buffer into original buffer
*/
if (bp->b_flags & B_READ)
bcopy((caddr_t) bouncekva, (caddr_t) origkva, copycount);
/*
* free the bounce allocation
*/
vm_bounce_page_free(i386_trunc_page(mybouncepa), 1);
}
origkva += copycount;
bouncekva += copycount;
bcount -= copycount;
}
/*
* add the old kva into the "to free" list; vm_bounce_kva() will
* pmap_remove and kmem_free it later at non-interrupt time.
* NOTE(review): kvasfreecnt is incremented without a bound check
* against MAXBKVA and without spl protection -- presumably this runs
* at splbio and the list cannot exceed MAXBKVA in practice; confirm.
*/
bouncekva = i386_trunc_page((vm_offset_t) bp->b_un.b_addr);
kvaf[kvasfreecnt].addr = bouncekva;
kvaf[kvasfreecnt++].size = countvmpg * NBPG;
if (bmwait) {
/*
* if anyone is waiting on the bounce-map, then wakeup
*/
wakeup((caddr_t) bounce_map);
bmwait = 0;
}
/* restore the original buffer state */
bp->b_un.b_addr = bp->b_savekva;
bp->b_savekva = 0;
bp->b_flags &= ~B_BOUNCE;
return;
}
#endif /* NOBOUNCE */
/*
* Finish a fork operation, with process p2 nearly set up.
* Copy and update the kernel stack and pcb, making the child

View File

@ -35,7 +35,7 @@
* SUCH DAMAGE.
*
* from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
* $Id: machdep.c,v 1.38 1994/03/07 11:47:31 davidg Exp $
* $Id: machdep.c,v 1.39 1994/03/19 23:58:58 wollman Exp $
*/
#include "npx.h"
@ -117,7 +117,13 @@ int bufpages = BUFPAGES;
#else
int bufpages = 0;
#endif
#ifdef BOUNCEPAGES
int bouncepages = BOUNCEPAGES;
#else
int bouncepages = 0;
#endif
extern int freebufspace;
extern char *bouncememory;
int _udatasel, _ucodesel;
@ -244,6 +250,18 @@ cpu_startup()
valloc(swbuf, struct buf, nswbuf);
valloc(buf, struct buf, nbuf);
#ifndef NOBOUNCE
/*
* If there is more than 16MB of memory, allocate some bounce buffers
*/
if (Maxmem > 4096) {
if (bouncepages == 0)
bouncepages = 96; /* largest physio size + extra */
v = (caddr_t)((vm_offset_t)((vm_offset_t)v + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1));
valloc(bouncememory, char, bouncepages * PAGE_SIZE);
}
#endif
/*
* End of first pass, size has been calculated so allocate memory
*/
@ -294,6 +312,13 @@ cpu_startup()
printf("using %d buffers containing %d bytes of memory\n",
nbuf, bufpages * CLBYTES);
#ifndef NOBOUNCE
/*
* init bounce buffers
*/
vm_bounce_init();
#endif
/*
* Set up CPU-specific registers, cache, etc.
*/

View File

@ -37,7 +37,7 @@
*
* from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91
* Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
* $Id: vm_machdep.c,v 1.12 1994/03/07 11:38:36 davidg Exp $
* $Id: vm_machdep.c,v 1.13 1994/03/21 09:35:10 davidg Exp $
*/
#include "npx.h"
@ -53,6 +53,338 @@
#include "vm/vm.h"
#include "vm/vm_kern.h"
#ifndef NOBOUNCE
caddr_t bouncememory;
vm_offset_t bouncepa, bouncepaend;
int bouncepages;
vm_map_t bounce_map;
int bmwait, bmfreeing;
int bounceallocarraysize;
unsigned *bounceallocarray;
int bouncefree;
#define SIXTEENMEG (4096*4096)
#define MAXBKVA 512
/* special list that can be used at interrupt time for eventual kva free */
struct kvasfree {
vm_offset_t addr;
vm_offset_t size;
} kvaf[MAXBKVA];
int kvasfreecnt;
/*
* vm_bounce_page_find:
* Allocate `count` physically contiguous bounce buffer pages and return
* the physical address of the first one.  Only count == 1 is implemented
* now; larger requests panic.  Blocks (tsleep on &bounceallocarray) until
* a page becomes free; vm_bounce_page_free() does the wakeup.
*/
vm_offset_t
vm_bounce_page_find(count)
int count;
{
int bit;
int s,i;
if (count != 1)
panic("vm_bounce_page_find -- no support for > 1 page yet!!!");
s = splbio();
retry:
/* scan the allocation bitmap, one word (8*sizeof(unsigned) pages) at a time */
for (i = 0; i < bounceallocarraysize; i++) {
if (bounceallocarray[i] != 0xffffffff) {
/* ffs() on the complement finds the first clear (free) bit */
if (bit = ffs(~bounceallocarray[i])) {
bounceallocarray[i] |= 1 << (bit - 1) ;
bouncefree -= count;
splx(s);
/* physical address = pool base + (global bit index) pages */
return bouncepa + (i * 8 * sizeof(unsigned) + (bit - 1)) * NBPG;
}
}
}
/*
* nothing free: sleep until a page is released, then rescan.
* NOTE(review): tsleep drops the priority level; the rescan after
* wakeup relies on splbio having been re-raised by tsleep's return
* path -- confirm against this kernel's tsleep semantics.
*/
tsleep((caddr_t) &bounceallocarray, PRIBIO, "bncwai", 0);
goto retry;
}
/*
* vm_bounce_page_free:
* Release `count` bounce buffer pages starting at physical address `pa`
* back to the pool, clear the corresponding allocation bit, and wake any
* sleepers in vm_bounce_page_find().  Only count == 1 is implemented.
* NOTE(review): no spl protection here -- presumably all callers already
* run at splbio (biodone path); verify.
*/
void
vm_bounce_page_free(pa, count)
vm_offset_t pa;
int count;
{
int allocindex;
int index;
int bit;
if (count != 1)
panic("vm_bounce_page_free -- no support for > 1 page yet!!!\n");
/* page index within the bounce pool; must lie inside [0, bouncepages) */
index = (pa - bouncepa) / NBPG;
if ((index < 0) || (index >= bouncepages))
panic("vm_bounce_page_free -- bad index\n");
/* locate the bitmap word and bit for this page, then clear it */
allocindex = index / (8 * sizeof(unsigned));
bit = index % (8 * sizeof(unsigned));
bounceallocarray[allocindex] &= ~(1 << bit);
bouncefree += count;
/* wake anyone blocked in vm_bounce_page_find() */
wakeup((caddr_t) &bounceallocarray);
}
/*
* vm_bounce_kva:
* Allocate `count` pages of pageable kernel virtual address space from
* the bounce_map submap.  Before allocating, drain the deferred-free
* list (kvaf[]) that vm_bounce_free() fills at interrupt time: unmap and
* free each saved range.  `bmfreeing` guards against reentrant draining;
* if the list grew while we were draining, restart from where we left
* off.  If the submap is exhausted, sleep on bounce_map until
* vm_bounce_free() signals space and retry.
*/
vm_offset_t
vm_bounce_kva(count)
int count;
{
int tofree;
int i;
int startfree;
vm_offset_t kva;
int s = splbio();
startfree = 0;
more:
if (!bmfreeing && (tofree = kvasfreecnt)) {
bmfreeing = 1;
more1:
/* NOTE(review): the more1 label appears to be unused */
for (i = startfree; i < kvasfreecnt; i++) {
/* tear down the mapping, then return the kva to the submap */
pmap_remove(kernel_pmap,
kvaf[i].addr, kvaf[i].addr + kvaf[i].size);
kmem_free_wakeup(bounce_map, kvaf[i].addr,
kvaf[i].size);
}
/* list grew underneath us (interrupt added entries): go again */
if (kvasfreecnt != tofree) {
startfree = i;
bmfreeing = 0;
goto more;
}
kvasfreecnt = 0;
bmfreeing = 0;
}
/* no kva available: wait for vm_bounce_free() to wake us, then retry */
if (!(kva = kmem_alloc_pageable(bounce_map, count * NBPG))) {
bmwait = 1;
tsleep((caddr_t) bounce_map, PRIBIO, "bmwait", 0);
goto more;
}
splx(s);
return kva;
}
/*
* vm_bounce_init:
* One-time initialization of the bounce buffer system, called from
* cpu_startup() after `bouncememory` has been allocated.  Sizes and
* zeroes the page allocation bitmap, carves a kva submap (bounce_map)
* out of kernel_map for replacement mappings, and records the physical
* range [bouncepa, bouncepaend) backing the bounce pool.  A no-op when
* no bounce pages were configured.
*/
void
vm_bounce_init()
{
vm_offset_t minaddr, maxaddr;
if (bouncepages == 0)
return;
/* one bit per bounce page, rounded up to whole unsigned words */
bounceallocarraysize = (bouncepages + (8*sizeof(unsigned))-1) / (8 * sizeof(unsigned));
bounceallocarray = malloc(bounceallocarraysize * sizeof(unsigned), M_TEMP, M_NOWAIT);
if (!bounceallocarray)
panic("Cannot allocate bounce resource array\n");
/*
* NOTE(review): bzero sizes with sizeof(long) but the array was
* allocated with sizeof(unsigned) -- identical on i386 (both 4
* bytes), but the mismatch is worth confirming/cleaning up.
*/
bzero(bounceallocarray, bounceallocarraysize * sizeof(long));
bounce_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, MAXBKVA * NBPG, FALSE);
/* bouncememory is virtually contiguous; record its physical base/end */
bouncepa = pmap_extract(kernel_pmap, (vm_offset_t) bouncememory);
bouncepaend = bouncepa + bouncepages * NBPG;
bouncefree = bouncepages;
kvasfreecnt = 0;
}
/*
* vm_bounce_alloc:
* Prepare a struct buf for DMA by an ISA device that can only address
* the low 16MB.  Called before the disk sort in the driver strategy
* routines.  If any page backing bp->b_un.b_addr lies at or above 16MB,
* build a replacement kva range in which high pages are remapped to
* bounce pool pages (copying the data in for writes) and low pages are
* mapped through unchanged.  The original kva is saved in b_savekva and
* B_BOUNCE is set so vm_bounce_free() can undo this at biodone time.
* No-op if bounce buffering is not configured or no page is high.
*/
void
vm_bounce_alloc(bp)
struct buf *bp;
{
int countvmpg;
vm_offset_t vastart, vaend;
vm_offset_t vapstart, vapend;
vm_offset_t va, kva;
vm_offset_t pa;
int dobounceflag = 0;
int bounceindex;
int i;
int s;
if (bouncepages == 0)
return;
/* page-align the buffer's kva range and count the pages it spans */
vastart = (vm_offset_t) bp->b_un.b_addr;
vaend = (vm_offset_t) bp->b_un.b_addr + bp->b_bcount;
vapstart = i386_trunc_page(vastart);
vapend = i386_round_page(vaend);
countvmpg = (vapend - vapstart) / NBPG;
/*
* if any page is above 16MB, then go into bounce-buffer mode
*/
va = vapstart;
for (i = 0; i < countvmpg; i++) {
pa = pmap_extract(kernel_pmap, va);
if (pa >= SIXTEENMEG)
++dobounceflag;
va += NBPG;
}
if (dobounceflag == 0)
return;
if (bouncepages < dobounceflag)
panic("Not enough bounce buffers!!!");
/*
* allocate a replacement kva for b_addr
*/
kva = vm_bounce_kva(countvmpg);
va = vapstart;
for (i = 0; i < countvmpg; i++) {
pa = pmap_extract(kernel_pmap, va);
if (pa >= SIXTEENMEG) {
/*
* high page: allocate a replacement page from the bounce pool
* (vm_bounce_page_find may sleep until one is free)
*/
vm_offset_t bpa = vm_bounce_page_find(1);
pmap_enter(kernel_pmap, kva + (NBPG * i), bpa, VM_PROT_DEFAULT,
TRUE);
/*
* if we are writing, copy the data into the bounce page now;
* for reads vm_bounce_free() copies it back after I/O completes
*/
if ((bp->b_flags & B_READ) == 0)
bcopy((caddr_t) va, (caddr_t) kva + (NBPG * i), NBPG);
} else {
/*
* low page: map the original physical page into the new kva
*/
pmap_enter(kernel_pmap, kva + (NBPG * i), pa, VM_PROT_DEFAULT,
TRUE);
}
va += NBPG;
}
/*
* flag the buffer as being bounced
*/
bp->b_flags |= B_BOUNCE;
/*
* save the original buffer kva
*/
bp->b_savekva = bp->b_un.b_addr;
/*
* put our new kva into the buffer (offset by original offset);
* the page-offset bits of the old address are OR'ed onto the
* page-aligned replacement kva
*/
bp->b_un.b_addr = (caddr_t) (((vm_offset_t) kva) |
((vm_offset_t) bp->b_savekva & (NBPG - 1)));
return;
}
/*
* vm_bounce_free:
* Undo vm_bounce_alloc() at biodone time.  Walk every page of the
* replacement kva range: pages whose physical address falls inside the
* bounce pool are bounce pages -- for reads, copy their contents back
* into the original buffer, then return them to the pool.  The
* replacement kva itself is queued on kvaf[] for deferred freeing by
* vm_bounce_kva() (this runs at interrupt time, so it cannot free the
* kva directly), and anyone sleeping for bounce kva is woken.  Finally
* the original kva is restored in the buf and B_BOUNCE is cleared.
* No-op if the buffer was never bounced.
*/
void
vm_bounce_free(bp)
struct buf *bp;
{
int i;
vm_offset_t origkva, bouncekva;
vm_offset_t vastart, vaend;
vm_offset_t vapstart, vapend;
int countbounce = 0;
vm_offset_t firstbouncepa = 0;
int firstbounceindex;
int countvmpg;
vm_offset_t bcount;
int s;
/*
* if this isn't a bounced buffer, then just return
*/
if ((bp->b_flags & B_BOUNCE) == 0)
return;
origkva = (vm_offset_t) bp->b_savekva;
bouncekva = (vm_offset_t) bp->b_un.b_addr;
vastart = bouncekva;
vaend = bouncekva + bp->b_bcount;
bcount = bp->b_bcount;
vapstart = i386_trunc_page(vastart);
vapend = i386_round_page(vaend);
countvmpg = (vapend - vapstart) / NBPG;
/*
* check every page in the kva space for b_addr
*/
for (i = 0; i < countvmpg; i++) {
vm_offset_t mybouncepa;
vm_offset_t copycount;
/* bytes from bouncekva to the end of its page (partial first page ok) */
copycount = i386_round_page(bouncekva + 1) - bouncekva;
mybouncepa = pmap_extract(kernel_pmap, i386_trunc_page(bouncekva));
/*
* if this is a bounced pa, then process as one
*/
if ((mybouncepa >= bouncepa) && (mybouncepa < bouncepaend)) {
/* clamp to the bytes actually remaining in the transfer */
if (copycount > bcount)
copycount = bcount;
/*
* if this is a read, then copy from bounce buffer into original buffer
*/
if (bp->b_flags & B_READ)
bcopy((caddr_t) bouncekva, (caddr_t) origkva, copycount);
/*
* free the bounce allocation
*/
vm_bounce_page_free(i386_trunc_page(mybouncepa), 1);
}
origkva += copycount;
bouncekva += copycount;
bcount -= copycount;
}
/*
* add the old kva into the "to free" list; vm_bounce_kva() will
* pmap_remove and kmem_free it later at non-interrupt time.
* NOTE(review): kvasfreecnt is incremented without a bound check
* against MAXBKVA and without spl protection -- presumably this runs
* at splbio and the list cannot exceed MAXBKVA in practice; confirm.
*/
bouncekva = i386_trunc_page((vm_offset_t) bp->b_un.b_addr);
kvaf[kvasfreecnt].addr = bouncekva;
kvaf[kvasfreecnt++].size = countvmpg * NBPG;
if (bmwait) {
/*
* if anyone is waiting on the bounce-map, then wakeup
*/
wakeup((caddr_t) bounce_map);
bmwait = 0;
}
/* restore the original buffer state */
bp->b_un.b_addr = bp->b_savekva;
bp->b_savekva = 0;
bp->b_flags &= ~B_BOUNCE;
return;
}
#endif /* NOBOUNCE */
/*
* Finish a fork operation, with process p2 nearly set up.
* Copy and update the kernel stack and pcb, making the child

View File

@ -12,7 +12,7 @@
* on the understanding that TFS is not responsible for the correct
* functioning of this software in any circumstances.
*
* $Id: aha1542.c,v 1.21 1994/03/01 16:06:37 ats Exp $
* $Id: aha1542.c,v 1.22 1994/03/20 00:29:58 wollman Exp $
*/
/*
@ -589,6 +589,7 @@ ahaattach(dev)
aha->sc_link.adapter_targ = aha->aha_scsi_dev;
aha->sc_link.adapter = &aha_switch;
aha->sc_link.device = &aha_dev;
aha->sc_link.flags = SDEV_BOUNCE;
/*
* ask the adapter what subunits are present

View File

@ -14,7 +14,7 @@
*
* Ported to run under 386BSD by Julian Elischer (julian@tfs.com) Sept 1992
*
* $Id: cd.c,v 1.15 1994/01/29 10:30:32 rgrimes Exp $
* $Id: cd.c,v 1.16 1994/02/05 09:08:46 swallace Exp $
*/
#define SPLCD splbio
@ -419,6 +419,14 @@ cdstrategy(bp)
opri = SPLCD();
dp = &cd->buf_queue;
/*
* Use a bounce buffer if necessary
*/
#ifndef NOBOUNCE
if (cd->sc_link->flags & SDEV_BOUNCE)
vm_bounce_alloc(bp);
#endif
/*
* Place it in the queue of disk activities for this disk
*/

View File

@ -14,7 +14,7 @@
*
* Ported to run under 386BSD by Julian Elischer (julian@tfs.com) Sept 1992
*
* $Id: scsiconf.h,v 1.7 1993/11/18 05:02:59 rgrimes Exp $
* $Id: scsiconf.h,v 1.8 1993/12/19 00:54:55 wollman Exp $
*/
#ifndef SCSI_SCSICONF_H
#define SCSI_SCSICONF_H 1
@ -134,6 +134,7 @@ struct scsi_link
#define SDEV_MEDIA_LOADED 0x01 /* device figures are still valid */
#define SDEV_WAITING 0x02 /* a process is waiting for this */
#define SDEV_OPEN 0x04 /* at least 1 open session */
#define SDEV_BOUNCE 0x08 /* unit requires DMA bounce buffer */
#define SDEV_DBX 0xF0 /* debuging flags (scsi_debug.h) */
/*

View File

@ -14,7 +14,7 @@
*
* Ported to run under 386BSD by Julian Elischer (julian@dialix.oz.au) Sept 1992
*
* $Id: sd.c,v 1.19 1994/03/14 23:09:34 ats Exp $
* $Id: sd.c,v 1.20 1994/03/15 20:49:09 ats Exp $
*/
#define SPLSD splbio
@ -419,6 +419,14 @@ sdstrategy(bp)
opri = SPLSD();
dp = &sd->buf_queue;
/*
* Use a bounce buffer if necessary
*/
#ifndef NOBOUNCE
if (sd->sc_link->flags & SDEV_BOUNCE)
vm_bounce_alloc(bp);
#endif
/*
* Place it in the queue of disk activities for this disk
*/

View File

@ -21,13 +21,13 @@
* 16 Feb 93 Julian Elischer ADDED for SCSI system
* 1.15 is the last version to support MACH and OSF/1
*/
/* $Revision: 1.14 $ */
/* $Revision: 1.15 $ */
/*
* Ported to run under 386BSD by Julian Elischer (julian@tfs.com) Sept 1992
* major changes by Julian Elischer (julian@jules.dialix.oz.au) May 1993
*
* $Id: st.c,v 1.14 1993/12/19 00:54:59 wollman Exp $
* $Id: st.c,v 1.15 1994/01/29 10:30:41 rgrimes Exp $
*/
/*
@ -911,6 +911,14 @@ ststrategy(bp)
stminphys(bp);
opri = splbio();
/*
* Use a bounce buffer if necessary
*/
#ifndef NOBOUNCE
if (st->sc_link->flags & SDEV_BOUNCE)
vm_bounce_alloc(bp);
#endif
/*
* Place it in the queue of activities for this tape
* at the end (a bit silly because we only have one user..