Back out rev. 1.91, which implemented bulk read transfers in ugen(4) as asynchronous. I realize that this means the custom application will not work as written, but it is not okay to break most users of ugen(4).

The major problem is that a bulk read transfer is not an interrupt saying that X bytes are available -- it is a request to be able to receive up to X bytes, with timeout T and short-transfer-okayness S. The timeout is a software mechanism that ugen(4) provides and cannot be implemented using asynchronous reads -- the timeout must start at the time a read is done. How many bytes can be received in a given transfer, and whether a short transfer returns data or an error, is also encoded at least in ohci(4)'s requests to the controller. Trying to detect the "maximum width" results in using a single buffer that is far too small when an application requests a large read. Even if you combat this by replacing all buffers again with the maximal-sized read buffer (1 KB) that ugen(4) previously allowed you to use, you still do not get the right semantics -- you have to throw data away, or invalidate all the timeouts, or invalidate the short-transfer settings.

There is no way to do this right without extending the ugen(4) API much further -- it breaks the USB camera interfaces in use, for example, because they need a chain of many maximal-width transfers, and it makes cross-platform support across the BSDs gratuitously hard.

Instead of trying to select(2) on a bulk read pipe -- which carries neither the desired transfer length nor any way to implement the timeout -- an application can simply use a kernel thread and a pipe to turn that endpoint into something poll-able. It is unfortunate that bulk endpoints cannot provide the same semantics that interrupt and isochronous endpoints can, but an application can use ioctl(USB_GET_ENDPOINT_DESC) to find out when different semantics are required, without preventing the normal users of the ugen(4) device from working.
parent 6f4006f1f5
commit 601486239c
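As an illustration of the suggested workaround -- this is a sketch only, not part of this commit -- an application that wants poll(2)/select(2)-style readiness from a bulk IN endpoint can query the endpoint's transfer type through the control endpoint and, if it is bulk, pair a reader thread with an ordinary pipe. The device paths and descriptor indices below are hypothetical, and the code assumes the usual <dev/usb/usb.h> definitions (struct usb_endpoint_desc, USB_GET_ENDPOINT_DESC, USB_SET_TIMEOUT, UE_GET_XFERTYPE, UE_BULK) plus POSIX threads.

/*
 * Sketch only -- not part of this commit.  Device paths and descriptor
 * indices are hypothetical; error handling is minimal.
 */
#include <sys/ioctl.h>
#include <dev/usb/usb.h>

#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static int ugen_fd;             /* bulk IN endpoint, e.g. /dev/ugen0.1 */
static int notify_pipe[2];      /* [0] is poll-able, [1] is fed by the thread */

/* Reader thread: blocking reads on the endpoint, forwarded into a pipe. */
static void *
bulk_reader(void *arg)
{
    char buf[4096];
    ssize_t n;

    (void)arg;
    for (;;) {
        n = read(ugen_fd, buf, sizeof(buf));
        if (n < 0 && errno == ETIMEDOUT)
            continue;           /* per-read timeout expired, try again */
        if (n <= 0)
            break;
        if (write(notify_pipe[1], buf, (size_t)n) != n)
            break;
    }
    close(notify_pipe[1]);
    return (NULL);
}

int
main(void)
{
    struct usb_endpoint_desc ed;
    struct pollfd pfd;
    pthread_t tid;
    int ctl_fd, timeout_ms = 1000;

    /* Control endpoint: ask what transfer type the endpoint uses. */
    if ((ctl_fd = open("/dev/ugen0", O_RDWR)) < 0)
        err(1, "open control endpoint");
    ed.ued_config_index = USB_CURRENT_CONFIG_INDEX;
    ed.ued_interface_index = 0;                 /* hypothetical */
    ed.ued_alt_index = USB_CURRENT_ALT_INDEX;
    ed.ued_endpoint_index = 0;                  /* hypothetical */
    if (ioctl(ctl_fd, USB_GET_ENDPOINT_DESC, &ed) < 0)
        err(1, "USB_GET_ENDPOINT_DESC");
    if (UE_GET_XFERTYPE(ed.ued_desc.bmAttributes) != UE_BULK)
        errx(1, "not a bulk endpoint; poll the endpoint device directly");

    /* Bulk IN endpoint device, with a per-read timeout. */
    if ((ugen_fd = open("/dev/ugen0.1", O_RDONLY)) < 0)
        err(1, "open bulk endpoint");
    if (ioctl(ugen_fd, USB_SET_TIMEOUT, &timeout_ms) < 0)
        err(1, "USB_SET_TIMEOUT");

    if (pipe(notify_pipe) < 0)
        err(1, "pipe");
    if (pthread_create(&tid, NULL, bulk_reader, NULL) != 0)
        errx(1, "pthread_create");

    /* The pipe's read end is now an ordinary poll-able descriptor. */
    pfd.fd = notify_pipe[0];
    pfd.events = POLLIN;
    for (;;) {
        if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
            char data[4096];
            ssize_t n = read(notify_pipe[0], data, sizeof(data));

            if (n <= 0)
                break;
            printf("got %zd bytes from the bulk endpoint\n", n);
        }
    }
    return (0);
}

The pipe's read end is what the main loop polls; the per-read timeout and short-transfer policy still apply inside the reader thread's read(2), which is exactly why they cannot be pushed into an asynchronous kernel-side transfer.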
@@ -122,14 +122,6 @@ struct ugen_endpoint {
         void *dmabuf;
         u_int16_t sizes[UGEN_NISORFRMS];
     } isoreqs[UGEN_NISOREQS];
-    struct {
-        usbd_xfer_handle xfer;
-        int err;
-        int len;
-        int maxlen;
-        void *buf;
-        int datardy;
-    } bulkreq;
 };
 
 struct ugen_softc {
@@ -216,7 +208,6 @@ Static void ugenintr(usbd_xfer_handle xfer, usbd_private_handle addr,
             usbd_status status);
 Static void ugen_isoc_rintr(usbd_xfer_handle xfer, usbd_private_handle addr,
             usbd_status status);
-Static void ugen_rdcb(usbd_xfer_handle, usbd_private_handle, usbd_status);
 Static int ugen_do_read(struct ugen_softc *, int, struct uio *, int);
 Static int ugen_do_write(struct ugen_softc *, int, struct uio *, int);
 Static int ugen_do_ioctl(struct ugen_softc *, int, u_long,
@@ -504,32 +495,6 @@ ugenopen(struct cdev *dev, int flag, int mode, usb_proc_ptr p)
             edesc->bEndpointAddress, 0, &sce->pipeh);
         if (err)
             return (EIO);
-
-        if (dir == OUT)
-            break;
-
-        /* If this is the read pipe, set up an async xfer. */
-        isize = UGETW(edesc->wMaxPacketSize);
-        if (isize == 0) /* shouldn't happen */
-            return (EINVAL);
-        sce->bulkreq.buf = malloc(isize, M_USBDEV, M_WAITOK);
-        DPRINTFN(5, ("ugenopen: bulk endpt=%d,isize=%d\n",
-            endpt, isize));
-        sce->bulkreq.xfer = usbd_alloc_xfer(sc->sc_udev);
-        if (sce->bulkreq.xfer == 0) {
-            free(sce->bulkreq.buf, M_USBDEV);
-            return (ENOMEM);
-        }
-        sce->bulkreq.maxlen = isize;
-        sce->bulkreq.err = 0;
-        sce->bulkreq.datardy = 0;
-        usbd_setup_xfer(sce->bulkreq.xfer, sce->pipeh, sce,
-            sce->bulkreq.buf, sce->bulkreq.maxlen,
-            sce->state & UGEN_SHORT_OK ?
-            USBD_SHORT_XFER_OK : 0, sce->timeout,
-            ugen_rdcb);
-        usbd_transfer(sce->bulkreq.xfer);
-
         break;
     case UE_ISOCHRONOUS:
         if (dir == OUT)
@@ -651,15 +616,6 @@ ugenclose(struct cdev *dev, int flag, int mode, usb_proc_ptr p)
             sce->ibuf = NULL;
             clfree(&sce->q);
         }
-
-        if (sce->bulkreq.buf != NULL)
-            free(sce->bulkreq.buf, M_USBDEV);
-        if (sce->bulkreq.xfer != NULL) {
-            ugen_rdcb(sce->bulkreq.xfer, sce, USBD_INTERRUPTED);
-            usbd_free_xfer(sce->bulkreq.xfer);
-            sce->bulkreq.xfer = NULL;
-        }
-
     }
     sc->sc_is_open[endpt] = 0;
     UGEN_DEV_CLOSE(dev, sc);
@@ -667,43 +623,13 @@ ugenclose(struct cdev *dev, int flag, int mode, usb_proc_ptr p)
     return (0);
 }
 
-Static void
-ugen_rdcb(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status)
-{
-    struct ugen_endpoint *sce;
-
-    sce = priv;
-
-    if (status != USBD_NORMAL_COMPLETION) {
-        if (status == USBD_INTERRUPTED)
-            sce->bulkreq.err = EINTR;
-        else if (status == USBD_TIMEOUT)
-            sce->bulkreq.err = ETIMEDOUT;
-        else
-            sce->bulkreq.err = EIO;
-    } else {
-        sce->bulkreq.err = 0;
-        sce->bulkreq.datardy = 1;
-        usbd_get_xfer_status(xfer, NULL, NULL,
-            &sce->bulkreq.len, NULL);
-    }
-
-    if (sce->state & UGEN_ASLP) {
-        sce->state &= ~UGEN_ASLP;
-        wakeup(sce);
-    }
-
-    selwakeuppri(&sce->rsel, PZERO);
-
-    return;
-}
-
 Static int
 ugen_do_read(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
 {
     struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][IN];
     u_int32_t n, tn;
-    int isize;
+    char buf[UGEN_BBSIZE];
+    usbd_xfer_handle xfer;
     usbd_status err;
     int s;
     int error = 0;
@@ -765,47 +691,32 @@ ugen_do_read(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
         }
         break;
     case UE_BULK:
-        isize = UGETW(sce->edesc->wMaxPacketSize);
-        while ((n = min(isize, uio->uio_resid)) != 0) {
+        xfer = usbd_alloc_xfer(sc->sc_udev);
+        if (xfer == 0)
+            return (ENOMEM);
+        while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
             DPRINTFN(1, ("ugenread: start transfer %d bytes\n",n));
             tn = n;
-
-            /* Wait for data to be ready. */
-
-            while (sce->bulkreq.datardy == 0) {
-                sce->state |= UGEN_ASLP;
-                error = tsleep(sce, PCATCH | PZERO,
-                    "ugenrd", 0);
-                if (sce->bulkreq.err || error) {
-                    sce->state &= ~UGEN_ASLP;
-                    break;
-                }
-            }
-
-            err = sce->bulkreq.err;
-            if (err == EIO) {
-                error = ENXIO;
-                break;
-            }
-            tn = sce->bulkreq.len;
-            error = uiomove(sce->bulkreq.buf, tn, uio);
-
-            /* Set up a new transfer. */
-
-            sce->bulkreq.datardy = 0;
-            usbd_setup_xfer(sce->bulkreq.xfer, sce->pipeh, sce,
-                sce->bulkreq.buf, sce->bulkreq.maxlen,
-                sce->state & UGEN_SHORT_OK ?
-                USBD_SHORT_XFER_OK : 0, sce->timeout, ugen_rdcb);
-            usbd_transfer(sce->bulkreq.xfer);
-
-            if (err)
-                break;
-
-            DPRINTFN(1, ("ugenread: got %d bytes\n", tn));
-            if (error || tn < n)
-                break;
-        }
+            err = usbd_bulk_transfer(
+                xfer, sce->pipeh,
+                sce->state & UGEN_SHORT_OK ?
+                USBD_SHORT_XFER_OK : 0,
+                sce->timeout, buf, &tn, "ugenrb");
+            if (err) {
+                if (err == USBD_INTERRUPTED)
+                    error = EINTR;
+                else if (err == USBD_TIMEOUT)
+                    error = ETIMEDOUT;
+                else
+                    error = EIO;
+                break;
+            }
+            DPRINTFN(1, ("ugenread: got %d bytes\n", tn));
+            error = uiomove(buf, tn, uio);
+            if (error || tn < n)
+                break;
+        }
+        usbd_free_xfer(xfer);
         break;
     case UE_ISOCHRONOUS:
         s = splusb();
@@ -995,12 +906,6 @@ ugenpurge(struct cdev *dev)
         sce = &sc->sc_endpoints[endpt][IN];
         if (sce->pipeh)
             usbd_abort_pipe(sce->pipeh);
-        /* cancel async bulk transfer */
-        if (sce->bulkreq.xfer != NULL) {
-            ugen_rdcb(sce->bulkreq.xfer, sce, USBD_IOERROR);
-            usbd_free_xfer(sce->bulkreq.xfer);
-            sce->bulkreq.xfer = NULL;
-        }
     }
 #endif
 
@@ -1026,10 +931,6 @@ USB_DETACH(ugen)
             sce = &sc->sc_endpoints[i][dir];
             if (sce->pipeh)
                 usbd_abort_pipe(sce->pipeh);
-            /* cancel async bulk transfer */
-            if (sce->bulkreq.xfer != NULL)
-                ugen_rdcb(sce->bulkreq.xfer,
-                    sce, USBD_IOERROR);
         }
     }
 
@@ -1321,19 +1222,6 @@ ugen_do_ioctl(struct ugen_softc *sc, int endpt, u_long cmd,
             sce->state |= UGEN_SHORT_OK;
         else
             sce->state &= ~UGEN_SHORT_OK;
-        /*
-         * If this is a bulk data pipe awaiting data, then we
-         * need to restart the current operation with the new
-         * short transfer status set.
-         */
-        if (sce->bulkreq.xfer != NULL && sce->bulkreq.datardy == 0) {
-            usbd_abort_pipe(sce->pipeh);
-            usbd_setup_xfer(sce->bulkreq.xfer, sce->pipeh, sce,
-                sce->bulkreq.buf, sce->bulkreq.maxlen,
-                sce->state & UGEN_SHORT_OK ?
-                USBD_SHORT_XFER_OK : 0, sce->timeout, ugen_rdcb);
-            usbd_transfer(sce->bulkreq.xfer);
-        }
         return (0);
     case USB_SET_TIMEOUT:
         sce = &sc->sc_endpoints[endpt][IN];
@@ -1626,20 +1514,12 @@ ugenpoll(struct cdev *dev, int events, usb_proc_ptr p)
         break;
     case UE_BULK:
         /*
-         * We have async transfers for reads now, so we can
-         * select on those. Writes tend to complete immediately
-         * so we can get away without async code for those,
-         * though we should probably do async bulk out transfers
-         * too at some point.
+         * We have no easy way of determining if a read will
+         * yield any data or a write will happen.
+         * Pretend they will.
         */
-        if (events & (POLLIN | POLLRDNORM)) {
-            if (sce->bulkreq.datardy)
-                revents |= events & (POLLIN | POLLRDNORM);
-            else
-                selrecord(p, &sce->rsel);
-        }
-        if (events & (POLLOUT | POLLWRNORM))
-            revents |= events & (POLLOUT | POLLWRNORM);
+        revents |= events &
+            (POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM);
         break;
     default:
         break;