2005-01-06 01:43:34 +00:00
|
|
|
/*-
|
2003-04-17 03:38:03 +00:00
|
|
|
* Copyright (c) 2002-2003
|
2002-09-13 12:31:56 +00:00
|
|
|
* Hidetoshi Shimokawa. All rights reserved.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
* 3. All advertising materials mentioning features or use of this software
|
|
|
|
* must display the following acknowledgement:
|
|
|
|
*
|
|
|
|
* This product includes software developed by Hidetoshi Shimokawa.
|
|
|
|
*
|
|
|
|
* 4. Neither the name of the author nor the names of its contributors
|
|
|
|
* may be used to endorse or promote products derived from this software
|
|
|
|
* without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
|
|
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
|
|
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
|
|
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
|
|
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
|
|
* SUCH DAMAGE.
|
|
|
|
*
|
|
|
|
* $FreeBSD$
|
|
|
|
*/
|
|
|
|
|
2005-10-05 10:09:17 +00:00
|
|
|
#ifdef HAVE_KERNEL_OPTION_HEADERS
|
|
|
|
#include "opt_device_polling.h"
|
2002-09-13 12:31:56 +00:00
|
|
|
#include "opt_inet.h"
|
2005-10-05 10:09:17 +00:00
|
|
|
#endif
|
2002-09-13 12:31:56 +00:00
|
|
|
|
|
|
|
#include <sys/param.h>
|
|
|
|
#include <sys/kernel.h>
|
|
|
|
#include <sys/malloc.h>
|
|
|
|
#include <sys/mbuf.h>
|
|
|
|
#include <sys/socket.h>
|
|
|
|
#include <sys/sockio.h>
|
|
|
|
#include <sys/sysctl.h>
|
|
|
|
#include <sys/systm.h>
|
|
|
|
#include <sys/module.h>
|
|
|
|
#include <sys/bus.h>
|
2003-04-17 03:38:03 +00:00
|
|
|
#include <machine/bus.h>
|
2002-09-13 12:31:56 +00:00
|
|
|
|
|
|
|
#include <net/bpf.h>
|
|
|
|
#include <net/ethernet.h>
|
|
|
|
#include <net/if.h>
|
2013-10-26 17:58:36 +00:00
|
|
|
#include <net/if_var.h>
|
2002-09-13 12:31:56 +00:00
|
|
|
#include <net/if_arp.h>
|
2005-06-10 16:49:24 +00:00
|
|
|
#include <net/if_types.h>
|
2002-09-13 12:31:56 +00:00
|
|
|
#include <net/if_vlan_var.h>
|
|
|
|
|
|
|
|
#include <dev/firewire/firewire.h>
|
|
|
|
#include <dev/firewire/firewirereg.h>
|
|
|
|
#include <dev/firewire/if_fwevar.h>
|
|
|
|
|
2003-11-06 04:19:15 +00:00
|
|
|
#define FWEDEBUG if (fwedebug) if_printf
|
2003-03-06 05:06:44 +00:00
|
|
|
#define TX_MAX_QUEUE (FWMAXQUEUE - 1)
|
2002-09-13 12:31:56 +00:00
|
|
|
|
|
|
|
/* network interface */
|
2004-01-06 14:30:47 +00:00
|
|
|
static void fwe_start (struct ifnet *);
|
|
|
|
static int fwe_ioctl (struct ifnet *, u_long, caddr_t);
|
|
|
|
static void fwe_init (void *);
|
2002-09-13 12:31:56 +00:00
|
|
|
|
2004-01-06 14:30:47 +00:00
|
|
|
static void fwe_output_callback (struct fw_xfer *);
|
|
|
|
static void fwe_as_output (struct fwe_softc *, struct ifnet *);
|
|
|
|
static void fwe_as_input (struct fw_xferq *);
|
2002-09-13 12:31:56 +00:00
|
|
|
|
|
|
|
static int fwedebug = 0;
|
|
|
|
static int stream_ch = 1;
|
2003-06-10 02:27:39 +00:00
|
|
|
static int tx_speed = 2;
|
2003-11-13 06:29:40 +00:00
|
|
|
static int rx_queue_len = FWMAXQUEUE;
|
2002-09-13 12:31:56 +00:00
|
|
|
|
2011-11-07 06:44:47 +00:00
|
|
|
static MALLOC_DEFINE(M_FWE, "if_fwe", "Ethernet over FireWire interface");
|
2014-06-28 03:56:17 +00:00
|
|
|
SYSCTL_INT(_debug, OID_AUTO, if_fwe_debug, CTLFLAG_RWTUN, &fwedebug, 0, "");
|
2002-09-13 12:31:56 +00:00
|
|
|
SYSCTL_DECL(_hw_firewire);
|
2011-11-07 15:43:11 +00:00
|
|
|
static SYSCTL_NODE(_hw_firewire, OID_AUTO, fwe, CTLFLAG_RD, 0,
|
2003-11-13 06:29:40 +00:00
|
|
|
"Ethernet emulation subsystem");
|
2014-06-28 03:56:17 +00:00
|
|
|
SYSCTL_INT(_hw_firewire_fwe, OID_AUTO, stream_ch, CTLFLAG_RWTUN, &stream_ch, 0,
|
2002-09-13 12:31:56 +00:00
|
|
|
"Stream channel to use");
|
2014-06-28 03:56:17 +00:00
|
|
|
SYSCTL_INT(_hw_firewire_fwe, OID_AUTO, tx_speed, CTLFLAG_RWTUN, &tx_speed, 0,
|
2003-11-13 06:29:40 +00:00
|
|
|
"Transmission speed");
|
2014-06-28 03:56:17 +00:00
|
|
|
SYSCTL_INT(_hw_firewire_fwe, OID_AUTO, rx_queue_len, CTLFLAG_RWTUN, &rx_queue_len,
|
2003-11-13 06:29:40 +00:00
|
|
|
0, "Length of the receive queue");
|
|
|
|
|
2002-09-13 12:31:56 +00:00
|
|
|
#ifdef DEVICE_POLLING
|
|
|
|
static poll_handler_t fwe_poll;
|
|
|
|
|
2009-05-30 15:14:44 +00:00
|
|
|
/*
 * polling(4) handler: run one polling pass by delegating to the
 * underlying firewire controller's poll method.
 *
 * Returns 0 always; the interface registers IFCAP_POLLING_NOCOUNT,
 * so no packet count is reported back to the polling core.
 */
static int
fwe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct fwe_softc *fwe;
	struct firewire_comm *fc;

	/* Nothing to do when the interface is not running. */
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return (0);

	fwe = ((struct fwe_eth_softc *)ifp->if_softc)->fwe;
	fc = fwe->fd.fc;
	/*
	 * Second argument: 0 for POLL_AND_CHECK_STATUS, 1 otherwise.
	 * Its exact interpretation is up to the controller's poll method.
	 */
	fc->poll(fc, (cmd == POLL_AND_CHECK_STATUS)?0:1, count);
	return (0);
}
|
Big polling(4) cleanup.
o Axe poll in trap.
o Axe IFF_POLLING flag from if_flags.
o Rework revision 1.21 (Giant removal), in such a way that
poll_mtx is not dropped during call to polling handler.
This fixes problem with idle polling.
o Make registration and deregistration from polling in a
functional way, insted of next tick/interrupt.
o Obsolete kern.polling.enable. Polling is turned on/off
with ifconfig.
Detailed kern_poll.c changes:
- Remove polling handler flags, introduced in 1.21. The are not
needed now.
- Forget and do not check if_flags, if_capenable and if_drv_flags.
- Call all registered polling handlers unconditionally.
- Do not drop poll_mtx, when entering polling handlers.
- In ether_poll() NET_LOCK_GIANT prior to locking poll_mtx.
- In netisr_poll() axe the block, where polling code asks drivers
to unregister.
- In netisr_poll() and ether_poll() do polling always, if any
handlers are present.
- In ether_poll_[de]register() remove a lot of error hiding code. Assert
that arguments are correct, instead.
- In ether_poll_[de]register() use standard return values in case of
error or success.
- Introduce poll_switch() that is a sysctl handler for kern.polling.enable.
poll_switch() goes through interface list and enabled/disables polling.
A message that kern.polling.enable is deprecated is printed.
Detailed driver changes:
- On attach driver announces IFCAP_POLLING in if_capabilities, but
not in if_capenable.
- On detach driver calls ether_poll_deregister() if polling is enabled.
- In polling handler driver obtains its lock and checks IFF_DRV_RUNNING
flag. If there is no, then unlocks and returns.
- In ioctl handler driver checks for IFCAP_POLLING flag requested to
be set or cleared. Driver first calls ether_poll_[de]register(), then
obtains driver lock and [dis/en]ables interrupts.
- In interrupt handler driver checks IFCAP_POLLING flag in if_capenable.
If present, then returns.This is important to protect from spurious
interrupts.
Reviewed by: ru, sam, jhb
2005-10-01 18:56:19 +00:00
|
|
|
#endif /* DEVICE_POLLING */
|
|
|
|
|
2002-09-13 12:31:56 +00:00
|
|
|
/*
 * Bus identify method: create an fwe child device carrying the same
 * unit number as the parent firewire bus, so fwe_probe() can match
 * them up one-to-one.
 */
static void
fwe_identify(driver_t *driver, device_t parent)
{
	BUS_ADD_CHILD(parent, 0, "fwe", device_get_unit(parent));
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
fwe_probe(device_t dev)
|
|
|
|
{
|
|
|
|
device_t pa;
|
|
|
|
|
|
|
|
pa = device_get_parent(dev);
|
|
|
|
if(device_get_unit(dev) != device_get_unit(pa)){
|
|
|
|
return(ENXIO);
|
|
|
|
}
|
|
|
|
|
2002-12-26 06:50:09 +00:00
|
|
|
device_set_desc(dev, "Ethernet over FireWire");
|
2002-09-13 12:31:56 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Attach method: initialize the softc, synthesize a locally-administered
 * MAC address from the controller's EUI-64, allocate and configure the
 * ifnet, and attach it to the network stack.
 *
 * Returns 0 on success or ENOSPC if if_alloc() fails.
 */
static int
fwe_attach(device_t dev)
{
	struct fwe_softc *fwe;
	struct ifnet *ifp;
	int unit, s;
	u_char eaddr[6];
	struct fw_eui64 *eui;

	fwe = ((struct fwe_softc *)device_get_softc(dev));
	unit = device_get_unit(dev);

	bzero(fwe, sizeof(struct fwe_softc));
	mtx_init(&fwe->mtx, "fwe", NULL, MTX_DEF);
	/* XXX */
	fwe->stream_ch = stream_ch;
	/* -1 marks "no isochronous DMA channel allocated yet". */
	fwe->dma_ch = -1;

	/* The parent firewire bus hands us the controller via ivars. */
	fwe->fd.fc = device_get_ivars(dev);
	/* Negative tx_speed means "use the controller's native speed". */
	if (tx_speed < 0)
		tx_speed = fwe->fd.fc->speed;

	fwe->fd.dev = dev;
	fwe->fd.post_explore = NULL;
	fwe->eth_softc.fwe = fwe;

	/* Pre-build the asynchronous stream packet header template. */
	fwe->pkt_hdr.mode.stream.tcode = FWTCODE_STREAM;
	fwe->pkt_hdr.mode.stream.sy = 0;
	fwe->pkt_hdr.mode.stream.chtag = fwe->stream_ch;

	/* generate fake MAC address: first and last 3bytes from eui64 */
#define LOCAL (0x02)
#define GROUP (0x01)

	eui = &fwe->fd.fc->eui;
	/* Set the locally-administered bit, clear the group (multicast) bit. */
	eaddr[0] = (FW_EUI64_BYTE(eui, 0) | LOCAL) & ~GROUP;
	eaddr[1] = FW_EUI64_BYTE(eui, 1);
	eaddr[2] = FW_EUI64_BYTE(eui, 2);
	eaddr[3] = FW_EUI64_BYTE(eui, 5);
	eaddr[4] = FW_EUI64_BYTE(eui, 6);
	eaddr[5] = FW_EUI64_BYTE(eui, 7);
	printf("if_fwe%d: Fake Ethernet address: "
		"%02x:%02x:%02x:%02x:%02x:%02x\n", unit,
		eaddr[0], eaddr[1], eaddr[2], eaddr[3], eaddr[4], eaddr[5]);

	/* fill the rest and attach interface */
	ifp = fwe->eth_softc.ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		return (ENOSPC);
	}
	ifp->if_softc = &fwe->eth_softc;

	if_initname(ifp, device_get_name(dev), unit);
	ifp->if_init = fwe_init;
	ifp->if_start = fwe_start;
	ifp->if_ioctl = fwe_ioctl;
	ifp->if_flags = (IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST);
	ifp->if_snd.ifq_maxlen = TX_MAX_QUEUE;

	s = splimp();
	ether_ifattach(ifp, eaddr);
	splx(s);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
	/* Announce polling capability; it is enabled later via ioctl. */
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_POLLING;
	ifp->if_capenable |= IFCAP_VLAN_MTU;

	FWEDEBUG(ifp, "interface created\n");
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Stop the interface: disable isochronous receive DMA, release the
 * receive bulk buffers and the transmit xfer pool, give the DMA
 * channel back, and clear the running flags.
 *
 * Safe to call when no DMA channel is held (fwe->dma_ch < 0).
 */
static void
fwe_stop(struct fwe_softc *fwe)
{
	struct firewire_comm *fc;
	struct fw_xferq *xferq;
	struct ifnet *ifp = fwe->eth_softc.ifp;
	struct fw_xfer *xfer, *next;
	int i;

	fc = fwe->fd.fc;

	if (fwe->dma_ch >= 0) {
		xferq = fc->ir[fwe->dma_ch];

		/* Quiesce the receive DMA engine before tearing down. */
		if (xferq->flag & FWXFERQ_RUNNING)
			fc->irx_disable(fc, fwe->dma_ch);
		xferq->flag &=
			~(FWXFERQ_MODEMASK | FWXFERQ_OPEN | FWXFERQ_STREAM |
			FWXFERQ_EXTBUF | FWXFERQ_HANDLER | FWXFERQ_CHTAGMASK);
		xferq->hand = NULL;

		/* Free the mbuf clusters backing the receive chunks. */
		for (i = 0; i < xferq->bnchunk; i ++)
			m_freem(xferq->bulkxfer[i].mbuf);
		free(xferq->bulkxfer, M_FWE);

		/* Drain and free the pre-allocated transmit xfer pool. */
		for (xfer = STAILQ_FIRST(&fwe->xferlist); xfer != NULL;
					xfer = next) {
			next = STAILQ_NEXT(xfer, link);
			fw_xfer_free(xfer);
		}
		STAILQ_INIT(&fwe->xferlist);

		xferq->bulkxfer = NULL;
		/* Mark the channel released; fwe_init() re-opens it. */
		fwe->dma_ch = -1;
	}

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
}
|
|
|
|
|
|
|
|
/*
 * Detach method: deregister from polling(4) if active, stop the
 * interface, detach and free the ifnet, and destroy the softc mutex.
 *
 * Always returns 0.
 */
static int
fwe_detach(device_t dev)
{
	struct fwe_softc *fwe;
	struct ifnet *ifp;
	int s;

	fwe = device_get_softc(dev);
	ifp = fwe->eth_softc.ifp;

#ifdef DEVICE_POLLING
	/* Polling must be torn down before the ifnet goes away. */
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif
	s = splimp();

	fwe_stop(fwe);
	ether_ifdetach(ifp);
	if_free(ifp);

	splx(s);
	mtx_destroy(&fwe->mtx);
	return 0;
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
fwe_init(void *arg)
|
|
|
|
{
|
|
|
|
struct fwe_softc *fwe = ((struct fwe_eth_softc *)arg)->fwe;
|
|
|
|
struct firewire_comm *fc;
|
2005-06-10 16:49:24 +00:00
|
|
|
struct ifnet *ifp = fwe->eth_softc.ifp;
|
2002-09-13 12:31:56 +00:00
|
|
|
struct fw_xferq *xferq;
|
2003-03-06 05:06:44 +00:00
|
|
|
struct fw_xfer *xfer;
|
2003-04-17 03:38:03 +00:00
|
|
|
struct mbuf *m;
|
2002-09-13 12:31:56 +00:00
|
|
|
int i;
|
|
|
|
|
2003-11-06 04:19:15 +00:00
|
|
|
FWEDEBUG(ifp, "initializing\n");
|
2002-09-13 12:31:56 +00:00
|
|
|
|
|
|
|
/* XXX keep promiscoud mode */
|
|
|
|
ifp->if_flags |= IFF_PROMISC;
|
|
|
|
|
|
|
|
fc = fwe->fd.fc;
|
|
|
|
if (fwe->dma_ch < 0) {
|
2007-06-06 14:31:36 +00:00
|
|
|
fwe->dma_ch = fw_open_isodma(fc, /* tx */0);
|
|
|
|
if (fwe->dma_ch < 0)
|
|
|
|
return;
|
|
|
|
xferq = fc->ir[fwe->dma_ch];
|
|
|
|
xferq->flag |= FWXFERQ_EXTBUF |
|
|
|
|
FWXFERQ_HANDLER | FWXFERQ_STREAM;
|
2002-09-13 12:31:56 +00:00
|
|
|
fwe->stream_ch = stream_ch;
|
|
|
|
fwe->pkt_hdr.mode.stream.chtag = fwe->stream_ch;
|
2003-03-19 07:04:11 +00:00
|
|
|
xferq->flag &= ~0xff;
|
2002-09-13 12:31:56 +00:00
|
|
|
xferq->flag |= fwe->stream_ch & 0xff;
|
|
|
|
/* register fwe_input handler */
|
|
|
|
xferq->sc = (caddr_t) fwe;
|
|
|
|
xferq->hand = fwe_as_input;
|
2003-11-13 06:29:40 +00:00
|
|
|
xferq->bnchunk = rx_queue_len;
|
2003-03-06 05:06:44 +00:00
|
|
|
xferq->bnpacket = 1;
|
|
|
|
xferq->psize = MCLBYTES;
|
|
|
|
xferq->queued = 0;
|
2003-04-17 03:38:03 +00:00
|
|
|
xferq->buf = NULL;
|
2003-03-06 05:06:44 +00:00
|
|
|
xferq->bulkxfer = (struct fw_bulkxfer *) malloc(
|
2003-04-17 03:38:03 +00:00
|
|
|
sizeof(struct fw_bulkxfer) * xferq->bnchunk,
|
|
|
|
M_FWE, M_WAITOK);
|
2003-03-06 05:06:44 +00:00
|
|
|
if (xferq->bulkxfer == NULL) {
|
|
|
|
printf("if_fwe: malloc failed\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
STAILQ_INIT(&xferq->stvalid);
|
|
|
|
STAILQ_INIT(&xferq->stfree);
|
|
|
|
STAILQ_INIT(&xferq->stdma);
|
|
|
|
xferq->stproc = NULL;
|
|
|
|
for (i = 0; i < xferq->bnchunk; i ++) {
|
2012-12-04 09:32:43 +00:00
|
|
|
m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR);
|
2003-04-17 03:38:03 +00:00
|
|
|
xferq->bulkxfer[i].mbuf = m;
|
2008-03-25 09:39:02 +00:00
|
|
|
m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
|
|
|
|
STAILQ_INSERT_TAIL(&xferq->stfree,
|
|
|
|
&xferq->bulkxfer[i], link);
|
2003-03-06 05:06:44 +00:00
|
|
|
}
|
|
|
|
STAILQ_INIT(&fwe->xferlist);
|
|
|
|
for (i = 0; i < TX_MAX_QUEUE; i++) {
|
|
|
|
xfer = fw_xfer_alloc(M_FWE);
|
|
|
|
if (xfer == NULL)
|
|
|
|
break;
|
2003-10-02 04:06:56 +00:00
|
|
|
xfer->send.spd = tx_speed;
|
2003-03-06 05:06:44 +00:00
|
|
|
xfer->fc = fwe->fd.fc;
|
|
|
|
xfer->sc = (caddr_t)fwe;
|
2007-03-16 05:39:33 +00:00
|
|
|
xfer->hand = fwe_output_callback;
|
2003-03-06 05:06:44 +00:00
|
|
|
STAILQ_INSERT_TAIL(&fwe->xferlist, xfer, link);
|
|
|
|
}
|
2002-09-13 12:31:56 +00:00
|
|
|
} else
|
|
|
|
xferq = fc->ir[fwe->dma_ch];
|
|
|
|
|
|
|
|
|
|
|
|
/* start dma */
|
|
|
|
if ((xferq->flag & FWXFERQ_RUNNING) == 0)
|
|
|
|
fc->irx_enable(fc, fwe->dma_ch);
|
|
|
|
|
2005-08-09 10:20:02 +00:00
|
|
|
ifp->if_drv_flags |= IFF_DRV_RUNNING;
|
|
|
|
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
|
2002-09-13 12:31:56 +00:00
|
|
|
|
|
|
|
#if 0
|
|
|
|
/* attempt to start output */
|
|
|
|
fwe_start(ifp);
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * if_ioctl handler.
 *
 * SIOCSIFFLAGS starts/stops the interface to track IFF_UP and forces
 * promiscuous mode back on.  Multicast list changes are accepted but
 * ignored.  SIOCGIFSTATUS reports the stream channel and DMA channel.
 * SIOCSIFCAP toggles polling(4) (interrupts are disabled while polling
 * is registered).  Everything else is handed to ether_ioctl().
 */
static int
fwe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct fwe_softc *fwe = ((struct fwe_eth_softc *)ifp->if_softc)->fwe;
	struct ifstat *ifs = NULL;
	int s, error;

	switch (cmd) {
	case SIOCSIFFLAGS:
		s = splimp();
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				fwe_init(&fwe->eth_softc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				fwe_stop(fwe);
		}
		/* XXX keep promiscoud mode */
		ifp->if_flags |= IFF_PROMISC;
		splx(s);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Multicast filtering is not implemented; accept silently. */
		break;

	case SIOCGIFSTATUS:
		s = splimp();
		ifs = (struct ifstat *)data;
		snprintf(ifs->ascii, sizeof(ifs->ascii),
		    "\tch %d dma %d\n",	fwe->stream_ch, fwe->dma_ch);
		splx(s);
		break;
	case SIOCSIFCAP:
#ifdef DEVICE_POLLING
	    {
		struct ifreq *ifr = (struct ifreq *) data;
		struct firewire_comm *fc = fwe->fd.fc;

		/* Enable polling: register first, then mask interrupts. */
		if (ifr->ifr_reqcap & IFCAP_POLLING &&
		    !(ifp->if_capenable & IFCAP_POLLING)) {
			error = ether_poll_register(fwe_poll, ifp);
			if (error)
				return(error);
			/* Disable interrupts */
			fc->set_intr(fc, 0);
			ifp->if_capenable |= IFCAP_POLLING;
			ifp->if_capenable |= IFCAP_POLLING_NOCOUNT;
			return (error);
		}
		/* Disable polling: deregister, then re-enable interrupts. */
		if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
		    ifp->if_capenable & IFCAP_POLLING) {
			error = ether_poll_deregister(ifp);
			/* Enable interrupts. */
			fc->set_intr(fc, 1);
			ifp->if_capenable &= ~IFCAP_POLLING;
			ifp->if_capenable &= ~IFCAP_POLLING_NOCOUNT;
			return (error);
		}
	    }
#endif /* DEVICE_POLLING */
		break;
	default:
		s = splimp();
		error = ether_ioctl(ifp, cmd, data);
		splx(s);
		return (error);
	}

	return (0);
}
|
|
|
|
|
2003-03-06 05:06:44 +00:00
|
|
|
static void
|
|
|
|
fwe_output_callback(struct fw_xfer *xfer)
|
|
|
|
{
|
|
|
|
struct fwe_softc *fwe;
|
|
|
|
struct ifnet *ifp;
|
|
|
|
int s;
|
|
|
|
|
|
|
|
fwe = (struct fwe_softc *)xfer->sc;
|
2005-06-10 16:49:24 +00:00
|
|
|
ifp = fwe->eth_softc.ifp;
|
2003-03-06 05:06:44 +00:00
|
|
|
/* XXX error check */
|
2003-11-06 04:19:15 +00:00
|
|
|
FWEDEBUG(ifp, "resp = %d\n", xfer->resp);
|
2003-03-06 05:06:44 +00:00
|
|
|
if (xfer->resp != 0)
|
|
|
|
ifp->if_oerrors ++;
|
|
|
|
|
|
|
|
m_freem(xfer->mbuf);
|
|
|
|
fw_xfer_unload(xfer);
|
2003-04-17 03:38:03 +00:00
|
|
|
|
2003-03-06 05:06:44 +00:00
|
|
|
s = splimp();
|
2007-06-06 14:31:36 +00:00
|
|
|
FWE_LOCK(fwe);
|
2003-03-06 05:06:44 +00:00
|
|
|
STAILQ_INSERT_TAIL(&fwe->xferlist, xfer, link);
|
2007-06-06 14:31:36 +00:00
|
|
|
FWE_UNLOCK(fwe);
|
2003-03-06 05:06:44 +00:00
|
|
|
splx(s);
|
2003-04-17 03:38:03 +00:00
|
|
|
|
|
|
|
/* for queue full */
|
2003-03-06 05:06:44 +00:00
|
|
|
if (ifp->if_snd.ifq_head != NULL)
|
|
|
|
fwe_start(ifp);
|
|
|
|
}
|
|
|
|
|
2002-09-13 12:31:56 +00:00
|
|
|
static void
|
|
|
|
fwe_start(struct ifnet *ifp)
|
|
|
|
{
|
|
|
|
struct fwe_softc *fwe = ((struct fwe_eth_softc *)ifp->if_softc)->fwe;
|
|
|
|
int s;
|
|
|
|
|
2003-11-06 04:19:15 +00:00
|
|
|
FWEDEBUG(ifp, "starting\n");
|
2002-09-13 12:31:56 +00:00
|
|
|
|
|
|
|
if (fwe->dma_ch < 0) {
|
|
|
|
struct mbuf *m = NULL;
|
|
|
|
|
2003-11-06 04:19:15 +00:00
|
|
|
FWEDEBUG(ifp, "not ready\n");
|
2002-09-13 12:31:56 +00:00
|
|
|
|
|
|
|
s = splimp();
|
|
|
|
do {
|
|
|
|
IF_DEQUEUE(&ifp->if_snd, m);
|
|
|
|
if (m != NULL)
|
|
|
|
m_freem(m);
|
|
|
|
ifp->if_oerrors ++;
|
|
|
|
} while (m != NULL);
|
|
|
|
splx(s);
|
|
|
|
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
s = splimp();
|
2005-08-09 10:20:02 +00:00
|
|
|
ifp->if_drv_flags |= IFF_DRV_OACTIVE;
|
2002-09-13 12:31:56 +00:00
|
|
|
|
|
|
|
if (ifp->if_snd.ifq_len != 0)
|
|
|
|
fwe_as_output(fwe, ifp);
|
|
|
|
|
2005-08-09 10:20:02 +00:00
|
|
|
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
|
2002-09-13 12:31:56 +00:00
|
|
|
splx(s);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Bytes of encapsulation header stripped from each received frame
 * (see fwe_as_input()). */
#define HDR_LEN 4
#ifndef ETHER_ALIGN
/* Padding prepended on transmit / skipped on receive so the payload
 * after the 14-byte Ethernet header stays 4-byte aligned. */
#define ETHER_ALIGN 2
#endif
|
2002-09-13 12:31:56 +00:00
|
|
|
/* Async. stream output */
|
|
|
|
static void
|
|
|
|
fwe_as_output(struct fwe_softc *fwe, struct ifnet *ifp)
|
|
|
|
{
|
|
|
|
struct mbuf *m;
|
|
|
|
struct fw_xfer *xfer;
|
|
|
|
struct fw_xferq *xferq;
|
|
|
|
struct fw_pkt *fp;
|
|
|
|
int i = 0;
|
|
|
|
|
|
|
|
xfer = NULL;
|
|
|
|
xferq = fwe->fd.fc->atq;
|
2007-06-06 14:31:36 +00:00
|
|
|
while ((xferq->queued < xferq->maxq - 1) &&
|
|
|
|
(ifp->if_snd.ifq_head != NULL)) {
|
|
|
|
FWE_LOCK(fwe);
|
2003-03-06 05:06:44 +00:00
|
|
|
xfer = STAILQ_FIRST(&fwe->xferlist);
|
2002-09-13 12:31:56 +00:00
|
|
|
if (xfer == NULL) {
|
2007-06-06 14:31:36 +00:00
|
|
|
#if 0
|
2003-03-06 05:06:44 +00:00
|
|
|
printf("if_fwe: lack of xfer\n");
|
2007-06-06 14:31:36 +00:00
|
|
|
#endif
|
|
|
|
FWE_UNLOCK(fwe);
|
|
|
|
break;
|
2002-09-13 12:31:56 +00:00
|
|
|
}
|
2007-06-06 14:31:36 +00:00
|
|
|
STAILQ_REMOVE_HEAD(&fwe->xferlist, link);
|
|
|
|
FWE_UNLOCK(fwe);
|
|
|
|
|
2003-03-06 05:06:44 +00:00
|
|
|
IF_DEQUEUE(&ifp->if_snd, m);
|
2007-06-06 14:31:36 +00:00
|
|
|
if (m == NULL) {
|
|
|
|
FWE_LOCK(fwe);
|
|
|
|
STAILQ_INSERT_HEAD(&fwe->xferlist, xfer, link);
|
|
|
|
FWE_UNLOCK(fwe);
|
2003-03-06 05:06:44 +00:00
|
|
|
break;
|
2007-06-06 14:31:36 +00:00
|
|
|
}
|
2004-03-26 23:17:10 +00:00
|
|
|
BPF_MTAP(ifp, m);
|
2002-09-13 12:31:56 +00:00
|
|
|
|
|
|
|
/* keep ip packet alignment for alpha */
|
2012-12-04 09:32:43 +00:00
|
|
|
M_PREPEND(m, ETHER_ALIGN, M_NOWAIT);
|
2003-10-02 04:06:56 +00:00
|
|
|
fp = &xfer->send.hdr;
|
2004-05-22 16:14:17 +00:00
|
|
|
*(uint32_t *)&xfer->send.hdr = *(int32_t *)&fwe->pkt_hdr;
|
2003-04-17 03:38:03 +00:00
|
|
|
fp->mode.stream.len = m->m_pkthdr.len;
|
2002-09-13 12:31:56 +00:00
|
|
|
xfer->mbuf = m;
|
2003-10-02 04:06:56 +00:00
|
|
|
xfer->send.pay_len = m->m_pkthdr.len;
|
2002-09-13 12:31:56 +00:00
|
|
|
|
2003-03-06 05:06:44 +00:00
|
|
|
if (fw_asyreq(fwe->fd.fc, -1, xfer) != 0) {
|
2002-09-13 12:31:56 +00:00
|
|
|
/* error */
|
|
|
|
ifp->if_oerrors ++;
|
|
|
|
/* XXX set error code */
|
|
|
|
fwe_output_callback(xfer);
|
|
|
|
} else {
|
|
|
|
ifp->if_opackets ++;
|
2003-03-06 05:06:44 +00:00
|
|
|
i++;
|
2002-09-13 12:31:56 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
#if 0
|
|
|
|
if (i > 1)
|
|
|
|
printf("%d queued\n", i);
|
|
|
|
#endif
|
2003-03-06 05:06:44 +00:00
|
|
|
if (i > 0)
|
|
|
|
xferq->start(fwe->fd.fc);
|
2002-09-13 12:31:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Async. stream input */
|
|
|
|
/*
 * Async stream input: drain completed receive bulk xfers from the
 * receive queue, replace each receive buffer, and feed valid frames
 * to the network stack via if_input.
 */
static void
fwe_as_input(struct fw_xferq *xferq)
{
	struct mbuf *m, *m0;
	struct ifnet *ifp;
	struct fwe_softc *fwe;
	struct fw_bulkxfer *sxfer;
	struct fw_pkt *fp;
	u_char *c;

	fwe = (struct fwe_softc *)xferq->sc;
	ifp = fwe->eth_softc.ifp;

	/* We do not need a lock here because the bottom half is serialized */
	while ((sxfer = STAILQ_FIRST(&xferq->stvalid)) != NULL) {
		STAILQ_REMOVE_HEAD(&xferq->stvalid, link);
		/*
		 * fp aliases the data of the just-received mbuf; it stays
		 * valid below even after sxfer->mbuf is replaced, because
		 * the old chain (m) is not freed until the error path or
		 * handed to the stack.
		 */
		fp = mtod(sxfer->mbuf, struct fw_pkt *);
		if (fwe->fd.fc->irx_post != NULL)
			fwe->fd.fc->irx_post(fwe->fd.fc, fp->mode.ld);
		m = sxfer->mbuf;

		/* insert new rbuf */
		sxfer->mbuf = m0 = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m0 != NULL) {
			m0->m_len = m0->m_pkthdr.len = m0->m_ext.ext_size;
			STAILQ_INSERT_TAIL(&xferq->stfree, sxfer, link);
		} else
			/* NOTE(review): on allocation failure sxfer is not
			 * returned to stfree, so this receive slot is lost
			 * until the queue is reinitialized — confirm intent. */
			printf("%s: m_getcl failed\n", __FUNCTION__);

		/* Drop errored responses and runt frames. */
		if (sxfer->resp != 0 || fp->mode.stream.len <
		    ETHER_ALIGN + sizeof(struct ether_header)) {
			m_freem(m);
			ifp->if_ierrors ++;
			continue;
		}

		/* Strip the encapsulation header plus alignment padding. */
		m->m_data += HDR_LEN + ETHER_ALIGN;
		/* c is referenced only by the disabled debug dump below. */
		c = mtod(m, u_char *);
		m->m_len = m->m_pkthdr.len = fp->mode.stream.len - ETHER_ALIGN;
		m->m_pkthdr.rcvif = ifp;
#if 0
		FWEDEBUG(ifp, "%02x %02x %02x %02x %02x %02x\n"
		    "%02x %02x %02x %02x %02x %02x\n"
		    "%02x %02x %02x %02x\n"
		    "%02x %02x %02x %02x\n"
		    "%02x %02x %02x %02x\n"
		    "%02x %02x %02x %02x\n",
		    c[0], c[1], c[2], c[3], c[4], c[5],
		    c[6], c[7], c[8], c[9], c[10], c[11],
		    c[12], c[13], c[14], c[15],
		    c[16], c[17], c[18], c[19],
		    c[20], c[21], c[22], c[23],
		    c[20], c[21], c[22], c[23]
		);
#endif
		/* Hand the frame to the network stack. */
		(*ifp->if_input)(ifp, m);
		ifp->if_ipackets ++;
	}
	/* Re-enable the receive DMA engine if free buffers remain. */
	if (STAILQ_FIRST(&xferq->stfree) != NULL)
		fwe->fd.fc->irx_enable(fwe->fd.fc, fwe->dma_ch);
}
|
|
|
|
|
|
|
|
|
|
|
|
static devclass_t fwe_devclass;

/* newbus device methods for the fwe driver */
static device_method_t fwe_methods[] = {
	/* device interface */
	DEVMETHOD(device_identify, fwe_identify),
	DEVMETHOD(device_probe, fwe_probe),
	DEVMETHOD(device_attach, fwe_attach),
	DEVMETHOD(device_detach, fwe_detach),
	{ 0, 0 }	/* terminator */
};

/* driver declaration: name, method table, per-instance softc size */
static driver_t fwe_driver = {
	"fwe",
	fwe_methods,
	sizeof(struct fwe_softc),
};

/* Register the driver on the firewire bus and declare module metadata. */
DRIVER_MODULE(fwe, firewire, fwe_driver, fwe_devclass, 0, 0);
MODULE_VERSION(fwe, 1);
MODULE_DEPEND(fwe, firewire, 1, 1, 1);
|