When direct dispatching a netisr (net.isr.enable=1), if there are
already any queued packets for the isr, process those packets before
the newly submitted packet, maintaining the ordering of all packets
delivered to the netisr.  Remove the bypass counter, since we no
longer bypass.

Leave the comment about possible problems and options, since later
performance optimization may change the strategy for addressing
ordering problems here.  Specifically, this maintains the strong isr
ordering guarantee; additional parallelism and lower latency may be
possible by moving to weaker guarantees (per-interface, for example).
At some point we will probably also want to remove the one-instance
netisr dispatch limit currently enforced by a mutex, but it's not yet
clear that is entirely safe, even in the netperf branch.

Reviewed by:	sam, others
commit a2b04c6504
parent dd70179215
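
The ordering rule this change enforces can be illustrated with a
minimal userland sketch (not the kernel code itself): when direct
dispatch finds packets already queued for the isr, it drains them
before handling the newly submitted packet, so processing order always
matches submission order.  The pkt list, handler(), enqueue(),
processqueue(), and dispatch() below are simplified stand-ins for the
real ifqueue/netisr machinery, not names from the kernel.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for the netisr's queue of pending packets. */
struct pkt {
	int		 seq;	/* submission order */
	struct pkt	*next;
};

static struct pkt	*head, *tail;

static void
handler(int seq)		/* stand-in for ni->ni_handler */
{
	printf("processed packet %d\n", seq);
}

static void
enqueue(int seq)		/* deferred path: queue for later processing */
{
	struct pkt *p;

	p = malloc(sizeof(*p));
	if (p == NULL)
		abort();
	p->seq = seq;
	p->next = NULL;
	if (tail != NULL)
		tail->next = p;
	else
		head = p;
	tail = p;
}

static void
processqueue(void)		/* analogue of netisr_processqueue() */
{
	struct pkt *p;

	while ((p = head) != NULL) {
		head = p->next;
		if (head == NULL)
			tail = NULL;
		handler(p->seq);
		free(p);
	}
}

static void
dispatch(int seq)		/* direct dispatch path after this change */
{
	processqueue();		/* drain already-queued packets first... */
	handler(seq);		/* ...then handle the newly submitted one */
}

int
main(void)
{
	enqueue(1);		/* packets queued before direct dispatch */
	enqueue(2);
	dispatch(3);		/* prints 1, 2, 3: order preserved */
	return (0);
}

Run, the example prints packets 1, 2, 3 in order; without the
processqueue() call in dispatch(), packet 3 would jump ahead of 1 and
2, the inversion the removed bypass counter used to record.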
@@ -100,7 +100,6 @@ struct isrstat {
 	int	isrs_count;			/* dispatch count */
 	int	isrs_directed;			/* ...successfully dispatched */
 	int	isrs_deferred;			/* ...queued instead */
-	int	isrs_bypassed;			/* bypassed queued packets */
 	int	isrs_queued;			/* intentionally queueued */
 	int	isrs_swi_count;			/* swi_net handlers called */
 };
@@ -119,13 +118,29 @@ SYSCTL_INT(_net_isr, OID_AUTO, directed, CTLFLAG_RD,
 	&isrstat.isrs_directed, 0, "");
 SYSCTL_INT(_net_isr, OID_AUTO, deferred, CTLFLAG_RD,
 	&isrstat.isrs_deferred, 0, "");
-SYSCTL_INT(_net_isr, OID_AUTO, bypassed, CTLFLAG_RD,
-	&isrstat.isrs_bypassed, 0, "");
 SYSCTL_INT(_net_isr, OID_AUTO, queued, CTLFLAG_RD,
 	&isrstat.isrs_queued, 0, "");
 SYSCTL_INT(_net_isr, OID_AUTO, swi_count, CTLFLAG_RD,
 	&isrstat.isrs_swi_count, 0, "");
 
+/*
+ * Process all packets currently present in a netisr queue.  Used to
+ * drain an existing set of packets waiting for processing when we
+ * begin direct dispatch, to avoid processing packets out of order.
+ */
+static void
+netisr_processqueue(struct netisr *ni)
+{
+	struct mbuf *m;
+
+	for (;;) {
+		IF_DEQUEUE(ni->ni_queue, m);
+		if (m == NULL)
+			break;
+		ni->ni_handler(m);
+	}
+}
+
 /*
  * Call the netisr directly instead of queueing the packet, if possible.
  *
@@ -163,10 +178,9 @@ netisr_dispatch(int num, struct mbuf *m)
 		 * b. fallback to queueing the packet,
 		 * c. sweep the issue under the rug and ignore it.
 		 *
-		 * Currently, we do c), and keep a rough event counter.
+		 * Currently, we do a).  Previously, we did c).
 		 */
-		if (_IF_QLEN(ni->ni_queue) > 0)
-			isrstat.isrs_bypassed++;
+		netisr_processqueue(ni);
 		ni->ni_handler(m);
 		mtx_unlock(&netisr_mtx);
 	} else {
@@ -204,7 +218,6 @@ static void
 swi_net(void *dummy)
 {
 	struct netisr *ni;
-	struct mbuf *m;
 	u_int bits;
 	int i;
 #ifdef DEVICE_POLLING
@@ -230,12 +243,7 @@ swi_net(void *dummy)
 			if (ni->ni_queue == NULL)
 				ni->ni_handler(NULL);
 			else
-				for (;;) {
-					IF_DEQUEUE(ni->ni_queue, m);
-					if (m == NULL)
-						break;
-					ni->ni_handler(m);
-				}
+				netisr_processqueue(ni);
 		}
 	} while (polling);
 	mtx_unlock(&netisr_mtx);
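
As a usage aside, whether direct dispatch is enabled can be checked
from userland via sysctlbyname(3); net.isr.enable is the knob named in
the commit message above, assuming a kernel from this era that still
exposes that sysctl.

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int enable;
	size_t len = sizeof(enable);

	/* Read the current value of the direct-dispatch knob. */
	if (sysctlbyname("net.isr.enable", &enable, &len, NULL, 0) == -1) {
		perror("sysctlbyname");
		return (1);
	}
	printf("net.isr.enable=%d (%s)\n", enable,
	    enable ? "direct dispatch" : "queued dispatch");
	return (0);
}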