Hold an explicit reference on the socket for the aiotx task.

Previously, the aiotx task relied on the aio jobs in its queue to
hold a reference on the socket.  However, once the last job
completes, nothing remains to keep the socket alive, so the socket
buffer lock the task uses to check whether the queue is empty can be
freed out from under it.  In addition, if the last job on the queue
is cancelled, the task can run with no queued jobs at all holding a
reference, again leaving the socket buffer lock it needs to notice
that the queue is empty unprotected.

Fix these races by holding an explicit reference on the socket when
the task is queued and dropping that reference when the task
completes.

Reviewed by:	np
MFC after:	1 week
Sponsored by:	Chelsio Communications
Differential Revision:	https://reviews.freebsd.org/D20539
commit 6903ceff77 (parent 2b448ee3ce)
Author: jhb
Date:   2019-06-27 19:36:30 +00:00

2 changed files with 15 additions and 12 deletions
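
The pattern the change applies, reduced to its essentials: take a
reference on the object a deferred task will dereference at the moment
the task is queued, and drop that reference only once the task has
finished with the object.  Below is a minimal userland C sketch of that
idea; it is an illustration only, and the names (struct object,
obj_ref()/obj_rele(), queue_task(), task_fn()) are invented stand-ins,
not the kernel's soref()/sorele()/soaio_enqueue() interfaces.

/*
 * Illustrative userland analogue of the fix (not kernel code): the
 * object a queued task will use gets an explicit reference when the
 * task is enqueued, and that reference is dropped only after the
 * task is done with the object.
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdlib.h>

struct object {
	atomic_int refs;	/* reference count */
	int	task_queued;	/* nonzero while a queued task holds a ref */
};

static void
obj_ref(struct object *o)
{

	atomic_fetch_add(&o->refs, 1);
}

static void
obj_rele(struct object *o)
{

	if (atomic_fetch_sub(&o->refs, 1) == 1)
		free(o);	/* last reference went away */
}

/* Task body: uses the object, then drops the reference taken at queue time. */
static void
task_fn(struct object *o)
{

	/* ... work that dereferences 'o' would go here ... */
	o->task_queued = 0;
	obj_rele(o);		/* reference taken in queue_task() */
}

/* Queue the task at most once, holding a reference for its lifetime. */
static void
queue_task(struct object *o)
{

	if (o->task_queued)	/* already queued; that task holds a ref */
		return;
	obj_ref(o);		/* keep 'o' alive until task_fn() completes */
	o->task_queued = 1;
	task_fn(o);		/* stand-in for handing the task to a queue */
}

int
main(void)
{
	struct object *o = malloc(sizeof(*o));

	assert(o != NULL);
	atomic_init(&o->refs, 1);	/* caller's reference */
	o->task_queued = 0;
	queue_task(o);
	obj_rele(o);			/* caller drops its own reference */
	return (0);
}

Note that the diff below goes one step further than a separate flag:
the new aiotx_so pointer doubles as the "task is queued" indicator
(replacing aiotx_task_active), so the queued state and the held
reference cannot get out of sync.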

sys/dev/cxgbe/tom/t4_cpl_io.c

@@ -74,7 +74,7 @@ __FBSDID("$FreeBSD$");
 #include "tom/t4_tom.h"
 
 static void	t4_aiotx_cancel(struct kaiocb *job);
-static void	t4_aiotx_queue_toep(struct toepcb *toep);
+static void	t4_aiotx_queue_toep(struct socket *so, struct toepcb *toep);
 
 static size_t
 aiotx_mbuf_pgoff(struct mbuf *m)
@@ -785,7 +785,7 @@ t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop)
 				if (sowwakeup) {
 					if (!TAILQ_EMPTY(
 					    &toep->aiotx_jobq))
-						t4_aiotx_queue_toep(
+						t4_aiotx_queue_toep(so,
 						    toep);
 					sowwakeup_locked(so);
 				} else
@@ -829,7 +829,7 @@ t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop)
 			}
 			if (sowwakeup) {
 				if (!TAILQ_EMPTY(&toep->aiotx_jobq))
-					t4_aiotx_queue_toep(toep);
+					t4_aiotx_queue_toep(so, toep);
 				sowwakeup_locked(so);
 			} else
 				SOCKBUF_UNLOCK(sb);
@@ -1821,7 +1821,7 @@ do_fw4_ack(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
 				tls_ofld->sb_off -= plen;
 			}
 			if (!TAILQ_EMPTY(&toep->aiotx_jobq))
-				t4_aiotx_queue_toep(toep);
+				t4_aiotx_queue_toep(so, toep);
 			sowwakeup_locked(so);	/* unlocks so_snd */
 		}
 		SOCKBUF_UNLOCK_ASSERT(sb);
@@ -2195,10 +2195,10 @@ static void
 t4_aiotx_task(void *context, int pending)
 {
 	struct toepcb *toep = context;
-	struct inpcb *inp = toep->inp;
-	struct socket *so = inp->inp_socket;
+	struct socket *so;
 	struct kaiocb *job;
 
+	so = toep->aiotx_so;
 	CURVNET_SET(toep->vnet);
 	SOCKBUF_LOCK(&so->so_snd);
 	while (!TAILQ_EMPTY(&toep->aiotx_jobq) && sowriteable(so)) {
@@ -2209,15 +2209,17 @@ t4_aiotx_task(void *context, int pending)
 		t4_aiotx_process_job(toep, so, job);
 	}
-	toep->aiotx_task_active = false;
+	toep->aiotx_so = NULL;
 	SOCKBUF_UNLOCK(&so->so_snd);
 	CURVNET_RESTORE();
 
 	free_toepcb(toep);
+	SOCK_LOCK(so);
+	sorele(so);
 }
 
 static void
-t4_aiotx_queue_toep(struct toepcb *toep)
+t4_aiotx_queue_toep(struct socket *so, struct toepcb *toep)
 {
 
 	SOCKBUF_LOCK_ASSERT(&toep->inp->inp_socket->so_snd);
@@ -2225,9 +2227,10 @@ t4_aiotx_queue_toep(struct toepcb *toep)
 	CTR3(KTR_CXGBE, "%s: queueing aiotx task for tid %d, active = %s",
 	    __func__, toep->tid, toep->aiotx_task_active ? "true" : "false");
 #endif
-	if (toep->aiotx_task_active)
+	if (toep->aiotx_so != NULL)
 		return;
-	toep->aiotx_task_active = true;
+	soref(so);
+	toep->aiotx_so = so;
 	hold_toepcb(toep);
 	soaio_enqueue(&toep->aiotx_task);
 }
@@ -2284,7 +2287,7 @@ t4_aio_queue_aiotx(struct socket *so, struct kaiocb *job)
 		panic("new job was cancelled");
 	TAILQ_INSERT_TAIL(&toep->aiotx_jobq, job, list);
 	if (sowriteable(so))
-		t4_aiotx_queue_toep(toep);
+		t4_aiotx_queue_toep(so, toep);
 	SOCKBUF_UNLOCK(&so->so_snd);
 	return (0);
 }

sys/dev/cxgbe/tom/t4_tom.h

@@ -194,7 +194,7 @@ struct toepcb {
 	TAILQ_HEAD(, kaiocb) aiotx_jobq;
 	struct task aiotx_task;
-	bool aiotx_task_active;
+	struct socket *aiotx_so;
 
 	/* Tx software descriptor */
 	uint8_t txsd_total;