Optimize flushing of receive queues.

This addresses an issue found and reported for the userland stack in
https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=21243

MFC after:		1 week
This commit is contained in:
Michael Tuexen 2020-07-09 16:18:42 +00:00
parent 1a4b982e1e
commit b6734d8f4a
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=363046

View File

@@ -5411,11 +5411,9 @@ sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
static void
sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
struct sctp_association *asoc,
uint16_t stream, uint32_t mid, int ordered, uint32_t cumtsn)
struct sctp_association *asoc, struct sctp_stream_in *strm,
struct sctp_queued_to_read *control, int ordered, uint32_t cumtsn)
{
struct sctp_queued_to_read *control;
struct sctp_stream_in *strm;
struct sctp_tmit_chunk *chk, *nchk;
int cnt_removed = 0;
@@ -5427,12 +5425,6 @@ sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
* it can be delivered... But for now we just dump everything on the
* queue.
*/
strm = &asoc->strmin[stream];
control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
if (control == NULL) {
/* Not found */
return;
}
if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
return;
}
@@ -5609,7 +5601,10 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb,
/* Flush all the un-ordered data based on cum-tsn */
SCTP_INP_READ_LOCK(stcb->sctp_ep);
for (sid = 0; sid < asoc->streamincnt; sid++) {
sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, new_cum_tsn);
strm = &asoc->strmin[sid];
if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), 0, new_cum_tsn);
}
}
SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
}
@@ -5621,7 +5616,7 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb,
if (m && fwd_sz) {
/* New method. */
unsigned int num_str;
uint32_t mid, cur_mid;
uint32_t mid;
uint16_t sid;
uint16_t ordered, flags;
struct sctp_strseq *stseq, strseqbuf;
@@ -5688,8 +5683,24 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb,
asoc->fragmented_delivery_inprogress = 0;
}
strm = &asoc->strmin[sid];
for (cur_mid = strm->last_mid_delivered; SCTP_MID_GE(asoc->idata_supported, mid, cur_mid); cur_mid++) {
sctp_flush_reassm_for_str_seq(stcb, asoc, sid, cur_mid, ordered, new_cum_tsn);
if (ordered) {
TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
}
}
} else {
if (asoc->idata_supported) {
TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
}
}
} else {
if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), ordered, new_cum_tsn);
}
}
}
TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
if ((control->sinfo_stream == sid) &&