Get rid of a lot of duplicated code for NR-SACK handling.

Generalize the SACK code to also handle NR-SACKs.
Michael Tuexen 2010-01-17 21:00:28 +00:00
parent e34b217f91
commit cd55430963
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=202526
6 changed files with 241 additions and 1945 deletions
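
The generalized receive path now has a single sctp_handle_sack() entry point that takes explicit gap/duplicate offsets and block counts (see the prototype in the header hunk below), so the chunk parser validates the advertised chunk length and computes those offsets up front for both SACK and NR-SACK chunks. The following is a minimal standalone sketch of that arithmetic only; the helper name sack_offsets() and the hard-coded 16- and 20-byte chunk-header sizes are illustrative assumptions derived from the struct layouts in the diff, not code from the commit.

/*
 * Standalone sketch (not the kernel code): mirrors the chunk-length
 * check and offset computation done before calling the unified
 * sctp_handle_sack(). is_nr selects the NR-SACK header size, which
 * carries an extra num_nr_gap_ack_blks/reserved pair.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct gap_ack_block {			/* same 4-byte shape as sctp_gap_ack_block */
	uint16_t start;
	uint16_t end;
};

static int
sack_offsets(int is_nr, int chunk_offset, uint16_t chk_length,
    uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
    int *offset_seg, int *offset_dup)
{
	/* 16 bytes for a SACK chunk header, 20 for an NR-SACK chunk header. */
	size_t hdr = is_nr ? 20 : 16;
	size_t want;

	/* All gap blocks (ordinary and NR) plus the duplicate TSNs must add
	 * up exactly to the advertised chunk length; otherwise the chunk is
	 * malformed and gets dropped. */
	want = hdr +
	    (size_t)(num_seg + num_nr_seg) * sizeof(struct gap_ack_block) +
	    (size_t)num_dup * sizeof(uint32_t);
	if (want != chk_length)
		return (-1);

	/* Gap blocks start right after the fixed header; the duplicate-TSN
	 * offset is computed from the ordinary gap blocks, as in the diff. */
	*offset_seg = chunk_offset + (int)hdr;
	*offset_dup = *offset_seg + num_seg * (int)sizeof(struct gap_ack_block);
	return (0);
}

int
main(void)
{
	int seg, dup;

	/* NR-SACK with 2 gap blocks, 1 NR gap block and 1 duplicate TSN:
	 * 20 + 3 * 4 + 1 * 4 = 36 bytes. */
	if (sack_offsets(1, 0, 36, 2, 1, 1, &seg, &dup) == 0)
		printf("offset_seg=%d offset_dup=%d\n", seg, dup);
	return (0);
}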

View File

@@ -284,13 +284,6 @@ struct sctp_sack_chunk {
struct sctp_sack sack;
} SCTP_PACKED;
/* EY Following 3 structs define NR Selective Ack (NR_SACK) chunk */
struct sctp_nr_gap_ack_block {
uint16_t start; /* NR Gap Ack block start */
uint16_t end; /* NR Gap Ack block end */
} SCTP_PACKED;
struct sctp_nr_sack {
uint32_t cum_tsn_ack; /* cumulative TSN Ack */
uint32_t a_rwnd; /* updated a_rwnd of sender */
@@ -299,7 +292,6 @@ struct sctp_nr_sack {
uint16_t num_dup_tsns; /* number of duplicate TSNs */
uint16_t reserved; /* not currently used */
/* struct sctp_gap_ack_block's follow */
/* struct sctp_nr_gap_ack_block's follow */
/* uint32_t duplicate_tsn's follow */
} SCTP_PACKED;

File diff suppressed because it is too large

View File

@@ -96,18 +96,11 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
uint32_t rwnd, int nonce_sum_flag, int *abort_now);
void
sctp_handle_sack(struct mbuf *m, int offset, struct sctp_sack_chunk *, struct sctp_tcb *,
struct sctp_nets *, int *, int, uint32_t);
/* EY does "exactly" the same as sctp_express_handle_sack */
void
sctp_express_handle_nr_sack(struct sctp_tcb *stcb, uint32_t cumack,
uint32_t rwnd, int nonce_sum_flag, int *abort_now);
/* EY nr_sack version of sctp_handle_sack */
void
sctp_handle_nr_sack(struct mbuf *m, int offset, struct sctp_nr_sack_chunk *, struct sctp_tcb *,
struct sctp_nets *, int *, int, uint32_t);
sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
struct sctp_tcb *stcb, struct sctp_nets *net_from,
uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
int *abort_now, uint8_t flags,
uint32_t cum_ack, uint32_t rwnd);
/* draft-ietf-tsvwg-usctp */
void

View File

@@ -4599,16 +4599,18 @@ __attribute__((noinline))
struct sctp_sack_chunk *sack;
int abort_now = 0;
uint32_t a_rwnd, cum_ack;
uint16_t num_seg;
uint16_t num_seg, num_dup;
uint8_t flags;
int offset_seg, offset_dup;
int nonce_sum_flag;
if ((stcb == NULL) || (chk_length < sizeof(struct sctp_sack_chunk))) {
SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on sack chunk, too small\n");
*offset = length;
if (locked_tcb) {
SCTP_TCB_UNLOCK(locked_tcb);
}
return (NULL);
if (stcb == NULL) {
SCTPDBG(SCTP_DEBUG_INDATA1, "No stcb when processing SACK chunk\n");
break;
}
if (chk_length < sizeof(struct sctp_sack_chunk)) {
SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on SACK chunk, too small\n");
break;
}
if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
/*-
@@ -4619,15 +4621,22 @@ __attribute__((noinline))
break;
}
sack = (struct sctp_sack_chunk *)ch;
nonce_sum_flag = ch->chunk_flags & SCTP_SACK_NONCE_SUM;
flags = ch->chunk_flags;
nonce_sum_flag = flags & SCTP_SACK_NONCE_SUM;
cum_ack = ntohl(sack->sack.cum_tsn_ack);
num_seg = ntohs(sack->sack.num_gap_ack_blks);
num_dup = ntohs(sack->sack.num_dup_tsns);
a_rwnd = (uint32_t) ntohl(sack->sack.a_rwnd);
if (sizeof(struct sctp_sack_chunk) +
num_seg * sizeof(struct sctp_gap_ack_block) +
num_dup * sizeof(uint32_t) != chk_length) {
SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of SACK chunk\n");
break;
}
offset_seg = *offset + sizeof(struct sctp_sack_chunk);
offset_dup = offset_seg + num_seg * sizeof(struct sctp_gap_ack_block);
SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n",
cum_ack,
num_seg,
a_rwnd
);
cum_ack, num_seg, a_rwnd);
stcb->asoc.seen_a_sack_this_pkt = 1;
if ((stcb->asoc.pr_sctp_cnt == 0) &&
(num_seg == 0) &&
@@ -4649,19 +4658,21 @@ __attribute__((noinline))
&abort_now);
} else {
if (netp && *netp)
sctp_handle_sack(m, *offset,
sack, stcb, *netp, &abort_now, chk_length, a_rwnd);
}
if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
(stcb->asoc.stream_queue_cnt == 0)) {
sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
sctp_handle_sack(m, offset_seg, offset_dup,
stcb, *netp,
num_seg, 0, num_dup, &abort_now, flags,
cum_ack, a_rwnd);
}
if (abort_now) {
/* ABORT signal from sack processing */
*offset = length;
return (NULL);
}
if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
(stcb->asoc.stream_queue_cnt == 0)) {
sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
}
}
break;
/*
@@ -4675,80 +4686,90 @@ __attribute__((noinline))
struct sctp_nr_sack_chunk *nr_sack;
int abort_now = 0;
uint32_t a_rwnd, cum_ack;
uint16_t num_seg, num_nr_seg;
uint16_t num_seg, num_nr_seg, num_dup;
uint8_t flags;
int offset_seg, offset_dup;
int nonce_sum_flag;
if ((stcb == NULL) || (chk_length < sizeof(struct sctp_nr_sack_chunk))) {
SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on nr_sack chunk, too small\n");
ignore_nr_sack:
*offset = length;
if (locked_tcb) {
SCTP_TCB_UNLOCK(locked_tcb);
}
return (NULL);
}
/*
* EY nr_sacks have not been negotiated but
* the peer end sent an nr_sack, silently
* discard the chunk
*/
if (!(SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)) {
if (!(SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) &&
stcb->asoc.peer_supports_nr_sack)) {
goto unknown_chunk;
}
if (stcb == NULL) {
SCTPDBG(SCTP_DEBUG_INDATA1, "No stcb when processing NR-SACK chunk\n");
break;
}
if (chk_length < sizeof(struct sctp_nr_sack_chunk)) {
SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on NR-SACK chunk, too small\n");
break;
}
if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
/*-
* If we have sent a shutdown-ack, we will pay no
* attention to a sack sent in to us since
* we don't care anymore.
*/
goto ignore_nr_sack;
break;
}
nr_sack = (struct sctp_nr_sack_chunk *)ch;
nonce_sum_flag = ch->chunk_flags & SCTP_SACK_NONCE_SUM;
flags = ch->chunk_flags;
nonce_sum_flag = flags & SCTP_SACK_NONCE_SUM;
cum_ack = ntohl(nr_sack->nr_sack.cum_tsn_ack);
num_seg = ntohs(nr_sack->nr_sack.num_gap_ack_blks);
num_nr_seg = ntohs(nr_sack->nr_sack.num_nr_gap_ack_blks);
num_dup = ntohs(nr_sack->nr_sack.num_dup_tsns);
a_rwnd = (uint32_t) ntohl(nr_sack->nr_sack.a_rwnd);
if (sizeof(struct sctp_nr_sack_chunk) +
(num_seg + num_nr_seg) * sizeof(struct sctp_gap_ack_block) +
num_dup * sizeof(uint32_t) != chk_length) {
SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of NR_SACK chunk\n");
break;
}
offset_seg = *offset + sizeof(struct sctp_nr_sack_chunk);
offset_dup = offset_seg + num_seg * sizeof(struct sctp_gap_ack_block);
SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_NR_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n",
cum_ack,
num_seg,
a_rwnd
);
cum_ack, num_seg, a_rwnd);
stcb->asoc.seen_a_sack_this_pkt = 1;
if ((stcb->asoc.pr_sctp_cnt == 0) &&
(num_seg == 0) &&
(num_seg == 0) && (num_nr_seg == 0) &&
((compare_with_wrap(cum_ack, stcb->asoc.last_acked_seq, MAX_TSN)) ||
(cum_ack == stcb->asoc.last_acked_seq)) &&
(stcb->asoc.saw_sack_with_frags == 0) &&
(!TAILQ_EMPTY(&stcb->asoc.sent_queue))
) {
(!TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
/*
* We have a SIMPLE sack having no
* prior segments and data on sent
* queue to be acked.. Use the
* faster path sack processing. We
* also allow window update sacks
* with no missing segments to go
* this way too.
* queue to be acked. Use the faster
* path sack processing. We also
* allow window update sacks with no
* missing segments to go this way
* too.
*/
sctp_express_handle_nr_sack(stcb, cum_ack, a_rwnd, nonce_sum_flag,
sctp_express_handle_sack(stcb, cum_ack, a_rwnd, nonce_sum_flag,
&abort_now);
} else {
if (netp && *netp)
sctp_handle_nr_sack(m, *offset,
nr_sack, stcb, *netp, &abort_now, chk_length, a_rwnd);
}
if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
(stcb->asoc.stream_queue_cnt == 0)) {
sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
sctp_handle_sack(m, offset_seg, offset_dup,
stcb, *netp,
num_seg, num_nr_seg, num_dup, &abort_now, flags,
cum_ack, a_rwnd);
}
if (abort_now) {
/* ABORT signal from sack processing */
*offset = length;
return (NULL);
}
if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
(stcb->asoc.stream_queue_cnt == 0)) {
sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
}
}
break;
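
Both chunk types also share the same fast-path test now: no PR-SCTP chunks outstanding, no gap blocks of either kind, data on the sent queue, and a cumulative ack that is not older than the last acked TSN, checked with a wrap-around-aware comparison. Below is a small standalone illustration of that kind of wrap-aware TSN check; tsn_after() is a hypothetical helper for illustration, not the compare_with_wrap() used by the stack.

/*
 * Illustration only: a wrap-aware "is a newer than b" test over the
 * 32-bit TSN space, the kind of comparison compare_with_wrap(..., MAX_TSN)
 * performs in the fast-path condition above. tsn_after() is a hypothetical
 * name, not the FreeBSD implementation.
 */
#include <stdint.h>
#include <stdio.h>

static int
tsn_after(uint32_t a, uint32_t b)
{
	/* "a" is newer than "b" if it lies in the half of the 32-bit
	 * circle that follows "b". */
	return (a != b) && ((uint32_t)(a - b) < 0x80000000U);
}

int
main(void)
{
	/* TSN 1 is "after" 0xfffffffe once the sequence space wraps. */
	printf("%d\n", tsn_after(0x00000001U, 0xfffffffeU));	/* prints 1 */
	printf("%d\n", tsn_after(5U, 10U));			/* prints 0 */
	return (0);
}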

View File

@@ -10172,7 +10172,6 @@ sctp_send_nr_sack(struct sctp_tcb *stcb)
struct sctp_nr_sack_chunk *nr_sack;
struct sctp_gap_ack_block *gap_descriptor;
struct sctp_nr_gap_ack_block *nr_gap_descriptor;
struct sack_track *selector;
struct sack_track *nr_selector;
@@ -10433,8 +10432,6 @@ sctp_send_nr_sack(struct sctp_tcb *stcb)
}
/*---------------------------------------------------------filling the nr_gap_ack blocks----------------------------------------------------*/
nr_gap_descriptor = (struct sctp_nr_gap_ack_block *)gap_descriptor;
/* EY - there will be gaps + nr_gaps if draining is possible */
if ((SCTP_BASE_SYSCTL(sctp_do_drain)) && (limit_reached == 0)) {
@@ -10470,7 +10467,7 @@ sctp_send_nr_sack(struct sctp_tcb *stcb)
* ok to merge.
*/
num_nr_gap_blocks--;
nr_gap_descriptor--;
gap_descriptor--;
}
if (nr_selector->num_entries == 0)
mergeable = 0;
@@ -10489,12 +10486,12 @@ sctp_send_nr_sack(struct sctp_tcb *stcb)
* left side
*/
mergeable = 0;
nr_gap_descriptor->start = htons((nr_selector->gaps[j].start + offset));
gap_descriptor->start = htons((nr_selector->gaps[j].start + offset));
}
nr_gap_descriptor->end = htons((nr_selector->gaps[j].end + offset));
gap_descriptor->end = htons((nr_selector->gaps[j].end + offset));
num_nr_gap_blocks++;
nr_gap_descriptor++;
if (((caddr_t)nr_gap_descriptor + sizeof(struct sctp_nr_gap_ack_block)) > limit) {
gap_descriptor++;
if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
/* no more room */
limit_reached = 1;
break;
@@ -10517,7 +10514,7 @@ sctp_send_nr_sack(struct sctp_tcb *stcb)
/* now we must add any dups we are going to report. */
if ((limit_reached == 0) && (asoc->numduptsns)) {
dup = (uint32_t *) nr_gap_descriptor;
dup = (uint32_t *) gap_descriptor;
for (i = 0; i < asoc->numduptsns; i++) {
*dup = htonl(asoc->dup_tsns[i]);
dup++;
@@ -10537,10 +10534,9 @@ sctp_send_nr_sack(struct sctp_tcb *stcb)
num_nr_gap_blocks = num_gap_blocks;
num_gap_blocks = 0;
}
a_chk->send_size = (sizeof(struct sctp_nr_sack_chunk) +
(num_gap_blocks * sizeof(struct sctp_gap_ack_block)) +
(num_nr_gap_blocks * sizeof(struct sctp_nr_gap_ack_block)) +
(num_dups * sizeof(int32_t)));
a_chk->send_size = sizeof(struct sctp_nr_sack_chunk) +
(num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
num_dups * sizeof(int32_t);
SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
nr_sack->nr_sack.num_gap_ack_blks = htons(num_gap_blocks);
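
On the sending side the NR gap blocks are now written through the same struct sctp_gap_ack_block descriptor as the ordinary blocks, which is what lets the separate nr_gap_descriptor pointer go away and the send size become one sum over both block counts. The sketch below illustrates the per-block room check from the packing loop in standalone form; the function name pack_gap_blocks() and the two-block buffer are illustrative assumptions, not code from the commit.

/*
 * Standalone sketch of the "no more room" test in the gap-block packing
 * loop: each block (ordinary or NR) is written through the same 4-byte
 * descriptor, and packing stops once the next block would not fit
 * entirely before the end of the chunk buffer. Names and sizes are
 * illustrative.
 */
#include <stdint.h>
#include <stdio.h>

struct gap_ack_block {			/* same 4-byte shape as sctp_gap_ack_block */
	uint16_t start;
	uint16_t end;
};

/* Pack as many blocks as fit between dst and limit; returns the count. */
static int
pack_gap_blocks(uint8_t *dst, const uint8_t *limit,
    const struct gap_ack_block *blocks, int nblocks)
{
	struct gap_ack_block *gap_descriptor = (struct gap_ack_block *)dst;
	int packed = 0;

	for (int i = 0; i < nblocks; i++) {
		if ((const uint8_t *)(gap_descriptor + 1) > limit)
			break;			/* no more room */
		*gap_descriptor = blocks[i];
		gap_descriptor++;
		packed++;
	}
	return (packed);
}

int
main(void)
{
	struct gap_ack_block buf[2];	/* room for exactly two blocks */
	struct gap_ack_block blocks[3] = {{1, 2}, {4, 5}, {7, 8}};

	printf("%d\n", pack_gap_blocks((uint8_t *)buf,
	    (const uint8_t *)(buf + 2), blocks, 3));	/* prints 2 */
	return (0);
}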

View File

@@ -1043,8 +1043,6 @@ struct sctp_association {
uint8_t delayed_connection;
uint8_t ifp_had_enobuf;
uint8_t saw_sack_with_frags;
/* EY */
uint8_t saw_sack_with_nr_frags;
uint8_t in_asocid_hash;
uint8_t assoc_up_sent;
uint8_t adaptation_needed;