- Add more comments to the sctpstat structure in sctp_uio.h.
- Fix the bug that prevented EEOR mode from working, and simplify the can_we_split code in the process.
- Reduce lock contention on the tcb_send_lock. This was done especially for EEOR mode; it still needs to be determined why a lock is required when removing from the tailq while ->next is NOT NULL. A lock fixes it, but that implies a bug still exists.
- Activate Andre's proposed changes to better use the mbuf infrastructure.
- Fix places that were not using the alloc macros and so were not taking advantage of the per-assoc cache.
- Add an ifdef fix so that enabling any logging option also enables SCTP_STAT_LOGGING, putting the right data structures in place (suggested by Max Laier).
Parent: 31175191f1
Commit: 0eed90f74a
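For context on the EEOR fix below: a minimal user-space sketch (illustration only, not part of this commit) of how an application drives explicit EOR mode. It assumes the SCTP_EXPLICIT_EOR socket option, the SCTP_EOR sinfo flag, and the sctp_sendmsg() library routine as shipped with FreeBSD's SCTP stack; the helper name send_in_pieces is made up.

/*
 * Hypothetical example: with explicit EOR mode enabled, one user message
 * may be handed to the kernel across several send calls; only the final
 * piece carries SCTP_EOR.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

static int
send_in_pieces(int sd, const char *buf, size_t len, uint16_t stream)
{
	const int on = 1;
	size_t half = len / 2;

	/* Turn on explicit EOR mode for this socket. */
	if (setsockopt(sd, IPPROTO_SCTP, SCTP_EXPLICIT_EOR, &on, sizeof(on)) < 0)
		return (-1);
	/* First piece: no EOR flag, the message stays open in its stream. */
	if (sctp_sendmsg(sd, buf, half, NULL, 0, 0, 0, stream, 0, 0) < 0)
		return (-1);
	/* Final piece: SCTP_EOR marks the end of the user message. */
	if (sctp_sendmsg(sd, buf + half, len - half, NULL, 0, 0,
	    SCTP_EOR, stream, 0, 0) < 0)
		return (-1);
	return (0);
}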
@@ -3166,55 +3166,28 @@ sctp_get_mbuf_for_msg(unsigned int space_needed, int want_header,
int how, int allonebuf, int type)
{
struct mbuf *m = NULL;
int aloc_size;
int index = 0;
int mbuf_threshold;

if (want_header) {
MGETHDR(m, how, type);
} else {
MGET(m, how, type);
}
if (m == NULL) {
return (NULL);
}
if (allonebuf == 0)
mbuf_threshold = sctp_mbuf_threshold_count;
else
mbuf_threshold = 1;
m = m_getm2(NULL, space_needed, how, type, want_header ? M_PKTHDR : 0);
if (allonebuf) {
int siz;

if (space_needed > (((mbuf_threshold - 1) * MLEN) + MHLEN)) {
try_again:
index = 4;
if (space_needed <= MCLBYTES) {
aloc_size = MCLBYTES;
} else if (space_needed <= MJUMPAGESIZE) {
aloc_size = MJUMPAGESIZE;
index = 5;
} else if (space_needed <= MJUM9BYTES) {
aloc_size = MJUM9BYTES;
index = 6;
if (SCTP_BUF_IS_EXTENDED(m)) {
siz = SCTP_BUF_EXTEND_SIZE(m);
} else {
aloc_size = MJUM16BYTES;
index = 7;
if (want_header)
siz = MHLEN;
else
siz = MLEN;
}
m_cljget(m, how, aloc_size);
if (m == NULL) {
return (NULL);
}
if (SCTP_BUF_IS_EXTENDED(m) == 0) {
if ((aloc_size != MCLBYTES) &&
(allonebuf == 0)) {
aloc_size -= 10;
goto try_again;
}
sctp_m_freem(m);
if (siz < space_needed) {
m_freem(m);
return (NULL);
}
}
SCTP_BUF_LEN(m) = 0;
SCTP_BUF_NEXT(m) = SCTP_BUF_NEXT_PKT(m) = NULL;
if (SCTP_BUF_NEXT(m)) {
sctp_m_freem(SCTP_BUF_NEXT(m));
SCTP_BUF_NEXT(m) = NULL;
}
#ifdef SCTP_MBUF_LOGGING
if (SCTP_BUF_IS_EXTENDED(m)) {
sctp_log_mb(m, SCTP_MBUF_IALLOC);
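The new allocation path above leans on m_getm2() rather than hand-picking cluster sizes with MGET/MGETHDR and m_cljget(). A minimal sketch of that direction (assuming the stock FreeBSD m_getm2() kernel API; the wrapper name alloc_msg_chain is made up and this is not the exact kernel routine):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>

/*
 * Sketch only: allocate an mbuf chain large enough for space_needed
 * bytes and let m_getm2() choose the cluster sizes.
 */
static struct mbuf *
alloc_msg_chain(int space_needed, int want_header, int how)
{
	struct mbuf *m;

	m = m_getm2(NULL, space_needed, how, MT_DATA,
	    want_header ? M_PKTHDR : 0);
	if (m == NULL)
		return (NULL);
	m->m_len = 0;		/* caller fills in the data */
	return (m);
}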
@@ -5393,7 +5366,7 @@ sctp_msg_append(struct sctp_tcb *stcb,
error = ECONNRESET;
goto out_now;
}
sp = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_strmoq, struct sctp_stream_queue_pending);
sctp_alloc_a_strmoq(stcb, sp);
if (sp == NULL) {
error = ENOMEM;
goto out_now;

@@ -5434,7 +5407,7 @@ sctp_msg_append(struct sctp_tcb *stcb,
}
SCTP_TCB_SEND_LOCK(stcb);
sctp_snd_sb_alloc(stcb, sp->length);
stcb->asoc.stream_queue_cnt++;
atomic_add_int(&stcb->asoc.stream_queue_cnt, 1);
TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
if ((srcv->sinfo_flags & SCTP_UNORDERED) == 0) {
sp->strseq = strm->next_sequence_sent;

@@ -6098,47 +6071,43 @@ sctp_can_we_split_this(struct sctp_tcb *stcb,
{
/*
* Make a decision on if I should split a msg into multiple parts.
* This is only asked of incomplete messages.
*/
if (goal_mtu < sctp_min_split_point) {
/* you don't want enough */
return (0);
}
if ((sp->length <= goal_mtu) || ((sp->length - goal_mtu) < sctp_min_residual)) {
/* Sub-optimal residual, don't split */
return (0);
}
if (sp->msg_is_complete == 0) {
if (eeor_on) {
/*
* If we are doing EEOR we need to always send it if
* it's the entire thing.
if (eeor_on) {
/*
* If we are doing EEOR we need to always send it if it's the
* entire thing, since it might be all the guy is putting in
* the hopper.
*/
if (goal_mtu >= sp->length) {
/*-
* If we have data outstanding,
* we get another chance when the sack
* arrives to transmit - wait for more data
*
* Could this be optimized to be aware of
* two packets?
*
*/
if (goal_mtu >= sp->length)
if (stcb->asoc.total_flight > 0)
return (0);
else
return (sp->length);
} else {
if (goal_mtu >= sp->length) {
/*
* If we cannot fill the amount needed there
* is no sense of splitting the chunk.
*/
return (0);
}
/* You can fill the rest */
return (goal_mtu);
}
/*
* If we reach here sp->length is larger than the goal_mtu.
* Do we wish to split it for the sake of packet putting
* together?
*/
if (goal_mtu >= min(sctp_min_split_point, stcb->asoc.smallest_mtu)) {
/* It's ok to split it */
return (min(goal_mtu, frag_point));
}
} else {
/* We can always split a complete message to make it fit */
if (goal_mtu >= sp->length)
/* Take it all */
return (sp->length);

}
if ((sp->length <= goal_mtu) || ((sp->length - goal_mtu) < sctp_min_residual)) {
/* Sub-optimal residual, don't split in non-eeor mode. */
return (0);
}
/*
* If we reach here sp->length is larger than the goal_mtu. Do we
* wish to split it for the sake of packet putting together?
*/
if (goal_mtu >= min(sctp_min_split_point, frag_point)) {
/* It's ok to split it */
return (min(goal_mtu, frag_point));
}
/* Nope, can't split */
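Condensing the split decision as it reads after this change (an illustrative reading of the diff above, not the verbatim kernel function; the helper name is made up):

/*
 * Condensed reading of the new decision above.  goal_mtu is the room
 * left in the packet being built, frag_point the association's
 * fragmentation point, eeor_on whether explicit EOR mode is active.
 */
static uint32_t
can_we_split_sketch(struct sctp_tcb *stcb,
    struct sctp_stream_queue_pending *sp,
    uint32_t goal_mtu, uint32_t frag_point, int eeor_on)
{
	if (eeor_on) {
		/* EEOR: send the whole pending piece once it fits ... */
		if (goal_mtu >= sp->length)
			/* ... but only when nothing is left in flight. */
			return (stcb->asoc.total_flight > 0 ? 0 : sp->length);
	} else {
		/* Cannot fill the amount needed; no point splitting. */
		if (goal_mtu >= sp->length)
			return (0);
	}
	if ((sp->length <= goal_mtu) ||
	    ((sp->length - goal_mtu) < sctp_min_residual))
		/* Sub-optimal residual, don't split (non-EEOR case). */
		return (0);
	if (goal_mtu >= min(sctp_min_split_point, frag_point))
		/* OK to split; hand out at most frag_point. */
		return (min(goal_mtu, frag_point));
	/* Nope, can't split. */
	return (0);
}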
@@ -6164,13 +6133,20 @@ sctp_move_to_outqueue(struct sctp_tcb *stcb, struct sctp_nets *net,
uint8_t rcv_flags = 0;
uint8_t some_taken;
uint8_t took_all = 0;
uint8_t send_lock_up = 0;

SCTP_TCB_LOCK_ASSERT(stcb);
asoc = &stcb->asoc;
one_more_time:
sp = TAILQ_FIRST(&strq->outqueue);
if (sp == NULL) {
*locked = 0;
SCTP_TCB_SEND_LOCK(stcb);
sp = TAILQ_FIRST(&strq->outqueue);
if (sp) {
SCTP_TCB_SEND_UNLOCK(stcb);
goto one_more_time;
}
if (strq->last_msg_incomplete) {
printf("Huh? Stream:%d lm_in_c=%d but queue is NULL\n",
strq->stream_no, strq->last_msg_incomplete);

@@ -6179,16 +6155,52 @@ sctp_move_to_outqueue(struct sctp_tcb *stcb, struct sctp_nets *net,
SCTP_TCB_SEND_UNLOCK(stcb);
return (0);
}
SCTP_TCB_SEND_LOCK(stcb);
if ((sp->length == 0) && (sp->msg_is_complete == 0)) {
/* Must wait for more data, must be last msg */
*locked = 1;
*giveup = 1;
SCTP_TCB_SEND_UNLOCK(stcb);
return (0);
} else if (sp->length == 0) {
/* This should not happen */
panic("sp length is 0?");
if (sp->msg_is_complete) {
if (sp->length == 0) {
if (sp->sender_all_done) {
/*
* We are doing deferred cleanup. Last time
* through when we took all the data the
* sender_all_done was not set.
*/
SCTP_TCB_SEND_LOCK(stcb);
send_lock_up = 1;
atomic_subtract_int(&asoc->stream_queue_cnt, 1);
TAILQ_REMOVE(&strq->outqueue, sp, next);
sctp_free_remote_addr(sp->net);
if (sp->data) {
sctp_m_freem(sp->data);
sp->data = NULL;
}
sctp_free_a_strmoq(stcb, sp);

/* we can't be locked to it */
*locked = 0;
stcb->asoc.locked_on_sending = NULL;
if (send_lock_up) {
SCTP_TCB_SEND_UNLOCK(stcb);
send_lock_up = 0;
}
/* back to get the next msg */
goto one_more_time;
} else {
/*
* sender just finished this but still holds
* a reference
*/
*locked = 1;
*giveup = 1;
return (0);
}
}
} else {
/* is there some to get */
if (sp->length == 0) {
/* no */
*locked = 1;
*giveup = 1;
return (0);
}
}
some_taken = sp->some_taken;
if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {

@@ -6227,16 +6239,19 @@ sctp_move_to_outqueue(struct sctp_tcb *stcb, struct sctp_nets *net,
*locked = 1;
}
*giveup = 1;
SCTP_TCB_SEND_UNLOCK(stcb);
return (0);
}
}
SCTP_TCB_SEND_UNLOCK(stcb);

/* If we reach here, we can copy out a chunk */
sctp_alloc_a_chunk(stcb, chk);
if (chk == NULL) {
/* No chunk memory */
out_gu:
if (send_lock_up) {
SCTP_TCB_SEND_UNLOCK(stcb);
send_lock_up = 0;
}
*giveup = 1;
return (0);
}

@@ -6250,12 +6265,19 @@ sctp_move_to_outqueue(struct sctp_tcb *stcb, struct sctp_nets *net,
/* clear out the chunk before setting up */
memset(chk, sizeof(*chk), 0);
chk->rec.data.rcv_flags = rcv_flags;
SCTP_TCB_SEND_LOCK(stcb);
if (SCTP_BUF_IS_EXTENDED(sp->data)) {
chk->copy_by_ref = 1;
} else {
chk->copy_by_ref = 0;
}
if ((sp->msg_is_complete == 0) &&
(to_move >= sp->length)) {
SCTP_TCB_SEND_LOCK(stcb);
send_lock_up = 1;
} else {
send_lock_up = 0;
}

if (to_move >= sp->length) {
/* we can steal the whole thing */
chk->data = sp->data;

@@ -6271,7 +6293,6 @@ sctp_move_to_outqueue(struct sctp_tcb *stcb, struct sctp_nets *net,
if (chk->data == NULL) {
sp->some_taken = some_taken;
sctp_free_a_chunk(stcb, chk);
SCTP_TCB_SEND_UNLOCK(stcb);
goto out_gu;
}
/* Pull off the data */

@@ -6283,6 +6304,7 @@ sctp_move_to_outqueue(struct sctp_tcb *stcb, struct sctp_nets *net,
SCTP_BUF_NEXT(m) = NULL;
if (sp->tail_mbuf == m) {
/* freeing tail */
printf("Huh, freeing tail? - TSNH\n");
sp->tail_mbuf = sp->data;
}
sctp_m_free(m);

@@ -6292,9 +6314,8 @@ sctp_move_to_outqueue(struct sctp_tcb *stcb, struct sctp_nets *net,
if (to_move > sp->length) {
panic("Huh, how can to_move be larger?");
} else {
sp->length -= to_move;
atomic_subtract_int(&sp->length, to_move);
}

if (M_LEADINGSPACE(chk->data) < sizeof(struct sctp_data_chunk)) {
/* Not enough room for a chunk header, get some */
struct mbuf *m;

@@ -6306,6 +6327,10 @@ sctp_move_to_outqueue(struct sctp_tcb *stcb, struct sctp_nets *net,
* all the data if there is no leading space, so we
* must put the data back and restore.
*/
if (send_lock_up == 0) {
SCTP_TCB_SEND_LOCK(stcb);
send_lock_up = 1;
}
if (took_all) {
/* unsteal the data */
sp->data = chk->data;

@@ -6322,7 +6347,6 @@ sctp_move_to_outqueue(struct sctp_tcb *stcb, struct sctp_nets *net,
sp->length += to_move;
chk->data = NULL;
sctp_free_a_chunk(stcb, chk);
SCTP_TCB_SEND_UNLOCK(stcb);
goto out_gu;
} else {
SCTP_BUF_LEN(m) = 0;

@@ -6334,8 +6358,8 @@ sctp_move_to_outqueue(struct sctp_tcb *stcb, struct sctp_nets *net,
SCTP_BUF_PREPEND(chk->data, sizeof(struct sctp_data_chunk), M_DONTWAIT);
if (chk->data == NULL) {
/* HELP */
printf("prepend fails HELP?\n");
sctp_free_a_chunk(stcb, chk);
SCTP_TCB_SEND_UNLOCK(stcb);
goto out_gu;
}
sctp_snd_sb_alloc(stcb, sizeof(struct sctp_data_chunk));
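The leading-space check above reserves room in front of the payload for the SCTP data chunk header. The generic mbuf idiom, sketched with the stock sys/mbuf.h macros rather than the SCTP_BUF_* wrappers (illustration only; the helper name is made up):

#include <sys/param.h>
#include <sys/mbuf.h>

/*
 * Make sure hdrlen bytes of space exist in front of the payload.
 * M_PREPEND links a fresh mbuf ahead of the chain when there is no
 * leading space and sets m to NULL if that allocation fails.
 */
static struct mbuf *
prepend_hdr(struct mbuf *m, int hdrlen)
{
	if (M_LEADINGSPACE(m) < hdrlen) {
		M_PREPEND(m, hdrlen, M_DONTWAIT);
	} else {
		/* Room already exists: grow the data area backwards. */
		m->m_data -= hdrlen;
		m->m_len += hdrlen;
	}
	return (m);
}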
@@ -6423,9 +6447,13 @@ sctp_move_to_outqueue(struct sctp_tcb *stcb, struct sctp_nets *net,
if (sp->pr_sctp_on) {
sctp_set_prsctp_policy(stcb, sp);
}
if (sp->msg_is_complete && (sp->length == 0)) {
if (sp->msg_is_complete && (sp->length == 0) && (sp->sender_all_done)) {
/* All done pull and kill the message */
asoc->stream_queue_cnt--;
atomic_subtract_int(&asoc->stream_queue_cnt, 1);
if (send_lock_up == 0) {
SCTP_TCB_SEND_LOCK(stcb);
send_lock_up = 1;
}
TAILQ_REMOVE(&strq->outqueue, sp, next);
sctp_free_remote_addr(sp->net);
if (sp->data) {

@@ -6450,7 +6478,10 @@ sctp_move_to_outqueue(struct sctp_tcb *stcb, struct sctp_nets *net,
}
TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next);
asoc->send_queue_cnt++;
SCTP_TCB_SEND_UNLOCK(stcb);
if (send_lock_up) {
SCTP_TCB_SEND_UNLOCK(stcb);
send_lock_up = 0;
}
return (to_move);
}
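The send_lock_up flag threaded through sctp_move_to_outqueue() is what reduces tcb_send_lock contention: the lock is taken only when a queue manipulation actually requires it, and every exit path releases it at most once. The idiom in isolation (a sketch, not kernel code; the function name is made up):

/*
 * Lazy-locking idiom: run the common path unlocked, take the send lock
 * only for the updates that need it, and track ownership in a local
 * flag so a single unlock block covers all exits.
 */
static void
lazy_send_lock_example(struct sctp_tcb *stcb, int need_queue_update)
{
	int send_lock_up = 0;

	/* ... unlocked fast-path work ... */
	if (need_queue_update) {
		if (send_lock_up == 0) {
			SCTP_TCB_SEND_LOCK(stcb);
			send_lock_up = 1;
		}
		/* ... TAILQ_REMOVE / TAILQ_INSERT_TAIL style updates ... */
	}
	if (send_lock_up) {
		SCTP_TCB_SEND_UNLOCK(stcb);
		send_lock_up = 0;
	}
}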
@@ -7009,8 +7040,8 @@ sctp_med_chunk_output(struct sctp_inpcb *inp,
no_fragmentflg, 0, NULL, asconf))) {
if (error == ENOBUFS) {
asoc->ifp_had_enobuf = 1;
SCTP_STAT_INCR(sctps_lowlevelerr);
}
SCTP_STAT_INCR(sctps_lowlevelerr);
if (from_where == 0) {
SCTP_STAT_INCR(sctps_lowlevelerrusr);
}

@@ -7279,9 +7310,9 @@ sctp_med_chunk_output(struct sctp_inpcb *inp,
asconf))) {
/* error, we could not output */
if (error == ENOBUFS) {
SCTP_STAT_INCR(sctps_lowlevelerr);
asoc->ifp_had_enobuf = 1;
}
SCTP_STAT_INCR(sctps_lowlevelerr);
if (from_where == 0) {
SCTP_STAT_INCR(sctps_lowlevelerrusr);
}

@@ -10317,13 +10348,8 @@ sctp_copy_resume(struct sctp_stream_queue_pending *sp,
uint32_t * sndout,
struct mbuf **new_tail)
{

/*#if defined(__FreeBSD__) && __FreeBSD_version >= 602000*/
#if defined(_NOT_YET_)

struct mbuf *m;

/* take out max_hdr */
m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0,
(M_PKTHDR | (user_marks_eor ? M_EOR : 0)));
if (m == NULL)

@@ -10332,55 +10358,6 @@ sctp_copy_resume(struct sctp_stream_queue_pending *sp,
*sndout = m_length(m, NULL);
*new_tail = m_last(m);
return (m);
#else
int left, cancpy, willcpy;
struct mbuf *m, *prev, *head;

left = min(uio->uio_resid, max_send_len);
/* Always get a header just in case */
head = sctp_get_mbuf_for_msg(left, 0, M_WAIT, 0, MT_DATA);
cancpy = M_TRAILINGSPACE(head);
willcpy = min(cancpy, left);
*error = uiomove(mtod(head, caddr_t), willcpy, uio);
if (*error) {
sctp_m_freem(head);
return (NULL);
}
*sndout += willcpy;
left -= willcpy;
SCTP_BUF_LEN(head) = willcpy;
m = head;
*new_tail = head;
while (left > 0) {
/* move in user data */
SCTP_BUF_NEXT(m) = sctp_get_mbuf_for_msg(left, 0, M_WAIT, 0, MT_DATA);
if (SCTP_BUF_NEXT(m) == NULL) {
sctp_m_freem(head);
*new_tail = NULL;
*error = ENOMEM;
return (NULL);
}
prev = m;
m = SCTP_BUF_NEXT(m);
cancpy = M_TRAILINGSPACE(m);
willcpy = min(cancpy, left);
*error = uiomove(mtod(m, caddr_t), willcpy, uio);
if (*error) {
sctp_m_freem(head);
*new_tail = NULL;
*error = EFAULT;
return (NULL);
}
SCTP_BUF_LEN(m) = willcpy;
left -= willcpy;
*sndout += willcpy;
*new_tail = m;
if (left == 0) {
SCTP_BUF_NEXT(m) = NULL;
}
}
return (head);
#endif
}

static int
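The commented-out _NOT_YET_ branch above anticipates replacing the hand-written uiomove() loop with a single m_uiotombuf() call. A minimal sketch of that direction (assuming the stock m_uiotombuf() kernel API; the wrapper name is made up):

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/mbuf.h>
#include <sys/uio.h>

/*
 * Sketch: copy up to max_len bytes of user data into an mbuf chain in
 * one call, reserving resv_upfront bytes of leading space for headers.
 */
static struct mbuf *
copy_user_data(struct uio *uio, int max_len, int resv_upfront, int *error)
{
	struct mbuf *m;

	m = m_uiotombuf(uio, M_WAITOK, max_len, resv_upfront, M_PKTHDR);
	if (m == NULL) {
		*error = ENOMEM;
		return (NULL);
	}
	*error = 0;
	return (m);
}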
@@ -10390,65 +10367,14 @@ sctp_copy_one(struct sctp_stream_queue_pending *sp,
{
int left;

/*#if defined(__FreeBSD__) && __FreeBSD_version >= 602000*/
#if defined(_NOT_YET_)
left = sp->length;
sp->data = m_uiotombuf(uio, M_WAITOK, 0, resv_upfront, M_PKTHDR);
sp->data = m_uiotombuf(uio, M_WAITOK, sp->length,
resv_upfront, M_PKTHDR);
if (sp->data == NULL)
return (ENOMEM);
sp->length = m_length(sp->data, NULL);

sp->tail_mbuf = m_last(sp->data);
return (0);
#else
int cancpy, willcpy, error;
struct mbuf *m, *head;
int cpsz = 0;

/* First one gets a header */
left = sp->length;
head = m = sctp_get_mbuf_for_msg((left + resv_upfront), 0, M_WAIT, 0, MT_DATA);
if (m == NULL) {
return (ENOMEM);
}
/*-
* Add this one for m in now, that way if the alloc fails we won't
* have a bad cnt.
*/
SCTP_BUF_RESV_UF(m, resv_upfront);
cancpy = M_TRAILINGSPACE(m);
willcpy = min(cancpy, left);
while (left > 0) {
/* move in user data */
error = uiomove(mtod(m, caddr_t), willcpy, uio);
if (error) {
sctp_m_freem(head);
return (error);
}
SCTP_BUF_LEN(m) = willcpy;
left -= willcpy;
cpsz += willcpy;
if (left > 0) {
SCTP_BUF_NEXT(m) = sctp_get_mbuf_for_msg(left, 0, M_WAIT, 0, MT_DATA);
if (SCTP_BUF_NEXT(m) == NULL) {
/*
* the head goes back to caller, he can free
* the rest
*/
sctp_m_freem(head);
return (ENOMEM);
}
m = SCTP_BUF_NEXT(m);
cancpy = M_TRAILINGSPACE(m);
willcpy = min(cancpy, left);
} else {
sp->tail_mbuf = m;
SCTP_BUF_NEXT(m) = NULL;
}
}
sp->data = head;
sp->length = cpsz;
return (0);
#endif
}
@@ -10491,13 +10417,14 @@ sctp_copy_it_in(struct sctp_tcb *stcb,
*error = ECONNRESET;
goto out_now;
}
sp = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_strmoq, struct sctp_stream_queue_pending);
sctp_alloc_a_strmoq(stcb, sp);
if (sp == NULL) {
*error = ENOMEM;
goto out_now;
}
SCTP_INCR_STRMOQ_COUNT();
sp->act_flags = 0;
sp->sender_all_done = 0;
sp->sinfo_flags = srcv->sinfo_flags;
sp->timetolive = srcv->sinfo_timetolive;
sp->ppid = srcv->sinfo_ppid;

@@ -10516,6 +10443,7 @@ sctp_copy_it_in(struct sctp_tcb *stcb,
} else {
sp->msg_is_complete = 0;
}
sp->sender_all_done = 0;
sp->some_taken = 0;
resv_in_first = sizeof(struct sctp_data_chunk);
sp->data = sp->tail_mbuf = NULL;

@@ -10541,6 +10469,8 @@ sctp_copy_it_in(struct sctp_tcb *stcb,
}

int
sctp_sosend(struct socket *so,
struct sockaddr *addr,

@@ -11122,6 +11052,11 @@ sctp_lower_sosend(struct socket *so,
atomic_add_int(&stcb->total_sends, 1);
if (top == NULL) {
struct sctp_stream_queue_pending *sp;

#ifdef INVARIANTS
struct sctp_stream_queue_pending *msp;

#endif
struct sctp_stream_out *strm;
uint32_t sndout, initial_out;
int user_marks_eor;

@@ -11151,6 +11086,11 @@ sctp_lower_sosend(struct socket *so,
goto out;
}
SCTP_TCB_SEND_LOCK(stcb);
#ifdef INVARIANTS
msp = TAILQ_LAST(&strm->outqueue, sctp_streamhead);
if (msp && (msp->msg_is_complete == 0))
panic("Huh, new mesg and old not done?");
#endif
if (sp->msg_is_complete) {
strm->last_msg_incomplete = 0;
asoc->stream_locked = 0;

@@ -11162,10 +11102,11 @@ sctp_lower_sosend(struct socket *so,
strm->last_msg_incomplete = 1;
asoc->stream_locked = 1;
asoc->stream_locked_on = srcv->sinfo_stream;
sp->sender_all_done = 0;
}
sctp_snd_sb_alloc(stcb, sp->length);

asoc->stream_queue_cnt++;
atomic_add_int(&asoc->stream_queue_cnt, 1);
TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
if ((srcv->sinfo_flags & SCTP_UNORDERED) == 0) {
sp->strseq = strm->next_sequence_sent;

@@ -11186,7 +11127,9 @@ sctp_lower_sosend(struct socket *so,
}
SCTP_TCB_SEND_UNLOCK(stcb);
} else {
SCTP_TCB_SEND_LOCK(stcb);
sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead);
SCTP_TCB_SEND_UNLOCK(stcb);
if (sp == NULL) {
/* ???? Huh ??? last msg is gone */
#ifdef INVARIANTS

@@ -11233,6 +11176,7 @@ sctp_lower_sosend(struct socket *so,
sctp_m_freem(mm);
if (stcb->asoc.state & SCTP_PCB_FLAGS_WAS_ABORTED)
error = ECONNRESET;
SCTP_TCB_SEND_UNLOCK(stcb);
goto out;
}
if (sp->tail_mbuf) {

@@ -11245,8 +11189,9 @@ sctp_lower_sosend(struct socket *so,
sp->tail_mbuf = new_tail;
}
sctp_snd_sb_alloc(stcb, sndout);
sp->length += sndout;
atomic_add_int(&sp->length, sndout);
len += sndout;

/* Did we reach EOR? */
if ((uio->uio_resid == 0) &&
((user_marks_eor == 0) ||

@@ -11428,13 +11373,20 @@ sctp_lower_sosend(struct socket *so,
}
}
SCTP_TCB_SEND_LOCK(stcb);
if (sp->msg_is_complete == 0) {
strm->last_msg_incomplete = 1;
asoc->stream_locked = 1;
asoc->stream_locked_on = srcv->sinfo_stream;
if (sp) {
if (sp->msg_is_complete == 0) {
strm->last_msg_incomplete = 1;
asoc->stream_locked = 1;
asoc->stream_locked_on = srcv->sinfo_stream;
} else {
sp->sender_all_done = 1;
strm->last_msg_incomplete = 0;
asoc->stream_locked = 0;
}
} else {
strm->last_msg_incomplete = 0;
asoc->stream_locked = 0;

}
SCTP_TCB_SEND_UNLOCK(stcb);
if (uio->uio_resid == 0) {

@@ -414,6 +414,7 @@ struct sctp_stream_queue_pending {
uint8_t some_taken;
uint8_t addr_over;
uint8_t pr_sctp_on;
uint8_t sender_all_done;
};

/*

@@ -762,7 +762,7 @@ struct sctpstat {
/* input statistics: */
u_long sctps_recvpackets; /* total input packets */
u_long sctps_recvdatagrams; /* total input datagrams */
u_long sctps_recvpktwithdata;
u_long sctps_recvpktwithdata; /* total packets that had data */
u_long sctps_recvsacks; /* total input SACK chunks */
u_long sctps_recvdata; /* total input DATA chunks */
u_long sctps_recvdupdata; /* total input duplicate DATA chunks */

@@ -787,45 +787,58 @@ struct sctpstat {
* chunks */
u_long sctps_sendfastretrans; /* total output fast retransmitted
* DATA chunks */
u_long sctps_sendmultfastretrans; /* U-del */
u_long sctps_sendmultfastretrans; /* total FR's that happened
* more than once to same
* chunk (u-del multi-fr
* algo). */
u_long sctps_sendheartbeat; /* total output HB chunks */
u_long sctps_sendecne; /* total output ECNE chunks */
u_long sctps_sendauth; /* total output AUTH chunks FIXME */
u_long sctps_senderrors;/* ip_output error counter */
/* PCKDROPREP statistics: */
u_long sctps_pdrpfmbox; /* */
u_long sctps_pdrpfehos; /* */
u_long sctps_pdrpmbda; /* */
u_long sctps_pdrpmbct; /* */
u_long sctps_pdrpbwrpt; /* */
u_long sctps_pdrpcrupt; /* */
u_long sctps_pdrpnedat; /* */
u_long sctps_pdrppdbrk; /* */
u_long sctps_pdrptsnnf; /* */
u_long sctps_pdrpdnfnd; /* */
u_long sctps_pdrpdiwnp; /* */
u_long sctps_pdrpdizrw; /* */
u_long sctps_pdrpbadd; /* */
u_long sctps_pdrpmark; /* */
u_long sctps_pdrpfmbox; /* Packet drop from middle box */
u_long sctps_pdrpfehos; /* P-drop from end host */
u_long sctps_pdrpmbda; /* P-drops with data */
u_long sctps_pdrpmbct; /* P-drops, non-data, non-endhost */
u_long sctps_pdrpbwrpt; /* P-drop, non-endhost, bandwidth rep only */
u_long sctps_pdrpcrupt; /* P-drop, not enough for chunk header */
u_long sctps_pdrpnedat; /* P-drop, not enough data to confirm */
u_long sctps_pdrppdbrk; /* P-drop, where process_chunk_drop said break */
u_long sctps_pdrptsnnf; /* P-drop, could not find TSN */
u_long sctps_pdrpdnfnd; /* P-drop, attempt reverse TSN lookup */
u_long sctps_pdrpdiwnp; /* P-drop, e-host confirms zero-rwnd */
u_long sctps_pdrpdizrw; /* P-drop, midbox confirms no space */
u_long sctps_pdrpbadd; /* P-drop, data did not match TSN */
u_long sctps_pdrpmark; /* P-drop, TSN's marked for Fast Retran */
/* timeouts */
u_long sctps_timoiterator; /* */
u_long sctps_timodata; /* */
u_long sctps_timowindowprobe; /* */
u_long sctps_timoinit; /* */
u_long sctps_timosack; /* */
u_long sctps_timoshutdown; /* */
u_long sctps_timoheartbeat; /* */
u_long sctps_timocookie;/* */
u_long sctps_timosecret;/* */
u_long sctps_timopathmtu; /* */
u_long sctps_timoshutdownack; /* */
u_long sctps_timoshutdownguard; /* */
u_long sctps_timostrmrst; /* */
u_long sctps_timoearlyfr; /* */
u_long sctps_timoasconf;/* */
u_long sctps_timoautoclose; /* */
u_long sctps_timoassockill; /* */
u_long sctps_timoinpkill; /* */
u_long sctps_timoiterator; /* Number of iterator timers that
* fired */
u_long sctps_timodata; /* Number of T3 data time outs */
u_long sctps_timowindowprobe; /* Number of window probe (T3) timers
* that fired */
u_long sctps_timoinit; /* Number of INIT timers that fired */
u_long sctps_timosack; /* Number of sack timers that fired */
u_long sctps_timoshutdown; /* Number of shutdown timers that
* fired */
u_long sctps_timoheartbeat; /* Number of heartbeat timers that
* fired */
u_long sctps_timocookie;/* Number of times a cookie timeout fired */
u_long sctps_timosecret;/* Number of times an endpoint changed its
* cookie secret */
u_long sctps_timopathmtu; /* Number of PMTU timers that fired */
u_long sctps_timoshutdownack; /* Number of shutdown ack timers that
* fired */
u_long sctps_timoshutdownguard; /* Number of shutdown guard timers
* that fired */
u_long sctps_timostrmrst; /* Number of stream reset timers that
* fired */
u_long sctps_timoearlyfr; /* Number of early FR timers that
* fired */
u_long sctps_timoasconf;/* Number of times an asconf timer fired */
u_long sctps_timoautoclose; /* Number of times auto close timer
* fired */
u_long sctps_timoassockill; /* Number of asoc free timers expired */
u_long sctps_timoinpkill; /* Number of inp free timers expired */
/* Early fast retransmission counters */
u_long sctps_earlyfrstart;
u_long sctps_earlyfrstop;

@@ -851,13 +864,22 @@ struct sctpstat {
u_long sctps_naglesent; /* nagle allowed sending */
u_long sctps_naglequeued; /* nagle doesn't allow sending */
u_long sctps_maxburstqueued; /* max burst doesn't allow sending */
u_long sctps_ifnomemqueued; /* */
u_long sctps_ifnomemqueued; /* look ahead tells us no memory in
* interface ring buffer OR we had a
* send error and are queuing one
* send. */
u_long sctps_windowprobed; /* total number of window probes sent */
u_long sctps_lowlevelerr;
u_long sctps_lowlevelerrusr;
u_long sctps_datadropchklmt;
u_long sctps_datadroprwnd;
u_long sctps_ecnereducedcwnd;
u_long sctps_lowlevelerr; /* total times an output error causes
* us to clamp down on next user send. */
u_long sctps_lowlevelerrusr; /* total times sctp_senderrors were
* caused from a user send from a user
* invoked send not a sack response */
u_long sctps_datadropchklmt; /* Number of in data drops due to
* chunk limit reached */
u_long sctps_datadroprwnd; /* Number of in data drops due to rwnd
* limit reached */
u_long sctps_ecnereducedcwnd; /* Number of times a ECN reduced the
* cwnd */
u_long sctps_vtagexpress; /* Used express lookup via vtag */
u_long sctps_vtagbogus; /* Collision in express lookup. */
u_long sctps_primary_randry; /* Number of times the sender ran dry

@@ -867,12 +889,16 @@ struct sctpstat {
u_long sctps_wu_sacks_sent; /* Window Update only sacks sent */
u_long sctps_sends_with_flags; /* number of sends with sinfo_flags
* !=0 */
u_long sctps_sends_with_unord;
u_long sctps_sends_with_eof;
u_long sctps_sends_with_abort;
u_long sctps_protocol_drain_calls;
u_long sctps_protocol_drains_done;
u_long sctps_read_peeks;
u_long sctps_sends_with_unord; /* number of unordered sends */
u_long sctps_sends_with_eof; /* number of sends with EOF flag set */
u_long sctps_sends_with_abort; /* number of sends with ABORT flag set */
u_long sctps_protocol_drain_calls; /* number of times protocol
* drain called */
u_long sctps_protocol_drains_done; /* number of times we did a
* protocol drain */
u_long sctps_read_peeks;/* Number of times recv was called with peek */
u_long sctps_cached_chk;/* Number of cached chunks used */
u_long sctps_cached_strmoq; /* Number of cached stream oq's used */
};

#define SCTP_STAT_INCR(_x) SCTP_STAT_INCR_BY(_x,1)
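These counters are exported to user space; a small sketch of pulling the whole sctpstat block via sysctl (not part of this commit; the "net.inet.sctp.stats" name is an assumption here, so check the headers and sysctl tree on the target system):

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <netinet/sctp_uio.h>

int
main(void)
{
	struct sctpstat stat;
	size_t len = sizeof(stat);

	/* Pull the kernel's copy of the SCTP statistics block. */
	if (sysctlbyname("net.inet.sctp.stats", &stat, &len, NULL, 0) < 0) {
		perror("sysctlbyname");
		return (1);
	}
	printf("packets in: %lu, packets with data: %lu\n",
	    stat.sctps_recvpackets, stat.sctps_recvpktwithdata);
	return (0);
}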
@@ -98,6 +98,7 @@ extern struct pr_usrreqs sctp_usrreqs;
(_strmoq) = TAILQ_FIRST(&(_stcb)->asoc.free_strmoq); \
TAILQ_REMOVE(&(_stcb)->asoc.free_strmoq, (_strmoq), next); \
atomic_subtract_int(&sctppcbinfo.ipi_free_strmoq, 1); \
SCTP_STAT_INCR(sctps_cached_strmoq); \
(_stcb)->asoc.free_strmoq_cnt--; \
} \
}

@@ -125,6 +126,7 @@ extern struct pr_usrreqs sctp_usrreqs;
(_chk) = TAILQ_FIRST(&(_stcb)->asoc.free_chunks); \
TAILQ_REMOVE(&(_stcb)->asoc.free_chunks, (_chk), sctp_next); \
atomic_subtract_int(&sctppcbinfo.ipi_free_chunks, 1); \
SCTP_STAT_INCR(sctps_cached_chk); \
(_stcb)->asoc.free_chunk_cnt--; \
} \
}
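These macro fragments take an item from the association's free list before falling back to the zone allocator, which is the per-assoc cache the commit message refers to. A simplified reading of the pattern, written as a function for clarity (illustration only; the real code is the sctp_alloc_a_chunk/sctp_alloc_a_strmoq macros shown above, and the ipi_zone_chunk name follows the ipi_zone_strmoq style used earlier in the diff):

/*
 * Simplified sketch of the per-association cache: reuse a chunk from
 * the assoc's free list when one is available, otherwise fall back to
 * the zone allocator.
 */
static struct sctp_tmit_chunk *
alloc_chunk_cached(struct sctp_tcb *stcb)
{
	struct sctp_tmit_chunk *chk;

	chk = TAILQ_FIRST(&stcb->asoc.free_chunks);
	if (chk != NULL) {
		/* Cache hit: unlink, adjust the counters, count the reuse. */
		TAILQ_REMOVE(&stcb->asoc.free_chunks, chk, sctp_next);
		atomic_subtract_int(&sctppcbinfo.ipi_free_chunks, 1);
		SCTP_STAT_INCR(sctps_cached_chk);
		stcb->asoc.free_chunk_cnt--;
	} else {
		/* Cache miss: allocate a fresh one from the zone. */
		chk = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk,
		    struct sctp_tmit_chunk);
	}
	return (chk);
}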
@@ -39,8 +39,54 @@ __FBSDID("$FreeBSD$");

#if defined(_KERNEL)

#ifdef SCTP_MBUF_LOGGING

/*-
* Any new logging added must also define SCTP_STAT_LOGGING if
* it's not already defined.
*/
#if defined(SCTP_LOG_MAXBURST) | defined(SCTP_LOG_RWND) | defined(SCTP_LOG_RWND)
#ifndef SCTP_STAT_LOGGING
#define SCTP_STAT_LOGGING 1
#endif
#endif

#if defined(SCTP_CWND_LOGGING) | defined(SCTP_CWND_MONITOR) | defined(SCTP_BLK_LOGGING)
#ifndef SCTP_STAT_LOGGING
#define SCTP_STAT_LOGGING 1
#endif
#endif

#if defined(SCTP_STR_LOGGING) | defined(SCTP_FR_LOGGING) | defined(SCTP_MAP_LOGGING)
#ifndef SCTP_STAT_LOGGING
#define SCTP_STAT_LOGGING 1
#endif
#endif

#if defined(SCTP_SACK_LOGGING) | defined(SCTP_LOCK_LOGGING) | defined(SCTP_STAT_LOGGING)
#ifndef SCTP_STAT_LOGGING
#define SCTP_STAT_LOGGING 1
#endif
#endif

#if defined(SCTP_RTTVAR_LOGGING) | defined(SCTP_SB_LOGGING) | defined(SCTP_EARLYFR_LOGGING)
#ifndef SCTP_STAT_LOGGING
#define SCTP_STAT_LOGGING 1
#endif
#endif

#if defined(SCTP_NAGLE_LOGGING) | defined(SCTP_WAKE_LOGGING) | defined(SCTP_RECV_RWND_LOGGING)
#ifndef SCTP_STAT_LOGGING
#define SCTP_STAT_LOGGING 1
#endif
#endif

#if defined(SCTP_SACK_RWND_LOGGING) | defined(SCTP_FLIGHT_LOGGING) | defined(SCTP_MBUF_LOGGING)
#ifndef SCTP_STAT_LOGGING
#define SCTP_STAT_LOGGING 1
#endif
#endif

#ifdef SCTP_MBUF_LOGGING
struct mbuf *sctp_m_free(struct mbuf *m);
void sctp_m_freem(struct mbuf *m);
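With these guards in place, switching on any single logging knob in a kernel configuration is enough to pull in the stat-logging data structures. An illustrative kernel config fragment (option names taken from the guard block above; whether each one is wired up as a config option, and the exact supported set, is version dependent):

options 	SCTP
options 	SCTP_MBUF_LOGGING	# any one logging option now implies SCTP_STAT_LOGGING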