- Fix a memory overwrite when the mapping array is expanded; the size of
  the expansion was not taken into consideration.
- Make the vtag hash array one entry bigger so that the modulo indexing
  works out correctly, avoiding a panic when a restart with the right
  modulo value happens.
- Do not dereference stcb when control->do_not_ref_stcb is set.
- Rework packet logging so that it rarely needs a lock, and add the
  SCTP_PACKET_LOGGING option to the kernel options file.
- Fix some logging option duplication in sctputil.h.
Randall Stewart 2007-05-30 17:39:45 +00:00
parent 4257f25c06
commit 0696e1203e
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=170138
11 changed files with 141 additions and 131 deletions


@@ -422,6 +422,7 @@ SCTP_RECV_RWND_LOGGING opt_sctp.h
SCTP_SACK_RWND_LOGGING opt_sctp.h
SCTP_FLIGHT_LOGGING opt_sctp.h
SCTP_MBUF_LOGGING opt_sctp.h
SCTP_PACKET_LOGGING opt_sctp.h
# Netgraph(4). Use option NETGRAPH to enable the base netgraph code.
# Each netgraph node type can be either be compiled into the kernel


@@ -374,112 +374,103 @@ sctp_get_mbuf_for_msg(unsigned int space_needed, int want_header,
#ifdef SCTP_PACKET_LOGGING
int packet_log_start = 0;
int packet_log_writers = 0;
int packet_log_end = 0;
int packet_log_old_end = SCTP_PACKET_LOG_SIZE;
int packet_log_wrapped = 0;
uint8_t packet_log_buffer[SCTP_PACKET_LOG_SIZE];
void
sctp_packet_log(struct mbuf *m, int length)
{
int *lenat, needed, thisone;
int *lenat, thisone;
void *copyto;
uint32_t *tick_tock;
int total_len, spare;
int total_len;
int grabbed_lock = 0;
int value, newval, thisend, thisbegin;
total_len = SCTP_SIZE32((length + (2 * sizeof(int))));
/*
 * Buffer layout:
 *  - sizeof this entry (total_len)
 *  - previous end (value)
 *  - ticks of log (ticks)
 *  o ip packet
 *  o as logged
 *  - where this started (thisbegin)
 *  x <-- end points here
 */
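/*
 * Worked example with illustrative numbers (not from the commit):
 * logging a 100-byte packet gives
 * total_len = SCTP_SIZE32(100 + 4 * sizeof(int)) = 116 bytes, stored as
 * [total_len][previous end][ticks][100 packet bytes][thisbegin],
 * so the trailing int records where this entry began in the ring.
 */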
total_len = SCTP_SIZE32((length + (4 * sizeof(int))));
/* Log a packet to the buffer. */
if (total_len > SCTP_PACKET_LOG_SIZE) {
/* Can't log this packet I have not a buffer big enough */
return;
}
if (length < (SCTP_MIN_V4_OVERHEAD + sizeof(struct sctp_cookie_ack_chunk))) {
printf("Huh, length is %d to small for sctp min:%d\n",
length,
(SCTP_MIN_V4_OVERHEAD + sizeof(struct sctp_cookie_ack_chunk)));
return;
}
SCTP_IP_PKTLOG_LOCK();
if ((SCTP_PACKET_LOG_SIZE - packet_log_end) <= total_len) {
/*
* it won't fit on the end. We must go back to the
* beginning. To do this we go back and change
* packet_log_start.
*/
int orig_end;
lenat = (int *)packet_log_buffer;
orig_end = packet_log_end;
packet_log_old_end = packet_log_end;
packet_log_end = 0;
if (packet_log_start > packet_log_old_end) {
/* calculate the head room */
spare = packet_log_start - packet_log_old_end;
atomic_add_int(&packet_log_writers, 1);
try_again:
if (packet_log_writers > SCTP_PKTLOG_WRITERS_NEED_LOCK) {
SCTP_IP_PKTLOG_LOCK();
grabbed_lock = 1;
again_locked:
value = packet_log_end;
newval = packet_log_end + total_len;
if (newval >= SCTP_PACKET_LOG_SIZE) {
/* we wrapped */
thisbegin = 0;
thisend = total_len;
} else {
spare = 0;
thisbegin = packet_log_end;
thisend = newval;
}
needed = total_len - spare;
packet_log_wrapped = 1;
/* Now update the start */
while (needed > 0) {
thisone = (*(int *)(&packet_log_buffer[packet_log_start]));
needed -= thisone;
if (thisone == 0) {
int *foo;
foo = (int *)(&packet_log_buffer[packet_log_start]);
goto insane;
}
/* move to next one */
packet_log_start += thisone;
if (!(atomic_cmpset_int(&packet_log_end, value, thisend))) {
goto again_locked;
}
} else {
lenat = (int *)&packet_log_buffer[packet_log_end];
if (packet_log_start > packet_log_end) {
if ((packet_log_end + total_len) > packet_log_start) {
/* Now need to update killing some packets */
needed = total_len - ((packet_log_start - packet_log_end));
while (needed > 0) {
thisone = (*(int *)(&packet_log_buffer[packet_log_start]));
needed -= thisone;
if (thisone == 0) {
goto insane;
}
/* move to next one */
packet_log_start += thisone;
if (((packet_log_start + sizeof(struct ip)) > SCTP_PACKET_LOG_SIZE) ||
(packet_log_wrapped && (packet_log_start >= packet_log_old_end))) {
packet_log_start = 0;
packet_log_old_end = 0;
packet_log_wrapped = 0;
break;
}
}
}
value = packet_log_end;
newval = packet_log_end + total_len;
if (newval >= SCTP_PACKET_LOG_SIZE) {
/* we wrapped */
thisbegin = 0;
thisend = total_len;
} else {
thisbegin = packet_log_end;
thisend = newval;
}
if (!(atomic_cmpset_int(&packet_log_end, value, thisend))) {
goto try_again;
}
}
if (((packet_log_end + total_len) >= SCTP_PACKET_LOG_SIZE) ||
((void *)((caddr_t)lenat) < (void *)packet_log_buffer) ||
((void *)((caddr_t)lenat + total_len) > (void *)&packet_log_buffer[SCTP_PACKET_LOG_SIZE])) {
/* Madness protection */
insane:
printf("Went mad, end:%d start:%d len:%d wrapped:%d oe:%d - zapping\n",
packet_log_end, packet_log_start, total_len, packet_log_wrapped, packet_log_old_end);
packet_log_start = packet_log_end = packet_log_old_end = packet_log_wrapped = 0;
lenat = (int *)&packet_log_buffer[0];
/* Sanity check */
if (thisend >= SCTP_PACKET_LOG_SIZE) {
printf("Insanity stops a log thisbegin:%d thisend:%d writers:%d lock:%d end:%d\n",
thisbegin,
thisend,
packet_log_writers,
grabbed_lock,
packet_log_end);
packet_log_end = 0;
goto no_log;
}
lenat = (int *)&packet_log_buffer[thisbegin];
*lenat = total_len;
lenat++;
*lenat = value;
lenat++;
tick_tock = (uint32_t *) lenat;
lenat++;
*tick_tock = sctp_get_tick_count();
copyto = (void *)lenat;
packet_log_end = (((caddr_t)copyto + length) - (caddr_t)packet_log_buffer);
SCTP_IP_PKTLOG_UNLOCK();
thisone = thisend - sizeof(int);
lenat = (int *)&packet_log_buffer[thisone];
*lenat = thisbegin;
if (grabbed_lock) {
SCTP_IP_PKTLOG_UNLOCK();
grabbed_lock = 0;
}
m_copydata(m, 0, length, (caddr_t)copyto);
no_log:
if (grabbed_lock) {
SCTP_IP_PKTLOG_UNLOCK();
}
atomic_subtract_int(&packet_log_writers, 1);
}
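The rewritten writer path above mostly avoids SCTP_IP_PKTLOG_LOCK(): it reserves space by looping on atomic_cmpset_int over packet_log_end and only grabs the lock once more than SCTP_PKTLOG_WRITERS_NEED_LOCK writers are active at the same time. The fragment below is a minimal userland sketch of that reservation idea only, not the kernel code; the buffer size, the C11 atomics, and the helper names are assumptions made for the example.

#include <stdatomic.h>
#include <stdint.h>
#include <string.h>

#define LOG_SIZE 65536			/* stand-in for SCTP_PACKET_LOG_SIZE */

static uint8_t log_buf[LOG_SIZE];
static _Atomic int log_end;		/* next free offset in the ring */

/*
 * Reserve len bytes with a compare-and-swap on log_end and return the
 * start offset.  Losing the race just retries; an entry that would run
 * past the end of the buffer restarts at offset 0, as in sctp_packet_log().
 */
static int
pktlog_reserve(int len)
{
	int oldv, begin, newv;

	do {
		oldv = atomic_load(&log_end);
		if (oldv + len >= LOG_SIZE) {	/* would overrun: wrap */
			begin = 0;
			newv = len;
		} else {
			begin = oldv;
			newv = oldv + len;
		}
	} while (!atomic_compare_exchange_weak(&log_end, &oldv, newv));

	return (begin);
}

/* Copy one record into the slot it reserved. */
static void
pktlog_write(const void *pkt, int len)
{
	memcpy(&log_buf[pktlog_reserve(len)], pkt, len);
}

The kernel version additionally stores the per-entry header (length, previous end, ticks, begin offset) and falls back to the pktlog lock when writers pile up; the sketch leaves those parts out.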
@@ -490,44 +481,43 @@ sctp_copy_out_packet_log(uint8_t * target, int length)
* We wind through the packet log starting at start copying up to
* length bytes out. We return the number of bytes copied.
*/
int tocopy, this_copy, copied = 0;
void *at;
int tocopy, this_copy;
int *lenat;
int did_delay = 0;
tocopy = length;
if (packet_log_start == packet_log_end) {
/* no data */
if (length < (2 * sizeof(int))) {
/* not enough room */
return (0);
}
if (packet_log_wrapped) {
/*
* we have a wrapped buffer, we must copy from start to the
* old end. Then copy from the top of the buffer to the end.
*/
SCTP_IP_PKTLOG_LOCK();
at = (void *)&packet_log_buffer[packet_log_start];
this_copy = min(tocopy, (packet_log_old_end - packet_log_start));
memcpy(target, at, this_copy);
tocopy -= this_copy;
copied += this_copy;
if (tocopy == 0) {
SCTP_IP_PKTLOG_UNLOCK();
return (copied);
if (SCTP_PKTLOG_WRITERS_NEED_LOCK) {
atomic_add_int(&packet_log_writers, SCTP_PKTLOG_WRITERS_NEED_LOCK);
again:
if ((did_delay == 0) && (packet_log_writers != SCTP_PKTLOG_WRITERS_NEED_LOCK)) {
/*
* we delay here for just a moment hoping the
* writer(s) that were present when we entered will
* have left and we only have locking ones that will
* contend with us for the lock. This does not
* assure 100% access, but it's good enough for a
* logging facility like this.
*/
did_delay = 1;
DELAY(10);
goto again;
}
this_copy = min(tocopy, packet_log_end);
at = (void *)&packet_log_buffer;
memcpy(&target[copied], at, this_copy);
copied += this_copy;
SCTP_IP_PKTLOG_UNLOCK();
return (copied);
} else {
/* we have one contiguous buffer */
SCTP_IP_PKTLOG_LOCK();
at = (void *)&packet_log_buffer;
this_copy = min(length, packet_log_end);
memcpy(target, at, this_copy);
SCTP_IP_PKTLOG_UNLOCK();
return (this_copy);
}
SCTP_IP_PKTLOG_LOCK();
lenat = (int *)target;
*lenat = packet_log_end;
lenat++;
this_copy = min((length - sizeof(int)), packet_log_end);
memcpy((void *)lenat, (void *)packet_log_buffer, this_copy);
if (SCTP_PKTLOG_WRITERS_NEED_LOCK) {
atomic_subtract_int(&packet_log_writers, SCTP_PKTLOG_WRITERS_NEED_LOCK);
}
SCTP_IP_PKTLOG_UNLOCK();
return (this_copy + sizeof(int));
}
#endif
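On the read side, sctp_copy_out_packet_log() now hands out a buffer whose first int is the writer's current end offset, followed by up to packet_log_end raw bytes of the log, where each record carries the [total_len][previous end][ticks][packet bytes][thisbegin] header described above. The routine below is a hypothetical consumer sketch built from that layout; it is not an existing FreeBSD utility, and native-endian ints plus a well-formed buffer are assumptions made here.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Walk a buffer returned by the copy-out routine: a leading int holding
 * the writer's end offset, then raw log records of the form
 * [total_len][previous end][ticks][packet bytes][thisbegin].
 */
static void
pktlog_dump(const uint8_t *buf, size_t len)
{
	int32_t log_end, total_len, prev_end, ticks;
	size_t off;

	if (len < sizeof(log_end))
		return;
	memcpy(&log_end, buf, sizeof(log_end));
	printf("writer end offset: %d\n", log_end);
	buf += sizeof(log_end);
	len -= sizeof(log_end);

	for (off = 0; off + 3 * sizeof(int32_t) <= len; off += (size_t)total_len) {
		memcpy(&total_len, buf + off, sizeof(total_len));
		memcpy(&prev_end, buf + off + 4, sizeof(prev_end));
		memcpy(&ticks, buf + off + 8, sizeof(ticks));
		if (total_len <= 0 || off + (size_t)total_len > len)
			break;		/* truncated or corrupt record */
		printf("record at %zu: %d bytes, ticks %d, previous end %d\n",
		    off, total_len, ticks, prev_end);
	}
}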


@@ -949,7 +949,9 @@ __FBSDID("$FreeBSD$");
* entries must be searched to see if the tag is in timed wait. If so we
* reject it.
*/
#define SCTP_STACK_VTAG_HASH_SIZE 31
#define SCTP_STACK_VTAG_HASH_SIZE 31
#define SCTP_STACK_VTAG_HASH_SIZE_A 32
/*
* If we use the per-endpoint model then we do not have a hash table of


@@ -1482,7 +1482,7 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
}
if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
SCTP_TCB_LOCK_ASSERT(stcb);
if (sctp_expand_mapping_array(asoc)) {
if (sctp_expand_mapping_array(asoc, gap)) {
/* Can't expand, drop it */
return (0);
}
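Previously sctp_expand_mapping_array() grew the array by a fixed SCTP_MAPPING_ARRAY_INCR no matter how large the gap was, so a gap bigger than one increment could still land past the end of the newly allocated array and overwrite adjacent memory. Passing the gap in lets the routine size the allocation to cover it, as the sctputil.c hunk further down shows. A simplified sketch of that calculation, using hypothetical names and a plain malloc in place of SCTP_MALLOC:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define MAPPING_ARRAY_INCR 32		/* stand-in for SCTP_MAPPING_ARRAY_INCR */

/*
 * Grow a TSN mapping bitmap so that bit 'needed' (the gap that triggered
 * the expansion) is guaranteed to fit, plus one increment of slack.
 * Returns 0 on success, -1 if the allocation fails.
 */
static int
expand_mapping_array(uint8_t **array, uint32_t *size, uint32_t needed)
{
	uint8_t *new_array;
	uint32_t new_size;

	/* bytes required to hold bit 'needed', rounded up, plus slack */
	new_size = *size + ((needed + 7) / 8 + MAPPING_ARRAY_INCR);

	new_array = malloc(new_size);
	if (new_array == NULL)
		return (-1);
	memset(new_array, 0, new_size);
	memcpy(new_array, *array, *size);	/* preserve the existing map */
	free(*array);
	*array = new_array;
	*size = new_size;
	return (0);
}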


@@ -1377,9 +1377,10 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
asoc->str_reset_seq_in = asoc->init_seq_number;
asoc->advanced_peer_ack_point = asoc->last_acked_seq;
if (asoc->mapping_array)
if (asoc->mapping_array) {
memset(asoc->mapping_array, 0,
asoc->mapping_array_size);
}
SCTP_TCB_UNLOCK(stcb);
SCTP_INP_INFO_WLOCK();
SCTP_INP_WLOCK(stcb->sctp_ep);


@@ -260,6 +260,15 @@ typedef struct callout sctp_os_timer_t;
} else if ((m->m_flags & M_EXT) == 0) { \
M_ALIGN(m, len); \
}
/* We make it so that if you have up to 4 threads
 * writing, based on the default packet log size of
 * 65k, that would be 4 16k packets before we would
 * hit a problem.
 */
#define SCTP_PKTLOG_WRITERS_NEED_LOCK 3
/*************************/
/* MTU */
/*************************/


@@ -9529,7 +9529,7 @@ sctp_send_hb(struct sctp_tcb *stcb, int user_req, struct sctp_nets *u_net)
atomic_add_int(&chk->whoTo->ref_count, 1);
/* Now we have a mbuf that we can fill in with the details */
hb = mtod(chk->data, struct sctp_heartbeat_chunk *);
memset(hb, 0, sizeof(struct sctp_heartbeat_chunk));
/* fill out chunk header */
hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST;
hb->ch.chunk_flags = 0;


@@ -3341,7 +3341,7 @@ sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr,
/* set up back pointers */
stcb->sctp_ep = inp;
stcb->sctp_socket = inp->sctp_socket;
if ((err = sctp_init_asoc(inp, asoc, for_a_init, override_tag, vrf_id))) {
if ((err = sctp_init_asoc(inp, stcb, for_a_init, override_tag, vrf_id))) {
/* failed */
SCTP_TCB_LOCK_DESTROY(stcb);
SCTP_TCB_SEND_LOCK_DESTROY(stcb);
@@ -4621,7 +4621,7 @@ sctp_pcb_init()
SCTP_OS_TIMER_INIT(&sctppcbinfo.addr_wq_timer.timer);
/* Init the TIMEWAIT list */
for (i = 0; i < SCTP_STACK_VTAG_HASH_SIZE; i++) {
for (i = 0; i < SCTP_STACK_VTAG_HASH_SIZE_A; i++) {
LIST_INIT(&sctppcbinfo.vtag_timewait[i]);
}
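The time-wait lists are still hashed with a modulo of SCTP_STACK_VTAG_HASH_SIZE (31), but the backing array is now declared with the separate, one-larger SCTP_STACK_VTAG_HASH_SIZE_A (32), so an index that ever reaches the modulus value cannot run past the end of the table; the exact indexing path that produced the panic is not shown in these hunks. A minimal sketch of the sizing convention, with hypothetical names:

#include <sys/queue.h>
#include <stdint.h>

#define VTAG_HASH_SIZE   31	/* modulus used to pick a bucket */
#define VTAG_HASH_SIZE_A 32	/* array size: one slot past the modulus */

struct vtag_entry {
	uint32_t vtag;
	LIST_ENTRY(vtag_entry) link;
};
LIST_HEAD(vtag_head, vtag_entry);

/* Sized with the larger constant so bucket VTAG_HASH_SIZE is still valid. */
static struct vtag_head vtag_timewait[VTAG_HASH_SIZE_A];

static struct vtag_head *
vtag_bucket(uint32_t vtag)
{
	return (&vtag_timewait[vtag % VTAG_HASH_SIZE]);
}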


@@ -217,7 +217,7 @@ struct sctp_epinfo {
uint32_t ipi_free_strmoq;
struct sctpvtaghead vtag_timewait[SCTP_STACK_VTAG_HASH_SIZE];
struct sctpvtaghead vtag_timewait[SCTP_STACK_VTAG_HASH_SIZE_A];
/* address work queue handling */
#if defined(SCTP_USE_THREAD_BASED_ITERATOR)


@@ -901,9 +901,11 @@ sctp_select_a_tag(struct sctp_inpcb *m)
int
sctp_init_asoc(struct sctp_inpcb *m, struct sctp_association *asoc,
sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
int for_a_init, uint32_t override_tag, uint32_t vrf_id)
{
struct sctp_association *asoc;
/*
* Anything set to zero is taken care of by the allocation routine's
* bzero
@@ -917,6 +919,7 @@ sctp_init_asoc(struct sctp_inpcb *m, struct sctp_association *asoc,
*/
int i;
asoc = &stcb->asoc;
/* init all variables to a known value. */
asoc->state = SCTP_STATE_INUSE;
asoc->max_burst = m->sctp_ep.max_burst;
@@ -1122,13 +1125,14 @@ sctp_init_asoc(struct sctp_inpcb *m, struct sctp_association *asoc,
}
int
sctp_expand_mapping_array(struct sctp_association *asoc)
sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
{
/* mapping array needs to grow */
uint8_t *new_array;
uint16_t new_size;
uint32_t new_size;
new_size = asoc->mapping_array_size + SCTP_MAPPING_ARRAY_INCR;
new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
SCTP_MALLOC(new_array, uint8_t *, new_size, SCTP_M_MAP);
if (new_array == NULL) {
/* can't get more, forget it */
@@ -4935,8 +4939,8 @@ sctp_sorecvmsg(struct socket *so,
}
stcb = control->stcb;
if (stcb) {
if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) &&
(control->do_not_ref_stcb == 0)) {
if ((control->do_not_ref_stcb == 0) &&
(stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
if (freecnt_applied == 0)
stcb = NULL;
} else if (control->do_not_ref_stcb == 0) {
@@ -4956,11 +4960,10 @@ sctp_sorecvmsg(struct socket *so,
* Setup to remember how much we have not yet told
* the peer our rwnd has opened up. Note we grab the
* value from the tcb from last time. Note too that
* sack sending clears this when a sack is sent..
* sack sending clears this when a sack is sent,
* which is fine. Once we hit the rwnd_req, we then
* will go to the sctp_user_rcvd() that will not
* lock until it KNOWs it MUST send a WUP-SACK.
*
*/
freed_so_far = stcb->freed_by_sorcv_sincelast;
stcb->freed_by_sorcv_sincelast = 0;
@@ -5015,7 +5018,7 @@ sctp_sorecvmsg(struct socket *so,
/*
* update off the real current cum-ack, if we have an stcb.
*/
if (stcb)
if ((control->do_not_ref_stcb == 0) && stcb)
sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
/*
* mask off the high bits, we keep the actual chunk bits in
@@ -5096,7 +5099,7 @@ sctp_sorecvmsg(struct socket *so,
if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
goto release;
}
if (stcb &&
if ((control->do_not_ref_stcb == 0) && stcb &&
stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
no_rcv_needed = 1;
}
@@ -5107,7 +5110,8 @@ sctp_sorecvmsg(struct socket *so,
if ((SCTP_BUF_NEXT(m) == NULL) &&
(cp_len >= SCTP_BUF_LEN(m)) &&
((control->end_added == 0) ||
(control->end_added && (TAILQ_NEXT(control, next) == NULL)))
(control->end_added &&
(TAILQ_NEXT(control, next) == NULL)))
) {
#ifdef SCTP_RECV_DETAIL_RWND_LOGGING
sctp_misc_ints(SCTP_SORCV_DOESLCK,
@@ -5215,7 +5219,8 @@ sctp_sorecvmsg(struct socket *so,
sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
#endif
atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
if (stcb) {
if ((control->do_not_ref_stcb == 0) &&
stcb) {
atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
}
copied_so_far += cp_len;
@@ -5305,7 +5310,8 @@ sctp_sorecvmsg(struct socket *so,
control->data = NULL;
sctp_free_a_readq(stcb, control);
control = NULL;
if ((freed_so_far >= rwnd_req) && (no_rcv_needed == 0))
if ((freed_so_far >= rwnd_req) &&
(no_rcv_needed == 0))
sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
} else {
@@ -5316,7 +5322,8 @@ sctp_sorecvmsg(struct socket *so,
* control to read.
*/
#ifdef INVARIANTS
if (control->end_added && (control->data == NULL) &&
if (control->end_added &&
(control->data == NULL) &&
(control->tail_mbuf == NULL)) {
panic("Gak, control->length is corrupt?");
}
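The sctp_sorecvmsg() changes above all follow one rule: test control->do_not_ref_stcb before touching stcb, because a read-queue entry with that flag set holds no reference on the TCB and the pointer may already refer to a freed association. A minimal sketch of the guard, using simplified hypothetical structures rather than the real SCTP ones:

#include <stdint.h>

/* Trimmed-down stand-ins for the real SCTP structures. */
struct my_tcb {
	uint32_t cumulative_tsn;
};

struct my_readq_entry {
	struct my_tcb *stcb;		/* may point at a freed TCB ...  */
	int do_not_ref_stcb;		/* ... whenever this flag is set */
};

/*
 * Check the flag before the pointer: when do_not_ref_stcb is set the
 * entry holds no reference, so the TCB must not be dereferenced.
 */
static int
readq_cumtsn(const struct my_readq_entry *control, uint32_t *tsn)
{
	if (control->do_not_ref_stcb || control->stcb == NULL)
		return (0);		/* nothing safe to report */
	*tsn = control->stcb->cumulative_tsn;
	return (1);
}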


@@ -45,7 +45,7 @@ __FBSDID("$FreeBSD$");
* it's not already defined.
*/
#if defined(SCTP_LOG_MAXBURST) || defined(SCTP_LOG_RWND) || defined(SCTP_LOG_RWND)
#if defined(SCTP_LOG_MAXBURST) || defined(SCTP_LOG_RWND)
#ifndef SCTP_STAT_LOGGING
#define SCTP_STAT_LOGGING 1
#endif
@@ -63,7 +63,7 @@ __FBSDID("$FreeBSD$");
#endif
#endif
#if defined(SCTP_SACK_LOGGING) || defined(SCTP_LOCK_LOGGING) || defined(SCTP_STAT_LOGGING)
#if defined(SCTP_SACK_LOGGING) || defined(SCTP_LOCK_LOGGING)
#ifndef SCTP_STAT_LOGGING
#define SCTP_STAT_LOGGING 1
#endif
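Both sctputil.h hunks drop redundant preprocessor terms: listing defined(SCTP_LOG_RWND) twice in one || chain adds nothing, and testing defined(SCTP_STAT_LOGGING) in a condition whose only effect is guarded by #ifndef SCTP_STAT_LOGGING can never change the outcome. The cleaned-up guard reduces to the simple form below, shown here with the first hunk's options:

/* Any one of these debug options pulls in the generic stat logging. */
#if defined(SCTP_LOG_MAXBURST) || defined(SCTP_LOG_RWND)
#ifndef SCTP_STAT_LOGGING
#define SCTP_STAT_LOGGING 1
#endif
#endif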
@@ -128,7 +128,7 @@ uint32_t sctp_select_initial_TSN(struct sctp_pcb *);
uint32_t sctp_select_a_tag(struct sctp_inpcb *);
int sctp_init_asoc(struct sctp_inpcb *, struct sctp_association *, int, uint32_t, uint32_t);
int sctp_init_asoc(struct sctp_inpcb *, struct sctp_tcb *, int, uint32_t, uint32_t);
void sctp_fill_random_store(struct sctp_pcb *);
@@ -200,7 +200,7 @@ void sctp_stop_timers_for_shutdown(struct sctp_tcb *);
void sctp_report_all_outbound(struct sctp_tcb *, int);
int sctp_expand_mapping_array(struct sctp_association *);
int sctp_expand_mapping_array(struct sctp_association *, uint32_t);
void sctp_abort_notification(struct sctp_tcb *, int);