Tons of fixes to get all the 64-bit issues removed.

This also turns two 16-bit ints into 32-bit values so we do not
have to use atomic_add_16. Most of the changes are %p conversions,
casts, and other assorted nastiness that was in the original code
base. With this commit my machine will now complete a build universe;
however, I have not yet tested on a 64-bit machine, so it may not
work :-(
Randall Stewart 2006-11-05 13:25:18 +00:00
parent b16b2bd274
commit 50cec91936
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=163996
11 changed files with 91 additions and 73 deletions
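
For context, a small user-space sketch (not part of the commit) of the two pointer pitfalls the log message is talking about: on an LP64 platform a pointer is 64 bits while uint32_t is 32, so squeezing a pointer through a uint32_t cast for printf truncates it, whereas %p prints the pointer directly and uintptr_t carries pointer-derived arithmetic at full width.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    int object = 42;
    int *ifn = &object;     /* stand-in for the ifnet pointer in the SCTP code */

    /*
     * Old style: forcing the pointer into 32 bits throws away the upper
     * half of the address on LP64 (shown via uintptr_t so it still compiles).
     */
    printf("Found ifn:%x\n", (uint32_t)(uintptr_t)ifn);

    /* 64-bit clean: %p takes the pointer as-is, no cast through an int. */
    printf("Found ifn:%p\n", (void *)ifn);

    /* Pointer-derived arithmetic goes through uintptr_t, never uint32_t. */
    uintptr_t addr = (uintptr_t)ifn;
    printf("low two bits of the address: %lu\n", (unsigned long)(addr & 0x3));
    return (0);
}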

View File

@@ -812,7 +812,7 @@ sctp_choose_v4_boundall(struct sctp_inpcb *inp,
loopscope, ipv4_scope, &sin_loop, &sin_local);
#ifdef SCTP_DEBUG
if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
printf("Found ifn:%x %d prefered source addresses\n", (uint32_t) ifn, num_prefered);
printf("Found ifn:%p %d prefered source addresses\n", ifn, num_prefered);
}
#endif
if (num_prefered == 0) {
@@ -1521,7 +1521,7 @@ sctp_choose_v6_boundall(struct sctp_inpcb *inp,
if (sin6) {
#ifdef SCTP_DEBUG
if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
printf("Selected address %d ifn:%x for the route\n", cur_addr_num, (uint32_t) ifn);
printf("Selected address %d ifn:%p for the route\n", cur_addr_num, ifn);
}
#endif
if (net) {
@@ -1547,14 +1547,14 @@ sctp_choose_v6_boundall(struct sctp_inpcb *inp,
inp->next_ifn_touse = TAILQ_FIRST(&ifnet);
#ifdef SCTP_DEBUG
if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
printf("Start at first IFN:%x\n", (uint32_t) inp->next_ifn_touse);
printf("Start at first IFN:%p\n", inp->next_ifn_touse);
}
#endif
} else {
inp->next_ifn_touse = TAILQ_NEXT(inp->next_ifn_touse, if_list);
#ifdef SCTP_DEBUG
if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
printf("Resume at IFN:%x\n", (uint32_t) inp->next_ifn_touse);
printf("Resume at IFN:%p\n", inp->next_ifn_touse);
}
#endif
if (inp->next_ifn_touse == NULL) {
@@ -1598,7 +1598,7 @@ sctp_choose_v6_boundall(struct sctp_inpcb *inp,
num_eligible_addr = sctp_count_v6_num_eligible_boundall(ifn, stcb, non_asoc_addr_ok, loopscope, loc_scope);
#ifdef SCTP_DEBUG
if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
printf("IFN:%x has %d eligible\n", (uint32_t) ifn, num_eligible_addr);
printf("IFN:%p has %d eligible\n", ifn, num_eligible_addr);
}
#endif
if (num_eligible_addr == 0) {
@@ -1629,9 +1629,9 @@ sctp_choose_v6_boundall(struct sctp_inpcb *inp,
}
#ifdef SCTP_DEBUG
if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
printf("Selected the %d'th address of ifn:%x\n",
printf("Selected the %d'th address of ifn:%p\n",
cur_addr_num,
(uint32_t) ifn);
ifn);
}
#endif
return (sin6);

View File

@@ -589,7 +589,7 @@ update_crc32(uint32_t crc32,
if (length == 0) {
return (crc32);
}
offset = (uintptr_t) buffer & 3;
offset = (uintptr_t) (buffer & ~0x3);
return (sctp_crc32c_sb8_64_bit(crc32, buffer, length, offset));
}
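
The update_crc32() hunk is the same idea applied to arithmetic rather than printing: the slicing-by-8 CRC32c code needs to know where the buffer sits relative to a 4-byte boundary, and that calculation has to go through an integer type as wide as a pointer. A hedged illustration of the two usual idioms, not the literal kernel line:

#include <stddef.h>
#include <stdint.h>

/*
 * Sketch only: 4-byte alignment arithmetic done through uintptr_t so it
 * stays correct when pointers are 64 bits wide.
 */
static size_t
misalignment(const unsigned char *buffer)
{
    /* How many bytes past the previous 4-byte boundary the buffer starts. */
    return ((size_t)((uintptr_t)buffer & 0x3));
}

static const unsigned char *
aligned_base(const unsigned char *buffer)
{
    /* The buffer address rounded down to a 4-byte boundary. */
    return ((const unsigned char *)((uintptr_t)buffer & ~(uintptr_t)0x3));
}

Casting the pointer to uint32_t instead would discard the upper 32 bits of the address before the mask is applied.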

View File

@@ -3760,7 +3760,7 @@ struct sctp_hs_raise_drop sctp_cwnd_adjust[SCTP_HS_TABLE_SIZE] = {
};
static void
sctp_hs_cwnd_increase(struct sctp_nets *net)
sctp_hs_cwnd_increase(struct sctp_tcb *stcb, struct sctp_nets *net)
{
int cur_val, i, indx, incr;
@@ -3797,7 +3797,7 @@ sctp_hs_cwnd_increase(struct sctp_nets *net)
}
static void
sctp_hs_cwnd_decrease(struct sctp_nets *net)
sctp_hs_cwnd_decrease(struct sctp_tcb *stcb, struct sctp_nets *net)
{
int cur_val, i, indx;
@@ -3958,7 +3958,7 @@ sctp_cwnd_update(struct sctp_tcb *stcb,
if (net->flight_size + net->net_ack >=
net->cwnd) {
#ifdef SCTP_HIGH_SPEED
sctp_hs_cwnd_increase(net);
sctp_hs_cwnd_increase(stcb, net);
#else
if (net->net_ack > (net->mtu * sctp_L2_abc_variable)) {
net->cwnd += (net->mtu * sctp_L2_abc_variable);
@@ -5062,7 +5062,7 @@ sctp_handle_sack(struct sctp_sack_chunk *ch, struct sctp_tcb *stcb,
struct sctp_tmit_chunk *lchk;
#ifdef SCTP_HIGH_SPEED
sctp_hs_cwnd_decrease(net);
sctp_hs_cwnd_decrease(stcb, net);
#else
#ifdef SCTP_CWND_MONITOR
int old_cwnd = net->cwnd;

View File

@@ -587,6 +587,7 @@ sctp_handle_shutdown(struct sctp_shutdown_chunk *cp,
}
stcb->asoc.control_pdapi = NULL;
SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
}
/* goto SHUTDOWN_RECEIVED state to block new requests */
if (stcb->sctp_socket) {
@@ -677,6 +678,7 @@ sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp,
}
stcb->asoc.control_pdapi = NULL;
SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
}
/* are the queues empty? */
if (!TAILQ_EMPTY(&asoc->send_queue) ||
@@ -4314,8 +4316,8 @@ sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset,
#ifdef SCTP_DEBUG
if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
printf("Ok, Common input processing called, m:%x iphlen:%d offset:%d\n",
(uint32_t) m, iphlen, offset);
printf("Ok, Common input processing called, m:%p iphlen:%d offset:%d\n",
m, iphlen, offset);
}
#endif /* SCTP_DEBUG */
@@ -4624,8 +4626,8 @@ sctp_input(m, off)
if (calc_check != check) {
#ifdef SCTP_DEBUG
if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
printf("Bad CSUM on SCTP packet calc_check:%x check:%x m:%x mlen:%d iphlen:%d\n",
calc_check, check, (uint32_t) m, mlen, iphlen);
printf("Bad CSUM on SCTP packet calc_check:%x check:%x m:%p mlen:%d iphlen:%d\n",
calc_check, check, m, mlen, iphlen);
}
#endif
@@ -4700,7 +4702,7 @@ sctp_input(m, off)
* idea, so I will leave it in place.
*/
if (ipsec4_in_reject_so(m, inp->ip_inp.inp.inp_socket)) {
if (inp && ipsec4_in_reject(m, &inp->ip_inp.inp)) {
ipsecstat.in_polvio++;
SCTP_STAT_INCR(sctps_hdrops);
goto bad;
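
The sctp_sorwakeup() lines added in this file all follow one pattern: state a blocked reader is sleeping on (here the partial-delivery control block) is changed under the endpoint's read lock, the lock is dropped, and only then is the receive side woken so the sleeper re-checks the new state. A rough user-space sketch of that shape, with hypothetical names (ep_lock, ep_readable, abort_partial_delivery) standing in for the SCTP ones:

#include <pthread.h>
#include <stddef.h>

/* Hypothetical stand-ins for the endpoint read lock and the PD-API state. */
static pthread_mutex_t ep_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t ep_readable = PTHREAD_COND_INITIALIZER;
static void *control_pdapi;             /* a reader may be blocked while this is set */

static void
abort_partial_delivery(void)
{
    pthread_mutex_lock(&ep_lock);       /* SCTP_INP_READ_LOCK() */
    control_pdapi = NULL;               /* invalidate what the reader is waiting on */
    pthread_mutex_unlock(&ep_lock);     /* SCTP_INP_READ_UNLOCK() */

    /*
     * Wake the reader only after the lock is dropped, as the new code does
     * with sctp_sorwakeup(); the sleeper re-checks the state and returns.
     */
    pthread_cond_broadcast(&ep_readable);
}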

View File

@@ -4486,7 +4486,7 @@ sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
* We add one here to keep the assoc from
* dis-appearing on us.
*/
atomic_add_16(&stcb->asoc.refcnt, 1);
atomic_add_int(&stcb->asoc.refcnt, 1);
sctp_abort_an_association(inp, stcb,
SCTP_RESPONSE_TO_USER_REQ,
m);
@@ -4504,7 +4504,7 @@ sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
* iterator timer :-0
*/
SCTP_TCB_LOCK(stcb);
atomic_add_16(&stcb->asoc.refcnt, -1);
atomic_add_int(&stcb->asoc.refcnt, -1);
goto no_chunk_output;
}
} else {
@@ -4574,11 +4574,11 @@ sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
TAILQ_EMPTY(&asoc->sent_queue) &&
(asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
abort_anyway:
atomic_add_16(&stcb->asoc.refcnt, 1);
atomic_add_int(&stcb->asoc.refcnt, 1);
sctp_abort_an_association(stcb->sctp_ep, stcb,
SCTP_RESPONSE_TO_USER_REQ,
NULL);
atomic_add_16(&stcb->asoc.refcnt, -1);
atomic_add_int(&stcb->asoc.refcnt, -1);
goto no_chunk_output;
}
sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
@@ -9566,7 +9566,7 @@ sctp_lower_sosend(struct socket *so,
}
}
/* Keep the stcb from being freed under our feet */
atomic_add_16(&stcb->asoc.refcnt, 1);
atomic_add_int(&stcb->asoc.refcnt, 1);
free_cnt_applied = 1;
if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
@@ -9717,7 +9717,7 @@ sctp_lower_sosend(struct socket *so,
SCTP_TCB_LOCK(stcb);
hold_tcblock = 1;
}
atomic_add_16(&stcb->asoc.refcnt, -1);
atomic_add_int(&stcb->asoc.refcnt, -1);
free_cnt_applied = 0;
/* release this lock, otherwise we hang on ourselves */
sctp_abort_an_association(stcb->sctp_ep, stcb,
@@ -10162,7 +10162,7 @@ sctp_lower_sosend(struct socket *so,
(asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
abort_anyway:
if (free_cnt_applied) {
atomic_add_16(&stcb->asoc.refcnt, -1);
atomic_add_int(&stcb->asoc.refcnt, -1);
free_cnt_applied = 0;
}
sctp_abort_an_association(stcb->sctp_ep, stcb,
@@ -10292,8 +10292,8 @@ sctp_lower_sosend(struct socket *so,
if ((stcb) && hold_tcblock) {
SCTP_TCB_UNLOCK(stcb);
}
if ((stcb) && (free_cnt_applied)) {
atomic_add_16(&stcb->asoc.refcnt, -1);
if (stcb && free_cnt_applied) {
atomic_add_int(&stcb->asoc.refcnt, -1);
}
#ifdef INVARIENTS
if (stcb) {
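
The atomic_add_16() to atomic_add_int() switch goes hand in hand with widening asoc.refcnt to 32 bits: FreeBSD's atomic(9) operations are only guaranteed for int- and long/pointer-sized operands on every architecture, so a 16-bit counter cannot be updated atomically everywhere. A condensed sketch of the hold/release idiom these call sites use, written with C11 atomics so it builds outside the kernel (in the kernel the calls are atomic_add_int(&stcb->asoc.refcnt, 1) and atomic_add_int(&stcb->asoc.refcnt, -1)):

#include <stdatomic.h>
#include <stdint.h>

struct assoc {
    _Atomic uint32_t refcnt;    /* was uint16_t; now wide enough for portable atomics */
};

/*
 * Pin the association before doing anything that may drop its lock or sleep
 * (aborting it, waiting on the socket buffer), so a concurrent free cannot
 * pull it out from under us; release the reference afterwards.
 */
static void
assoc_hold(struct assoc *asoc)
{
    atomic_fetch_add(&asoc->refcnt, 1);     /* kernel: atomic_add_int(&refcnt, 1) */
}

static void
assoc_release(struct assoc *asoc)
{
    atomic_fetch_sub(&asoc->refcnt, 1);     /* kernel: atomic_add_int(&refcnt, -1) */
}

Each atomic_add_int() pair in this file brackets exactly such a window, e.g. around sctp_abort_an_association() or while reacquiring the TCB lock.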

View File

@@ -3391,7 +3391,7 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
* timer a passing stranger may have started :-S
*/
if (from_inpcbfree == 0) {
atomic_add_16(&stcb->asoc.refcnt, 1);
atomic_add_int(&stcb->asoc.refcnt, 1);
SCTP_TCB_UNLOCK(stcb);
@@ -3410,7 +3410,7 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
sctp_iterator_asoc_being_freed(inp, stcb);
/* re-increment the lock */
if (from_inpcbfree == 0) {
atomic_add_16(&stcb->asoc.refcnt, -1);
atomic_add_int(&stcb->asoc.refcnt, -1);
}
/* now restop the timers to be sure - this is paranoia at is finest! */
callout_stop(&asoc->hb_timer.timer);
@@ -4379,9 +4379,9 @@ sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m,
/* does the source address already exist? if so skip it */
l_inp = inp = stcb->sctp_ep;
atomic_add_16(&stcb->asoc.refcnt, 1);
atomic_add_int(&stcb->asoc.refcnt, 1);
stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net_tmp, local_sa, stcb);
atomic_add_16(&stcb->asoc.refcnt, -1);
atomic_add_int(&stcb->asoc.refcnt, -1);
if ((stcb_tmp == NULL && inp == stcb->sctp_ep) || inp == NULL) {
/* we must add the source address */
@@ -4440,10 +4440,10 @@ sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m,
sin.sin_addr.s_addr = p4->addr;
sa = (struct sockaddr *)&sin;
inp = stcb->sctp_ep;
atomic_add_16(&stcb->asoc.refcnt, 1);
atomic_add_int(&stcb->asoc.refcnt, 1);
stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net,
local_sa, stcb);
atomic_add_16(&stcb->asoc.refcnt, -1);
atomic_add_int(&stcb->asoc.refcnt, -1);
if ((stcb_tmp == NULL && inp == stcb->sctp_ep) ||
inp == NULL) {
@@ -4502,10 +4502,10 @@ sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m,
sizeof(p6->addr));
sa = (struct sockaddr *)&sin6;
inp = stcb->sctp_ep;
atomic_add_16(&stcb->asoc.refcnt, 1);
atomic_add_int(&stcb->asoc.refcnt, 1);
stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net,
local_sa, stcb);
atomic_add_16(&stcb->asoc.refcnt, -1);
atomic_add_int(&stcb->asoc.refcnt, -1);
if (stcb_tmp == NULL && (inp == stcb->sctp_ep ||
inp == NULL)) {
/*

View File

@@ -740,12 +740,15 @@ struct sctp_association {
sctp_hmaclist_t *peer_hmacs; /* peer HMACs supported */
struct sctp_keyhead shared_keys; /* assoc's shared keys */
sctp_authinfo_t authinfo; /* randoms, cached keys */
uint16_t peer_hmac_id; /* peer HMAC id to send */
/*
* refcnt to block freeing when a sender or receiver is off coping
* user data in.
*/
uint16_t refcnt;
uint32_t refcnt;
uint32_t chunks_on_out_queue; /* total chunks floating around,
* locked by send socket buffer */
uint16_t peer_hmac_id; /* peer HMAC id to send */
/*
* Being that we have no bag to collect stale cookies, and that we
@@ -784,8 +787,6 @@ struct sctp_association {
uint16_t last_strm_seq_delivered;
uint16_t last_strm_no_delivered;
uint16_t chunks_on_out_queue; /* total chunks floating around,
* locked by send socket buffer */
uint16_t last_revoke_count;
int16_t num_send_timers_up;
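
The struct edits above are the other half of that change: refcnt and chunks_on_out_queue grow from uint16_t to uint32_t so atomic_add_int() can operate on them, and peer_hmac_id apparently moves down so the remaining 16-bit members stay grouped together. A toy declaration, purely illustrative and not the real sctp_association layout:

#include <stdint.h>

struct assoc_fields_example {
    /*
     * Formerly uint16_t: atomic(9) has no portable 16-bit add, so both
     * counters become uint32_t and atomic_add_int() works on them directly.
     */
    uint32_t refcnt;
    uint32_t chunks_on_out_queue;   /* locked by the send socket buffer */
    /* 16-bit members kept together so they pack into a single 32-bit slot. */
    uint16_t peer_hmac_id;
    uint16_t last_revoke_count;
};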

View File

@@ -587,7 +587,7 @@ struct sctp_str_log {
};
struct sctp_sb_log {
uint32_t stcb;
void *stcb;
uint32_t so_sbcc;
uint32_t stcb_sbcc;
uint32_t incr;
@@ -628,8 +628,8 @@ struct sctp_sack_log {
};
struct sctp_lock_log {
uint32_t sock;
uint32_t inp;
void *sock;
void *inp;
uint8_t tcb_lock;
uint8_t inp_lock;
uint8_t info_lock;
@@ -641,14 +641,14 @@ struct sctp_lock_log {
};
struct sctp_rto_log {
uint32_t net;
void *net;
uint32_t rtt;
uint32_t rttvar;
uint8_t direction;
};
struct sctp_nagle_log {
uint32_t stcb;
void *stcb;
uint32_t total_flight;
uint32_t total_in_queue;
uint16_t count_in_queue;
@@ -656,7 +656,7 @@ struct sctp_nagle_log {
};
struct sctp_sbwake_log {
uint32_t stcb;
void *stcb;
uint16_t send_q;
uint16_t sent_q;
uint16_t flight;
@@ -675,8 +675,8 @@ struct sctp_misc_info {
};
struct sctp_log_closing {
uint32_t inp;
uint32_t stcb;
void *inp;
void *stcb;
uint32_t sctp_flags;
uint16_t state;
int16_t loc;
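
The trace records in sctp_uio.h change for the same reason: a kernel pointer no longer fits in a uint32_t field, so the stcb/inp/net/sock slots become void * and the logging code can store the pointer unmodified. A cut-down illustration mirroring struct sctp_rto_log (the helper name log_rto is hypothetical):

#include <stdint.h>

struct rto_log_rec {        /* illustrative, mirrors struct sctp_rto_log */
    void *net;              /* was uint32_t: a 64-bit pointer lost its top half */
    uint32_t rtt;
    uint32_t rttvar;
    uint8_t direction;
};

struct net_obj {            /* stand-in for struct sctp_nets */
    uint32_t prev_rtt;
};

static void
log_rto(struct rto_log_rec *rec, struct net_obj *net)
{
    rec->net = net;         /* no cast, no truncation, once the field is a pointer */
    rec->rtt = net->prev_rtt;
}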

View File

@@ -164,7 +164,7 @@ sctp_sblog(struct sockbuf *sb,
sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_SB;
sctp_clog[sctp_cwnd_log_at].x.sb.stcb = (uint32_t) stcb;
sctp_clog[sctp_cwnd_log_at].x.sb.stcb = stcb;
sctp_clog[sctp_cwnd_log_at].x.sb.so_sbcc = sb->sb_cc;
if (stcb)
sctp_clog[sctp_cwnd_log_at].x.sb.stcb_sbcc = stcb->asoc.sb_cc;
@@ -182,10 +182,10 @@ sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
sctp_clog[sctp_cwnd_log_at].from = 0;
sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_CLOSE;
sctp_clog[sctp_cwnd_log_at].x.close.inp = (uint32_t) inp;
sctp_clog[sctp_cwnd_log_at].x.close.inp = (void *)inp;
sctp_clog[sctp_cwnd_log_at].x.close.sctp_flags = inp->sctp_flags;
if (stcb) {
sctp_clog[sctp_cwnd_log_at].x.close.stcb = (uint32_t) stcb;
sctp_clog[sctp_cwnd_log_at].x.close.stcb = (void *)stcb;
sctp_clog[sctp_cwnd_log_at].x.close.state = (uint16_t) stcb->asoc.state;
} else {
sctp_clog[sctp_cwnd_log_at].x.close.stcb = 0;
@@ -204,7 +204,7 @@ rto_logging(struct sctp_nets *net, int from)
sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_RTT;
sctp_clog[sctp_cwnd_log_at].x.rto.net = (uint32_t) net;
sctp_clog[sctp_cwnd_log_at].x.rto.net = (void *)net;
sctp_clog[sctp_cwnd_log_at].x.rto.rtt = net->prev_rtt;
sctp_clog[sctp_cwnd_log_at].x.rto.rttvar = net->rtt_variance;
sctp_clog[sctp_cwnd_log_at].x.rto.direction = net->rto_variance_dir;
@@ -234,7 +234,7 @@ sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
sctp_clog[sctp_cwnd_log_at].from = (uint8_t) action;
sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_NAGLE;
sctp_clog[sctp_cwnd_log_at].x.nagle.stcb = (uint32_t) stcb;
sctp_clog[sctp_cwnd_log_at].x.nagle.stcb = (void *)stcb;
sctp_clog[sctp_cwnd_log_at].x.nagle.total_flight = stcb->asoc.total_flight;
sctp_clog[sctp_cwnd_log_at].x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
sctp_clog[sctp_cwnd_log_at].x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
@@ -377,8 +377,8 @@ sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_LOCK_EVENT;
sctp_clog[sctp_cwnd_log_at].x.lock.sock = (uint32_t) inp->sctp_socket;
sctp_clog[sctp_cwnd_log_at].x.lock.inp = (uint32_t) inp;
sctp_clog[sctp_cwnd_log_at].x.lock.sock = (void *)inp->sctp_socket;
sctp_clog[sctp_cwnd_log_at].x.lock.inp = (void *)inp;
if (stcb) {
sctp_clog[sctp_cwnd_log_at].x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
} else {
@@ -495,7 +495,7 @@ sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int f
sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_WAKE;
sctp_clog[sctp_cwnd_log_at].x.wake.stcb = (uint32_t) stcb;
sctp_clog[sctp_cwnd_log_at].x.wake.stcb = (void *)stcb;
sctp_clog[sctp_cwnd_log_at].x.wake.wake_cnt = wake_cnt;
sctp_clog[sctp_cwnd_log_at].x.wake.flight = stcb->asoc.total_flight_count;
sctp_clog[sctp_cwnd_log_at].x.wake.send_q = stcb->asoc.send_queue_cnt;
@@ -1309,9 +1309,9 @@ sctp_timeout_handler(void *t)
return;
}
if (stcb) {
atomic_add_16(&stcb->asoc.refcnt, 1);
atomic_add_int(&stcb->asoc.refcnt, 1);
SCTP_TCB_LOCK(stcb);
atomic_add_16(&stcb->asoc.refcnt, -1);
atomic_add_int(&stcb->asoc.refcnt, -1);
}
/* mark as being serviced now */
callout_deactivate(&tmr->timer);
@@ -3003,6 +3003,7 @@ sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb,
}
if (no_lock == 0)
SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
} else {
/* append to socket */
add_to_end:
@@ -4198,15 +4199,13 @@ sctp_user_rcvd(struct sctp_tcb *stcb, int *freed_so_far, int hold_rlock,
{
/* User pulled some data, do we need a rwnd update? */
int r_unlocked = 0;
int tcb_incr_up = 0;
uint32_t dif, rwnd;
struct socket *so = NULL;
if (stcb == NULL)
return;
atomic_add_16(&stcb->asoc.refcnt, 1);
tcb_incr_up = 1;
atomic_add_int(&stcb->asoc.refcnt, 1);
if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
/* Pre-check If we are freeing no update */
@@ -4291,9 +4290,7 @@ sctp_user_rcvd(struct sctp_tcb *stcb, int *freed_so_far, int hold_rlock,
}
SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
if (tcb_incr_up) {
atomic_add_16(&stcb->asoc.refcnt, -1);
}
atomic_add_int(&stcb->asoc.refcnt, -1);
return;
}
@@ -4571,7 +4568,8 @@ sctp_sorecvmsg(struct socket *so,
if (stcb) {
if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) &&
(control->do_not_ref_stcb == 0)) {
stcb = NULL;
if (freecnt_applied == 0)
stcb = NULL;
} else if (control->do_not_ref_stcb == 0) {
/* you can't free it on me please */
/*
@@ -4581,7 +4579,7 @@ sctp_sorecvmsg(struct socket *so,
* to increment, we need to use the atomic add to
* the refcnt
*/
atomic_add_16(&stcb->asoc.refcnt, 1);
atomic_add_int(&stcb->asoc.refcnt, 1);
freecnt_applied = 1;
/*
* Setup to remember how much we have not yet told
@@ -5043,7 +5041,20 @@ sctp_sorecvmsg(struct socket *so,
}
goto wait_some_more;
} else if (control->data == NULL) {
panic("Impossible data==NULL length !=0");
/*
* we must re-sync since data is probably being
* added
*/
SCTP_INP_READ_LOCK(inp);
if ((control->length > 0) && (control->data == NULL)) {
/*
* big trouble.. we have the lock and its
* corrupt?
*/
panic("Impossible data==NULL length !=0");
}
SCTP_INP_READ_UNLOCK(inp);
/* We will fall around to get more data */
}
goto get_more_data;
} else {
@@ -5277,14 +5288,17 @@ sctp_sorecvmsg(struct socket *so,
SOCKBUF_UNLOCK(&so->so_rcv);
hold_sblock = 0;
}
if ((stcb) && freecnt_applied) {
if (freecnt_applied) {
/*
* The lock on the socket buffer protects us so the free
* code will stop. But since we used the socketbuf lock and
* the sender uses the tcb_lock to increment, we need to use
* the atomic add to the refcnt.
*/
atomic_add_16(&stcb->asoc.refcnt, -1);
if (stcb == NULL) {
panic("stcb for refcnt has gone NULL?");
}
atomic_add_int(&stcb->asoc.refcnt, -1);
freecnt_applied = 0;
/* Save the value back for next time */
stcb->freed_by_sorcv_sincelast = freed_so_far;
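
The sorecvmsg change a few hunks up replaces an unconditional panic with a re-check: control->length != 0 with control->data == NULL can be a transient state while the input path is still appending data, so the code takes the INP read lock, looks again, and only panics if the contradiction persists while the lock is held. The same double-check shape in miniature, with hypothetical names:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct control_rec {            /* stand-in for struct sctp_queued_to_read */
    size_t length;
    void *data;
};

static pthread_mutex_t inp_read_lock = PTHREAD_MUTEX_INITIALIZER;

static void
check_control(struct control_rec *ctl)
{
    if (ctl->length != 0 && ctl->data == NULL) {
        /* Could just be the producer mid-update: re-sync under the lock. */
        pthread_mutex_lock(&inp_read_lock);
        if (ctl->length > 0 && ctl->data == NULL) {
            /* Still inconsistent with the lock held: genuine corruption. */
            fprintf(stderr, "Impossible data==NULL length!=0\n");
            abort();            /* the kernel code panics here */
        }
        pthread_mutex_unlock(&inp_read_lock);
        /* Otherwise fall through and retry the read. */
    }
}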

View File

@@ -198,7 +198,7 @@ sctp_free_bufspace(struct sctp_tcb *, struct sctp_association *,
#define sctp_free_bufspace(stcb, asoc, tp1, chk_cnt) \
do { \
if (tp1->data != NULL) { \
atomic_add_16(&((asoc)->chunks_on_out_queue), -chk_cnt); \
atomic_add_int(&((asoc)->chunks_on_out_queue), -chk_cnt); \
if ((asoc)->total_output_queue_size >= tp1->book_size) { \
atomic_add_int(&((asoc)->total_output_queue_size), -tp1->book_size); \
} else { \
@@ -220,7 +220,7 @@ do { \
#define sctp_free_spbufspace(stcb, asoc, sp) \
do { \
if (sp->data != NULL) { \
atomic_add_16(&(asoc)->chunks_on_out_queue, -1); \
atomic_add_int(&(asoc)->chunks_on_out_queue, -1); \
if ((asoc)->total_output_queue_size >= sp->length) { \
atomic_add_int(&(asoc)->total_output_queue_size,sp->length); \
} else { \
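
These macros release buffer accounting with atomic_add_int() on the now-32-bit counters, and they clamp the byte counter instead of letting an unsigned subtraction wrap below zero. A sketch of that clamped decrement (C11 atomics for buildability; in the kernel the check-then-act runs under the send socket-buffer lock, which is what makes it safe):

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint32_t total_output_queue_size;

static void
queue_size_release(uint32_t nbytes)
{
    uint32_t cur = atomic_load(&total_output_queue_size);

    if (cur >= nbytes) {
        /* kernel: atomic_add_int(&asoc->total_output_queue_size, -nbytes) */
        atomic_fetch_sub(&total_output_queue_size, nbytes);
    } else {
        /* Never wrap: a drifted counter is reset rather than underflowed. */
        atomic_store(&total_output_queue_size, 0);
    }
}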

View File

@@ -80,6 +80,7 @@ __FBSDID("$FreeBSD$");
#ifdef IPSEC
#include <netinet6/ipsec.h>
#include <netinet6/ipsec6.h>
#endif /* IPSEC */
#if defined(NFAITH) && NFAITH > 0
@@ -193,8 +194,8 @@ sctp6_input(mp, offp, proto)
if (calc_check != check) {
#ifdef SCTP_DEBUG
if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
printf("Bad CSUM on SCTP packet calc_check:%x check:%x m:%x mlen:%d iphlen:%d\n",
calc_check, check, (u_int)m,
printf("Bad CSUM on SCTP packet calc_check:%x check:%x m:%p mlen:%d iphlen:%d\n",
calc_check, check, m,
mlen, iphlen);
}
#endif
@@ -249,11 +250,11 @@ sctp6_input(mp, offp, proto)
/*
* Check AH/ESP integrity.
*/
if (in6p->sctp_socket && (ipsec6_in_reject_so(m, in6p->sctp_socket)) {
if (in6p_ip && (ipsec6_in_reject(m, in6p_ip))) {
/* XXX */
ipsec6stat.in_polvio++;
goto bad;
}
}
#endif /* IPSEC */
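
Finally, the IPv6 input path stops asking the socket-level helper and checks IPsec policy against the PCB itself, guarded by a NULL test since the lookup may not have produced one. A stripped-down sketch of that guard (policy_rejects is a hypothetical stand-in for ipsec6_in_reject()):

#include <stdbool.h>
#include <stddef.h>

struct mbuf;                    /* opaque stand-ins for the kernel types */
struct in6pcb;

static bool
policy_rejects(struct mbuf *m, struct in6pcb *in6p)
{
    (void)m;
    (void)in6p;
    return (false);             /* placeholder: the real code consults the IPsec SPD */
}

static bool
drop_for_ipsec(struct mbuf *m, struct in6pcb *in6p)
{
    /* Only consult policy when there is a PCB to check it against. */
    return (in6p != NULL && policy_rejects(m, in6p));
}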