- Remove extra comment for 7.0 (no GIANT here).

- Remove unneeded WLOCK/UNLOCK of inp for getting TCB lock.
- Fix a panic that may occur when freeing an assoc that has a partial
  delivery in progress (a NULL socket pointer could be dereferenced when
  queuing the partial delivery aborted notification); a simplified model
  of the new guard follows the commit metadata below.
- Some spacing and comment fixes.
- Fix address add handling to clear cached routes and source addresses
  when the peer acks the add, in case the routing table has changed.
Approved by:	re@freebsd.org (Bruce Mah)
Author:	Randall Stewart
Date:	2007-08-16 01:51:22 +00:00
Parent:	8cb5ba02d8
Commit:	2dad8a55be
7 changed files with 106 additions and 63 deletions
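The partial-delivery fix boils down to checking the association's socket pointer before building the PD-API aborted notification (the actual change is visible in the sctp_pcb.c-style and sctputil.c-style hunks below). The following is a self-contained model of that guard; the structure and function names are purely illustrative and do not exist in the kernel.

#include <stddef.h>

/* Illustrative model only; none of these names are the kernel's. */
struct assoc_model {
	void *socket;		/* NULL once the owning socket is gone */
	int pdapi_events;	/* nonzero if PD-API events are enabled */
};

static int
queue_pdapi_aborted(struct assoc_model *a)
{
	if (a == NULL || a->socket == NULL || !a->pdapi_events)
		return (0);	/* skip: this is the NULL dereference the fix avoids */
	/* ... allocate and enqueue the pdapi-aborted notification here ... */
	return (1);
}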


@@ -871,6 +871,47 @@ sctp_asconf_cleanup(struct sctp_tcb *stcb, struct sctp_nets *net)
sctp_toss_old_asconf(stcb);
}
/*
* cleanup any cached source addresses that may be topologically
* incorrect after a new address has been added to this interface.
*/
static void
sctp_asconf_nets_cleanup(struct sctp_tcb *stcb, struct sctp_ifn *ifn)
{
struct sctp_nets *net;
/*
* Ideally, we want to only clear cached routes and source addresses
* that are topologically incorrect. But since there is no easy way
* to know whether the newly added address on the ifn would cause a
* routing change (i.e. a new egress interface would be chosen)
* without doing a new routing lookup and source address selection,
* we will (for now) just flush any cached route using a different
* ifn (and cached source addrs) and let output re-choose them
* during the next send on that net.
*/
TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
/*
* clear any cached route (and cached source address) if the
* route's interface is NOT the same as the address change.
* If it's the same interface, just clear the cached source
* address.
*/
if (SCTP_ROUTE_HAS_VALID_IFN(&net->ro) &&
SCTP_GET_IF_INDEX_FROM_ROUTE(&net->ro) != ifn->ifn_index) {
/* clear any cached route */
RTFREE(net->ro.ro_rt);
net->ro.ro_rt = NULL;
}
/* clear any cached source address */
if (net->src_addr_selected) {
sctp_free_ifa(net->ro._s_addr);
net->ro._s_addr = NULL;
net->src_addr_selected = 0;
}
}
}
/*
* process an ADD/DELETE IP ack from peer.
* addr: corresponding sctp_ifa to the address being added/deleted.
@@ -883,8 +924,8 @@ sctp_asconf_addr_mgmt_ack(struct sctp_tcb *stcb, struct sctp_ifa *addr,
{
/*
* do the necessary asoc list work- if we get a failure indication,
* leave the address on the "do not use" asoc list if we get a
* success indication, remove the address from the list
* leave the address on the assoc's restricted list. If we get a
* success indication, remove the address from the restricted list.
*/
/*
* Note: this will only occur for ADD_IP_ADDRESS, since
@@ -893,6 +934,12 @@ sctp_asconf_addr_mgmt_ack(struct sctp_tcb *stcb, struct sctp_ifa *addr,
if (flag) {
/* success case, so remove from the restricted list */
sctp_del_local_addr_restricted(stcb, addr);
/*
* clear any cached, topologically incorrect source
* addresses
*/
sctp_asconf_nets_cleanup(stcb, addr->ifn_p);
}
/* else, leave it on the list */
}

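The comment in sctp_asconf_nets_cleanup() above explains the flush policy; here is a self-contained model of that rule (illustrative types and names, not the kernel code): drop any cached route whose egress interface differs from the one the address was added on, and always drop the cached source address so the next send re-selects both.

/* Illustrative model only; none of these names are the kernel's. */
struct net_cache_model {
	int route_ifindex;	/* egress ifindex of the cached route; -1 if none */
	int have_src_addr;	/* nonzero if a source address is cached */
};

static void
flush_net_cache(struct net_cache_model *nc, int new_ifindex)
{
	if (nc->route_ifindex != -1 && nc->route_ifindex != new_ifindex)
		nc->route_ifindex = -1;	/* route may now use the wrong interface */
	nc->have_src_addr = 0;		/* always re-select the source address */
}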

@@ -2186,16 +2186,13 @@ sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
return (m);
}
oso = (*inp_p)->sctp_socket;
/*
* We do this to keep the sockets side happy durin
* the sonewcon ONLY.
*/
atomic_add_int(&(*stcb)->asoc.refcnt, 1);
SCTP_TCB_UNLOCK((*stcb));
so = sonewconn(oso, 0
);
SCTP_INP_WLOCK((*stcb)->sctp_ep);
SCTP_TCB_LOCK((*stcb));
SCTP_INP_WUNLOCK((*stcb)->sctp_ep);
atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
if (so == NULL) {
struct mbuf *op_err;
@@ -3968,7 +3965,6 @@ __attribute__((noinline))
SCTP_TCB_UNLOCK(locked_tcb);
}
return (NULL);
}
if (netp && *netp) {
int abort_flag = 0;

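The sctp_handle_cookie_echo() hunk above keeps the reference/unlock dance around sonewconn() but drops the inp write-lock that was only being taken to re-acquire the TCB lock. A hypothetical user-space model of that pattern, with pthreads and a plain refcount standing in for SCTP_TCB_LOCK and the asoc refcount:

#include <pthread.h>
#include <stdatomic.h>

struct tcb_model {
	pthread_mutex_t lock;	/* caller holds this on entry */
	atomic_int refcnt;	/* keeps the structure from being freed */
};

/* Call fn() without holding the lock, but with the tcb pinned. */
static void *
call_unlocked(struct tcb_model *t, void *(*fn)(void *), void *arg)
{
	void *res;

	atomic_fetch_add(&t->refcnt, 1);	/* pin across the unlocked window */
	pthread_mutex_unlock(&t->lock);
	res = fn(arg);				/* e.g. sonewconn(), which may block */
	pthread_mutex_lock(&t->lock);		/* re-take the lock directly */
	atomic_fetch_sub(&t->refcnt, 1);
	return (res);
}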

@@ -2692,11 +2692,11 @@ sctp_choose_boundall(struct sctp_inpcb *inp,
uint32_t ifn_index;
struct sctp_vrf *vrf;
/*
* For boundall we can use any address in the association. If
* non_asoc_addr_ok is set we can use any address (at least in
* theory). So we look for preferred addresses first. If we find
* one, we use it. Otherwise we next try to get an address on the
/*-
* For boundall we can use any address in the association.
* If non_asoc_addr_ok is set we can use any address (at least in
* theory). So we look for preferred addresses first. If we find one,
* we use it. Otherwise we next try to get an address on the
* interface, which we should be able to do (unless non_asoc_addr_ok
* is false and we are routed out that way). In these cases where we
* can't use the address of the interface we go through all the
@@ -2898,43 +2898,51 @@ sctp_source_address_selection(struct sctp_inpcb *inp,
struct sctp_nets *net,
int non_asoc_addr_ok, uint32_t vrf_id)
{
struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst;
struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst;
struct sctp_ifa *answer;
uint8_t dest_is_priv, dest_is_loop;
sa_family_t fam;
/*
/*-
* Rules: - Find the route if needed, cache if I can. - Look at
* interface address in route, Is it in the bound list. If so we
* have the best source. - If not we must rotate amongst the
* addresses.
*
*
* Cavets and issues
*
*
* Do we need to pay attention to scope. We can have a private address
* or a global address we are sourcing or sending to. So if we draw
* it out zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
* For V4 ------------------------------------------ source *
* dest * result ----------------------------------------- <a>
* Private * Global * NAT
* ----------------------------------------- <b> Private *
* Private * No problem -----------------------------------------
* <c> Global * Private * Huh, How will this work?
* ----------------------------------------- <d> Global *
* Global * No Problem ------------------------------------------
* zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz For V6
* ------------------------------------------ source * dest *
* result ----------------------------------------- <a> Linklocal *
* Global * ----------------------------------------- <b>
* Linklocal * Linklocal * No problem
* ----------------------------------------- <c> Global *
* Linklocal * Huh, How will this work?
* ----------------------------------------- <d> Global *
* Global * No Problem ------------------------------------------
* it out
* zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
*
* For V4
*------------------------------------------
* source * dest * result
* -----------------------------------------
* <a> Private * Global * NAT
* -----------------------------------------
* <b> Private * Private * No problem
* -----------------------------------------
* <c> Global * Private * Huh, How will this work?
* -----------------------------------------
* <d> Global * Global * No Problem
*------------------------------------------
* zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
* For V6
*------------------------------------------
* source * dest * result
* -----------------------------------------
* <a> Linklocal * Global *
* -----------------------------------------
* <b> Linklocal * Linklocal * No problem
* -----------------------------------------
* <c> Global * Linklocal * Huh, How will this work?
* -----------------------------------------
* <d> Global * Global * No Problem
*------------------------------------------
* zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
*
* And then we add to that what happens if there are multiple addresses
* assigned to an interface. Remember the ifa on a ifn is a linked
* list of addresses. So one interface can have more than one IP
@@ -2943,18 +2951,20 @@ sctp_source_address_selection(struct sctp_inpcb *inp,
* one is best? And what about NAT's sending P->G may get you a NAT
* translation, or should you select the G thats on the interface in
* preference.
*
*
* Decisions:
*
* - count the number of addresses on the interface. - if it is one, no
* problem except case <c>. For <a> we will assume a NAT out there.
*
* - count the number of addresses on the interface.
* - if it is one, no problem except case <c>.
* For <a> we will assume a NAT out there.
* - if there are more than one, then we need to worry about scope P
* or G. We should prefer G -> G and P -> P if possible. Then as a
* secondary fall back to mixed types G->P being a last ditch one. -
* The above all works for bound all, but bound specific we need to
* use the same concept but instead only consider the bound
* addresses. If the bound set is NOT assigned to the interface then
* we must use rotation amongst the bound addresses..
* or G. We should prefer G -> G and P -> P if possible.
* Then as a secondary fall back to mixed types G->P being a last
* ditch one.
* - The above all works for bound all, but bound specific we need to
* use the same concept but instead only consider the bound
* addresses. If the bound set is NOT assigned to the interface then
* we must use rotation amongst the bound addresses..
*/
if (ro->ro_rt == NULL) {
/*
@@ -11535,7 +11545,6 @@ sctp_lower_sosend(struct socket *so,
if ((net->flight_size > net->cwnd) &&
(sctp_cmt_on_off == 0)) {
queue_only = 1;
} else if (asoc->ifp_had_enobuf) {
SCTP_STAT_INCR(sctps_ifnomemqueued);
if (net->flight_size > (net->mtu * 2)) {
@@ -11624,7 +11633,6 @@ sctp_lower_sosend(struct socket *so,
sctp_chunk_output(inp,
stcb,
SCTP_OUTPUT_FROM_USR_SEND);
}
} else {
sctp_chunk_output(inp,
@@ -11835,7 +11843,6 @@ sctp_lower_sosend(struct socket *so,
(stcb->asoc.total_flight > 0) &&
(un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))
) {
/*-
* Ok, Nagle is set on and we have data outstanding.
* Don't send anything and let SACKs drive out the

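The reformatted comment in sctp_source_address_selection() above reasons in terms of Private (RFC 1918) versus Global IPv4 scope. A minimal classifier for that distinction, for illustration only (it is not part of the patch, and the kernel's own scope checks are more involved):

#include <stdint.h>
#include <netinet/in.h>
#include <arpa/inet.h>

static int
ipv4_is_private(struct in_addr a)
{
	uint32_t ip = ntohl(a.s_addr);

	return (((ip & 0xff000000) == 0x0a000000) ||	/* 10.0.0.0/8 */
	    ((ip & 0xfff00000) == 0xac100000) ||	/* 172.16.0.0/12 */
	    ((ip & 0xffff0000) == 0xc0a80000));		/* 192.168.0.0/16 */
}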

@@ -1263,7 +1263,6 @@ sctp_pcb_findep(struct sockaddr *nam, int find_tcp_pool, int have_lock,
/* Find the head of the ALLADDR chain */
if (have_lock == 0) {
SCTP_INP_INFO_RLOCK();
}
head = &sctppcbinfo.sctp_ephash[SCTP_PCBHASH_ALLADDR(lport,
sctppcbinfo.hashmark)];
@@ -3785,7 +3784,7 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
/* Held for PD-API clear that. */
sq->pdapi_aborted = 1;
sq->held_length = 0;
if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT)) {
if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT) && (so != NULL)) {
/*
* Need to add a PD-API
* aborted indication.
@@ -3917,7 +3916,6 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
LIST_REMOVE(stcb, sctp_asocs);
sctp_add_vtag_to_timewait(inp, asoc->my_vtag, SCTP_TIME_WAIT);
/*
* Now restop the timers to be sure - this is paranoia at is finest!
*/
@@ -3929,7 +3927,6 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
(void)SCTP_OS_TIMER_STOP(&asoc->shut_guard_timer.timer);
(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
(void)SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
(void)SCTP_OS_TIMER_STOP(&net->rxt_timer.timer);


@@ -478,7 +478,6 @@ sctp_find_alternate_net(struct sctp_tcb *stcb,
if (alt->ro._s_addr) {
sctp_free_ifa(alt->ro._s_addr);
alt->ro._s_addr = NULL;
}
alt->src_addr_selected = 0;
}
@@ -967,7 +966,6 @@ sctp_t3rxt_timer(struct sctp_inpcb *inp,
*/
net->find_pseudo_cumack = 1;
net->find_rtx_pseudo_cumack = 1;
} else { /* CMT is OFF */
alt = sctp_find_alternate_net(stcb, net, 0);
}


@@ -1119,7 +1119,7 @@ sctp_fill_up_addresses_vrf(struct sctp_inpcb *inp,
in6_sin_2_v4mapsin6(sin, (struct sockaddr_in6 *)sas);
((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(struct sockaddr_in6));
actual += sizeof(sizeof(struct sockaddr_in6));
actual += sizeof(struct sockaddr_in6);
} else {
memcpy(sas, sin, sizeof(*sin));
((struct sockaddr_in *)sas)->sin_port = inp->sctp_lport;
@@ -3480,7 +3480,6 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
sasoc->sasoc_local_rwnd = 0;
if (sasoc->sasoc_cookie_life) {
stcb->asoc.cookie_life = MSEC_TO_TICKS(sasoc->sasoc_cookie_life);
}
SCTP_TCB_UNLOCK(stcb);
} else {

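The sizeof fix in sctp_fill_up_addresses_vrf() above is easy to misread: sizeof(sizeof(struct sockaddr_in6)) is the size of a size_t, not of the structure, so the old line advanced `actual` by the wrong amount for each v4-mapped address. A standalone snippet showing the difference:

#include <stdio.h>
#include <netinet/in.h>

int
main(void)
{
	printf("sizeof(struct sockaddr_in6)         = %zu\n",
	    sizeof(struct sockaddr_in6));		/* the intended increment (28 here) */
	printf("sizeof(sizeof(struct sockaddr_in6)) = %zu\n",
	    sizeof(sizeof(struct sockaddr_in6)));	/* sizeof(size_t), e.g. 8 on LP64 */
	return (0);
}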

@@ -1163,7 +1163,6 @@ sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
uint8_t *new_array;
uint32_t new_size;
new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
SCTP_MALLOC(new_array, uint8_t *, new_size, SCTP_M_MAP);
if (new_array == NULL) {
@@ -3146,15 +3145,16 @@ sctp_notify_adaptation_layer(struct sctp_tcb *stcb,
/* This always must be called with the read-queue LOCKED in the INP */
void
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb,
uint32_t error, int nolock, uint32_t val)
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
int nolock, uint32_t val)
{
struct mbuf *m_notify;
struct sctp_pdapi_event *pdapi;
struct sctp_queued_to_read *control;
struct sockbuf *sb;
if ((stcb == NULL) || sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT))
if ((stcb == NULL) || (stcb->sctp_socket == NULL) ||
sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT))
/* event not enabled */
return;
@@ -4484,7 +4484,6 @@ sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
if (addr->sa_family == AF_INET) {
struct sockaddr_in *sin;