- During shutdown pending, when the last SACK came in and
  the last message on the send stream was "null" but still
  there (a state we allow), we could get hung and never clean
  it up, instead waiting for the shutdown guard timer to clear
  the association without a graceful close. Fix this so that
  we properly clean up.
- Added support for multiple ASCONF chunks per packet, per the new
  RFC. So far we only accept these on input and cannot yet generate
  a multi-ASCONF (see the serial-number sketch after this list).
- Sysctl'd support for the experimental Fast Handover feature. Always
  disabled unless the sysctl or a socket option is changed to enable it.
- Error case in add-ip where the peer supports AUTH and ADD-IP
  but does NOT require AUTH of ASCONF/ASCONF-ACK. We need to
  ABORT in this case.
- According to the Kyoto summit of socket API developers
  (Solaris, Linux, BSD), we need to have:
   o non-EEOR mode messages be atomic - Fixed
   o Allow implicit setup of an assoc in the 1-2-1 model if
     using the sctp_**() send calls - Fixed
   o Get rid of the HAVE_XXX declarations - Done
   o Add an sctp_pr_policy field in the hole in the sndrcvinfo structure - Done
   o Add a PR_SCTP_POLICY_VALID type flag - yet to do in a future patch!
- Optimize sctp6 calls to reuse code in sctp_usrreq. Also optimize
  the close path: send out the queued data and disable Nagle.
- Change the key concatenation order to match the AUTH RFC (see the
  concatenation sketch after this list).
- When sending an OOTB SHUTDOWN-COMPLETE, always compute the checksum.
- Don't send a PKT-DROP in response to a PKT-DROP.
- For ABORT chunks, always compute the checksum as well; the same
  goes for SHUTDOWN-COMPLETE.
- inpcb_free had a bug in the front states where in-queue
  data could wedge an assoc. We need to just abandon
  associations in front states (free_assoc).
- If a peer sends us a 64k ABORT, we would try to
  assemble a response packet which may be larger than
  64k and would then be dropped by IP. Instead, cap the
  largest INIT we accept at 64k-2k (we want at least
  2k for our INIT-ACK). If we receive an INIT larger than
  that, discard it early without all the processing (see
  the size-gate sketch after this list).
- When we peel off we must increment the tcb ref count
  to keep it from being freed from underneath us.
- Handling FWD-TSN had bugs that caused memory overwrites
  when given faulty data; fixed so that can't happen, and we
  also stop at the first bad stream number (see the bounds-check
  sketch after this list).
- Fixed so comm-up generates the adaptation indication.
- peeloff did not get the hmac params copied.
- Fix it so we lock the addr list when doing src-addr selection
  (in the future we need to use a multi-reader/one-writer lock here).
- During low-level output, we could end up with a _l_addr set
  to NULL if the iterator is calling the output routine. This
  means we could possibly crash when we gather the MTU info.
  Fix so we only do the gather when we have a cached src
  address.
- We need to be sure to set the abort flag on the conn state when
  we receive an abort.
- peeloff could leak a socket. Moved code so the close will
  find the socket if the peeloff fails (uipc_syscalls.c)
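
A minimal user-space sketch, assuming nothing beyond standard C, of the
serial-number handling that the multiple-ASCONF support above relies on: a
wraparound-aware comparison decides whether an arriving serial is old or a
duplicate (resend the cached ASCONF-ACK), out of order (drop), or the next
expected one (process). The helper names and test values are made up for the
example; the kernel uses compare_with_wrap() against the association's
asconf_seq_in.

#include <stdint.h>
#include <stdio.h>

/* wraparound-aware "a is newer than b" for 32-bit serial numbers */
static int
serial_gt(uint32_t a, uint32_t b)
{
	return ((a > b && (a - b) < (1U << 31)) ||
	    (a < b && (b - a) > (1U << 31)));
}

/* classify an arriving ASCONF serial number against the last one seen */
static const char *
classify_asconf(uint32_t seq_in, uint32_t serial_num)
{
	if (serial_gt(seq_in, serial_num) || serial_num == seq_in)
		return ("old or duplicate: resend cached ASCONF-ACK");
	if (serial_num != seq_in + 1)
		return ("out of order: drop");
	return ("next expected: process");
}

int
main(void)
{
	printf("%s\n", classify_asconf(5, 5));           /* duplicate */
	printf("%s\n", classify_asconf(5, 6));           /* next expected */
	printf("%s\n", classify_asconf(0xffffffffu, 0)); /* next, across the wrap */
	return (0);
}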
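
A standalone sketch, again illustrative only, of the key concatenation order
the AUTH change adopts: the shared key comes first, then the two endpoint keys
in sorted order (the old draft-04 order placed the shared key in the middle).
The function name and fixed-size buffer are assumptions; the kernel builds the
key from sctp_key_t structures in sctp_compute_hashkey().

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* concatenate shared + low + high into out; caller sizes out appropriately */
static size_t
concat_auth_key(const uint8_t *shared, size_t shared_len,
    const uint8_t *low, size_t low_len,
    const uint8_t *high, size_t high_len, uint8_t *out)
{
	uint8_t *p = out;

	memcpy(p, shared, shared_len);
	p += shared_len;
	memcpy(p, low, low_len);
	p += low_len;
	memcpy(p, high, high_len);
	p += high_len;
	return ((size_t)(p - out));
}

int
main(void)
{
	uint8_t shared[] = { 0x01, 0x02 };
	uint8_t key1[] = { 0x10, 0x11 };	/* local random/chunks/hmacs */
	uint8_t key2[] = { 0x20, 0x21 };	/* peer random/chunks/hmacs */
	uint8_t buf[6];
	size_t i, len;

	/*
	 * key1 sorts before key2 here, so the order is shared + key1 + key2;
	 * if key2 sorted lower it would be shared + key2 + key1.
	 */
	len = concat_auth_key(shared, sizeof(shared), key1, sizeof(key1),
	    key2, sizeof(key2), buf);
	for (i = 0; i < len; i++)
		printf("%02x", buf[i]);
	printf("\n");
	return (0);
}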
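
A small sketch of the INIT size gate described above. The constant mirrors the
SCTP_LARGEST_INIT_ACCEPTED definition added by this change, while the helper
and sample values around it are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* mirrors the new SCTP_LARGEST_INIT_ACCEPTED: leave ~2k for our INIT-ACK */
#define SCTP_LARGEST_INIT_ACCEPTED (65535 - 2048)

/* return non-zero if an INIT chunk of this length should be dropped early */
static int
init_too_large(uint32_t chk_length)
{
	return (chk_length > SCTP_LARGEST_INIT_ACCEPTED);
}

int
main(void)
{
	printf("60000 bytes: %s\n", init_too_large(60000) ? "drop" : "process");
	printf("65535 bytes: %s\n", init_too_large(65535) ? "drop" : "process");
	return (0);
}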
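
A bounded-loop sketch of the FWD-TSN fix: validate each stream number carried
in the chunk against the association's inbound stream count and stop at the
first bad one instead of walking past it. The struct layout, counts, and
return value are assumptions for the example.

#include <stdint.h>
#include <stdio.h>

struct strseq {
	uint16_t stream;	/* stream number from the FWD-TSN chunk */
	uint16_t sequence;	/* stream sequence number */
};

/*
 * Walk the stream/sequence pairs carried in a FWD-TSN chunk, but stop at the
 * first stream number that is out of range for this association instead of
 * reading past the valid data.  Returns how many pairs were processed.
 */
static unsigned int
process_fwd_tsn_streams(const struct strseq *pairs, unsigned int cnt,
    uint16_t streamincnt)
{
	unsigned int i;

	for (i = 0; i < cnt; i++) {
		if (pairs[i].stream >= streamincnt) {
			/* screwed up stream list from the peer: stop */
			break;
		}
		/* ... deliver/flush ordered data for this stream here ... */
	}
	return (i);
}

int
main(void)
{
	struct strseq pairs[] = { {0, 7}, {2, 3}, {9, 1}, {1, 4} };

	/* association with 4 inbound streams: the third entry is bad */
	printf("processed %u of 4 pairs\n",
	    process_fwd_tsn_streams(pairs, 4, 4));
	return (0);
}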

Approved by:	re@freebsd.org (Ken Smith)
Randall Stewart 2007-08-27 05:19:48 +00:00
parent 4a296ec798
commit 2afb3e849f
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=171990
21 changed files with 833 additions and 520 deletions


@ -2269,21 +2269,18 @@ sctp_peeloff(td, uap)
so->so_state &= ~SS_NOFDREF;
so->so_qstate &= ~SQ_COMP;
so->so_head = NULL;
ACCEPT_UNLOCK();
error = sctp_do_peeloff(head, so, (sctp_assoc_t)uap->name);
if (error)
goto noconnection;
if (head->so_sigio != NULL)
fsetown(fgetown(&head->so_sigio), &so->so_sigio);
FILE_LOCK(nfp);
nfp->f_data = so;
nfp->f_flag = fflag;
nfp->f_type = DTYPE_SOCKET;
nfp->f_ops = &socketops;
FILE_UNLOCK(nfp);
error = sctp_do_peeloff(head, so, (sctp_assoc_t)uap->name);
if (error)
goto noconnection;
if (head->so_sigio != NULL)
fsetown(fgetown(&head->so_sigio), &so->so_sigio);
noconnection:
/*


@ -40,7 +40,6 @@ __FBSDID("$FreeBSD$");
/*
* SCTP protocol - RFC2960.
*/
struct sctphdr {
uint16_t src_port; /* source port */
uint16_t dest_port; /* destination port */
@ -348,18 +347,6 @@ __attribute__((packed));
struct sctp_chunkhdr ch; /* header from chunk in error */
} __attribute__((packed));
#define HAVE_SCTP 1
#define HAVE_KERNEL_SCTP 1
#define HAVE_SCTP_PRSCTP 1
#define HAVE_SCTP_ADDIP 1
#define HAVE_SCTP_CANSET_PRIMARY 1
#define HAVE_SCTP_SAT_CAPABILITY 1
#define HAVE_SCTP_MULTIBUF 1
#define HAVE_SCTP_NOCONNECT 0
#define HAVE_SCTP_ECN_NONCE 1 /* ECN Nonce option */
#define HAVE_SCTP_AUTH 1
#define HAVE_SCTP_EXT_RCVINFO 1
#define HAVE_SCTP_CONNECTX 1
/*
* Main SCTP chunk types we place these here so natd and f/w's in user land
* can find them.
@ -484,6 +471,17 @@ __attribute__((packed));
#define SCTP_PCB_FLAGS_NO_FRAGMENT 0x00100000
#define SCTP_PCB_FLAGS_EXPLICIT_EOR 0x00400000
/*-
* mobility_features parameters (by micchie).Note
* these features are applied against the
* sctp_mobility_features flags.. not the sctp_features
* flags.
*/
#define SCTP_MOBILITY_BASE 0x00000001
#define SCTP_MOBILITY_FASTHANDOFF 0x00000002
#define SCTP_MOBILITY_DO_FASTHANDOFF 0x00000004
#define SCTP_SMALLEST_PMTU 512 /* smallest pmtu allowed when disabling PMTU
* discovery */
@ -537,4 +535,5 @@ __attribute__((packed));
#define SCTP_THRESHOLD_LOGGING 0x02000000
#endif /* !_NETINET_SCTP_H_ */


@ -569,11 +569,12 @@ sctp_process_asconf_set_primary(struct mbuf *m,
*/
void
sctp_handle_asconf(struct mbuf *m, unsigned int offset,
struct sctp_asconf_chunk *cp, struct sctp_tcb *stcb)
struct sctp_asconf_chunk *cp, struct sctp_tcb *stcb,
int first)
{
struct sctp_association *asoc;
uint32_t serial_num;
struct mbuf *m_ack, *m_result, *m_tail;
struct mbuf *n, *m_ack, *m_result, *m_tail;
struct sctp_asconf_ack_chunk *ack_cp;
struct sctp_asconf_paramhdr *aph, *ack_aph;
struct sctp_ipv6addr_param *p_addr;
@ -582,6 +583,7 @@ sctp_handle_asconf(struct mbuf *m, unsigned int offset,
/* asconf param buffer */
uint8_t aparam_buf[SCTP_PARAM_BUFFER_SIZE];
struct sctp_asconf_ack *ack, *ack_next;
/* verify minimum length */
if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_asconf_chunk)) {
@ -593,13 +595,12 @@ sctp_handle_asconf(struct mbuf *m, unsigned int offset,
asoc = &stcb->asoc;
serial_num = ntohl(cp->serial_number);
if (serial_num == asoc->asconf_seq_in) {
if (compare_with_wrap(asoc->asconf_seq_in, serial_num, MAX_SEQ) ||
serial_num == asoc->asconf_seq_in) {
/* got a duplicate ASCONF */
SCTPDBG(SCTP_DEBUG_ASCONF1,
"handle_asconf: got duplicate serial number = %xh\n",
serial_num);
/* resend last ASCONF-ACK... */
sctp_send_asconf_ack(stcb, 1);
return;
} else if (serial_num != (asoc->asconf_seq_in + 1)) {
SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: incorrect serial number = %xh (expected next = %xh)\n",
@ -613,10 +614,25 @@ sctp_handle_asconf(struct mbuf *m, unsigned int offset,
SCTPDBG(SCTP_DEBUG_ASCONF1,
"handle_asconf: asconf_limit=%u, sequence=%xh\n",
asconf_limit, serial_num);
if (asoc->last_asconf_ack_sent != NULL) {
/* free last ASCONF-ACK message sent */
sctp_m_freem(asoc->last_asconf_ack_sent);
asoc->last_asconf_ack_sent = NULL;
if (first) {
/* delete old cache */
SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: Now processing firstASCONF. Try to delte old cache\n");
ack = TAILQ_FIRST(&stcb->asoc.asconf_ack_sent);
while (ack != NULL) {
ack_next = TAILQ_NEXT(ack, next);
if (ack->serial_number == serial_num)
break;
SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: delete old(%u) < first(%u)\n",
ack->serial_number, serial_num);
TAILQ_REMOVE(&stcb->asoc.asconf_ack_sent, ack, next);
if (ack->data != NULL) {
sctp_m_freem(ack->data);
}
SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_asconf_ack, ack);
ack = ack_next;
}
}
m_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_asconf_ack_chunk), 0,
M_DONTWAIT, 1, MT_DATA);
@ -761,7 +777,21 @@ sctp_handle_asconf(struct mbuf *m, unsigned int offset,
send_reply:
ack_cp->ch.chunk_length = htons(ack_cp->ch.chunk_length);
/* save the ASCONF-ACK reply */
asoc->last_asconf_ack_sent = m_ack;
ack = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_asconf_ack,
struct sctp_asconf_ack);
if (ack == NULL) {
sctp_m_freem(m_ack);
return;
}
ack->serial_number = serial_num;
ack->last_sent_to = NULL;
ack->data = m_ack;
n = m_ack;
while (n) {
ack->len += SCTP_BUF_LEN(n);
n = SCTP_BUF_NEXT(n);
}
TAILQ_INSERT_TAIL(&stcb->asoc.asconf_ack_sent, ack, next);
/* see if last_control_chunk_from is set properly (use IP src addr) */
if (stcb->asoc.last_control_chunk_from == NULL) {
@ -817,8 +847,6 @@ sctp_handle_asconf(struct mbuf *m, unsigned int offset,
#endif
}
}
/* and send it (a new one) out... */
sctp_send_asconf_ack(stcb, 0);
}
/*
@ -912,6 +940,119 @@ sctp_asconf_nets_cleanup(struct sctp_tcb *stcb, struct sctp_ifn *ifn)
}
}
static int
sctp_asconf_queue_mgmt(struct sctp_tcb *, struct sctp_ifa *, uint16_t);
static void
sctp_net_immediate_retrans(struct sctp_tcb *stcb, struct sctp_nets *net)
{
struct sctp_tmit_chunk *chk;
SCTPDBG(SCTP_DEBUG_ASCONF2, "net_immediate_retrans()\n");
SCTPDBG(SCTP_DEBUG_ASCONF2, "RTO is %d\n", net->RTO);
sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net,
SCTP_FROM_SCTP_TIMER + SCTP_LOC_5);
stcb->asoc.cc_functions.sctp_set_initial_cc_param(stcb, net);
net->error_count = 0;
TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
if (chk->whoTo == net) {
chk->sent = SCTP_DATAGRAM_RESEND;
sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
}
}
}
static void
sctp_path_check_and_react(struct sctp_tcb *stcb, struct sctp_ifa *newifa)
{
struct sctp_nets *net;
int addrnum, changed;
/*
* If number of local valid addresses is 1, the valid address is
* probably newly added address. Several valid addresses in this
* association. A source address may not be changed. Additionally,
* they can be configured on a same interface as "alias" addresses.
* (by micchie)
*/
addrnum = sctp_local_addr_count(stcb);
SCTPDBG(SCTP_DEBUG_ASCONF1, "p_check_react(): %d local addresses\n",
addrnum);
if (addrnum == 1) {
TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
/* clear any cached route and source address */
if (net->ro.ro_rt) {
RTFREE(net->ro.ro_rt);
net->ro.ro_rt = NULL;
}
if (net->src_addr_selected) {
sctp_free_ifa(net->ro._s_addr);
net->ro._s_addr = NULL;
net->src_addr_selected = 0;
}
/* Retransmit unacknowledged DATA chunks immediately */
if (sctp_is_mobility_feature_on(stcb->sctp_ep,
SCTP_MOBILITY_FASTHANDOFF)) {
sctp_net_immediate_retrans(stcb, net);
}
/* also, SET PRIMARY is maybe already sent */
}
return;
}
/* Multiple local addresses exsist in the association. */
TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
/* clear any cached route and source address */
if (net->ro.ro_rt) {
RTFREE(net->ro.ro_rt);
net->ro.ro_rt = NULL;
}
if (net->src_addr_selected) {
sctp_free_ifa(net->ro._s_addr);
net->ro._s_addr = NULL;
net->src_addr_selected = 0;
}
/*
* Check if the nexthop is corresponding to the new address.
* If the new address is corresponding to the current
* nexthop, the path will be changed. If the new address is
* NOT corresponding to the current nexthop, the path will
* not be changed.
*/
SCTP_RTALLOC((sctp_route_t *) & net->ro,
stcb->sctp_ep->def_vrf_id);
if (net->ro.ro_rt == NULL)
continue;
//have to be considered...
changed = 0;
if (net->ro._l_addr.sa.sa_family == AF_INET) {
if (sctp_v4src_match_nexthop(newifa, (sctp_route_t *) & net->ro))
changed = 1;
}
if (net->ro._l_addr.sa.sa_family == AF_INET6) {
if (sctp_v6src_match_nexthop(
&newifa->address.sin6, (sctp_route_t *) & net->ro))
changed = 1;
}
/*
* if the newly added address does not relate routing
* information, we skip.
*/
if (changed == 0)
continue;
/* Retransmit unacknowledged DATA chunks immediately */
if (sctp_is_mobility_feature_on(stcb->sctp_ep,
SCTP_MOBILITY_FASTHANDOFF)) {
sctp_net_immediate_retrans(stcb, net);
}
/* Send SET PRIMARY for this new address */
if (net == stcb->asoc.primary_destination) {
(void)sctp_asconf_queue_mgmt(stcb, newifa,
SCTP_SET_PRIM_ADDR);
}
}
}
/*
* process an ADD/DELETE IP ack from peer.
* addr: corresponding sctp_ifa to the address being added/deleted.
@ -935,6 +1076,10 @@ sctp_asconf_addr_mgmt_ack(struct sctp_tcb *stcb, struct sctp_ifa *addr,
/* success case, so remove from the restricted list */
sctp_del_local_addr_restricted(stcb, addr);
if (sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
sctp_path_check_and_react(stcb, addr);
return;
}
/*
* clear any cached, topologically incorrect source
* addresses
@ -2054,7 +2199,7 @@ sctp_set_primary_ip_address(struct sctp_ifa *ifa)
if (!sctp_asconf_queue_add(stcb, ifa,
SCTP_SET_PRIM_ADDR)) {
/* set primary queuing succeeded */
SCTPDBG(SCTP_DEBUG_ASCONF1, "set_primary_ip_address: queued on stcb=%p, ",
SCTPDBG(SCTP_DEBUG_ASCONF1, ": queued on stcb=%p, ",
stcb);
SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, &ifa->address.sa);
if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {


@ -47,7 +47,7 @@ extern struct mbuf *sctp_compose_asconf(struct sctp_tcb *, int *);
extern void
sctp_handle_asconf(struct mbuf *, unsigned int, struct sctp_asconf_chunk *,
struct sctp_tcb *);
struct sctp_tcb *, int i);
extern void
sctp_handle_asconf_ack(struct mbuf *, int,


@ -454,6 +454,7 @@ sctp_compute_hashkey(sctp_key_t * key1, sctp_key_t * key2, sctp_key_t * shared)
/* concatenate the keys */
if (sctp_compare_key(key1, key2) <= 0) {
#ifdef SCTP_AUTH_DRAFT_04
/* key is key1 + shared + key2 */
if (sctp_get_keylen(key1)) {
bcopy(key1->key, key_ptr, key1->keylen);
@ -467,7 +468,23 @@ sctp_compute_hashkey(sctp_key_t * key1, sctp_key_t * key2, sctp_key_t * shared)
bcopy(key2->key, key_ptr, key2->keylen);
key_ptr += key2->keylen;
}
#else
/* key is shared + key1 + key2 */
if (sctp_get_keylen(shared)) {
bcopy(shared->key, key_ptr, shared->keylen);
key_ptr += shared->keylen;
}
if (sctp_get_keylen(key1)) {
bcopy(key1->key, key_ptr, key1->keylen);
key_ptr += key1->keylen;
}
if (sctp_get_keylen(key2)) {
bcopy(key2->key, key_ptr, key2->keylen);
key_ptr += key2->keylen;
}
#endif
} else {
#ifdef SCTP_AUTH_DRAFT_04
/* key is key2 + shared + key1 */
if (sctp_get_keylen(key2)) {
bcopy(key2->key, key_ptr, key2->keylen);
@ -481,6 +498,21 @@ sctp_compute_hashkey(sctp_key_t * key1, sctp_key_t * key2, sctp_key_t * shared)
bcopy(key1->key, key_ptr, key1->keylen);
key_ptr += key1->keylen;
}
#else
/* key is shared + key2 + key1 */
if (sctp_get_keylen(shared)) {
bcopy(shared->key, key_ptr, shared->keylen);
key_ptr += shared->keylen;
}
if (sctp_get_keylen(key2)) {
bcopy(key2->key, key_ptr, key2->keylen);
key_ptr += key2->keylen;
}
if (sctp_get_keylen(key1)) {
bcopy(key1->key, key_ptr, key1->keylen);
key_ptr += key1->keylen;
}
#endif
}
return (new_key);
}
@ -1828,6 +1860,8 @@ sctp_validate_init_auth_params(struct mbuf *m, int offset, int limit)
int peer_supports_asconf = 0;
int peer_supports_auth = 0;
int got_random = 0, got_hmacs = 0, got_chklist = 0;
uint8_t saw_asconf = 0;
uint8_t saw_asconf_ack = 0;
/* go through each of the params. */
phdr = sctp_get_next_param(m, offset, &parm_buf, sizeof(parm_buf));
@ -1838,7 +1872,7 @@ sctp_validate_init_auth_params(struct mbuf *m, int offset, int limit)
if (offset + plen > limit) {
break;
}
if (plen == 0) {
if (plen < sizeof(struct sctp_paramhdr)) {
break;
}
if (ptype == SCTP_SUPPORTED_CHUNK_EXT) {
@ -1899,8 +1933,33 @@ sctp_validate_init_auth_params(struct mbuf *m, int offset, int limit)
}
got_hmacs = 1;
} else if (ptype == SCTP_CHUNK_LIST) {
int i, num_chunks;
uint8_t chunks_store[SCTP_SMALL_CHUNK_STORE];
/* did the peer send a non-empty chunk list? */
if (plen > 0)
struct sctp_auth_chunk_list *chunks = NULL;
phdr = sctp_get_next_param(m, offset,
(struct sctp_paramhdr *)chunks_store,
min(plen, sizeof(chunks_store)));
if (phdr == NULL)
return (-1);
/*-
* Flip through the list and mark that the
* peer supports asconf/asconf_ack.
*/
chunks = (struct sctp_auth_chunk_list *)phdr;
num_chunks = plen - sizeof(*chunks);
for (i = 0; i < num_chunks; i++) {
/* record asconf/asconf-ack if listed */
if (chunks->chunk_types[i] == SCTP_ASCONF)
saw_asconf = 1;
if (chunks->chunk_types[i] == SCTP_ASCONF_ACK)
saw_asconf_ack = 1;
}
if (num_chunks)
got_chklist = 1;
}
offset += SCTP_SIZE32(plen);
@ -1926,6 +1985,9 @@ sctp_validate_init_auth_params(struct mbuf *m, int offset, int limit)
SCTPDBG(SCTP_DEBUG_AUTH1,
"SCTP: peer supports ASCONF but not AUTH\n");
return (-1);
} else if ((peer_supports_asconf) && (peer_supports_auth) &&
((saw_asconf == 0) || (saw_asconf_ack == 0))) {
return (-2);
}
return (0);
}


@ -57,6 +57,11 @@ __FBSDID("$FreeBSD$");
*/
#define SCTP_ADDRESS_LIMIT 1080
/* We need at least 2k of space for us, inits
* larger than that lets abort.
*/
#define SCTP_LARGEST_INIT_ACCEPTED (65535 - 2048)
/* Number of addresses where we just skip the counting */
#define SCTP_COUNT_LIMIT 40
@ -267,6 +272,20 @@ __FBSDID("$FreeBSD$");
#define SCTP_DEFAULT_AUTO_ASCONF 1
#endif
/* default MOBILITY_BASE mode enable(1)/disable(0) value (sysctl) */
#if defined (__APPLE__) && !defined(SCTP_APPLE_MOBILITY_BASE)
#define SCTP_DEFAULT_MOBILITY_BASE 0
#else
#define SCTP_DEFAULT_MOBILITY_BASE 0
#endif
/* default MOBILITY_FASTHANDOFF mode enable(1)/disable(0) value (sysctl) */
#if defined (__APPLE__) && !defined(SCTP_APPLE_MOBILITY_FASTHANDOFF)
#define SCTP_DEFAULT_MOBILITY_FASTHANDOFF 0
#else
#define SCTP_DEFAULT_MOBILITY_FASTHANDOFF 0
#endif
/*
* Theshold for rwnd updates, we have to read (sb_hiwat >>
* SCTP_RWND_HIWAT_SHIFT) before we will look to see if we need to send a
@ -383,6 +402,7 @@ __FBSDID("$FreeBSD$");
#define SCTP_OUTPUT_FROM_USR_RCVD 13
#define SCTP_OUTPUT_FROM_COOKIE_ACK 14
#define SCTP_OUTPUT_FROM_DRAIN 15
#define SCTP_OUTPUT_FROM_CLOSING 16
/* SCTP chunk types are moved sctp.h for application (NAT, FW) use */
/* align to 32-bit sizes */
@ -775,6 +795,9 @@ __FBSDID("$FreeBSD$");
#define SCTP_CHUNK_BUFFER_SIZE 512
#define SCTP_PARAM_BUFFER_SIZE 512
/* small chunk store for looking at chunk_list in auth */
#define SCTP_SMALL_CHUNK_STORE 260
#define SCTP_DEFAULT_MINSEGMENT 512 /* MTU size ... if no mtu disc */
#define SCTP_HOW_MANY_SECRETS 2 /* how many secrets I keep */
@ -803,9 +826,6 @@ __FBSDID("$FreeBSD$");
#define SCTP_NOTIFY_ASCONF_DELETE_IP 16
#define SCTP_NOTIFY_ASCONF_SET_PRIMARY 17
#define SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION 18
#define SCTP_NOTIFY_ADAPTATION_INDICATION 19
/* same as above */
#define SCTP_NOTIFY_ADAPTION_INDICATION 19
#define SCTP_NOTIFY_INTERFACE_CONFIRMED 20
#define SCTP_NOTIFY_STR_RESET_RECV 21
#define SCTP_NOTIFY_STR_RESET_SEND 22


@ -59,7 +59,7 @@ __FBSDID("$FreeBSD$");
void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
uint32_t calc, calc_w_oh;
uint32_t calc, calc_save;
/*
* This is really set wrong with respect to a 1-2-m socket. Since
@ -94,32 +94,34 @@ sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
return;
}
/* what is the overhead of all these rwnd's */
calc_w_oh = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
calc_save = calc;
asoc->my_rwnd = calc;
if (calc_w_oh == 0) {
/*
* If our overhead is greater than the advertised rwnd, we
* clamp the rwnd to 1. This lets us still accept inbound
* segments, but hopefully will shut the sender down when he
* finally gets the message.
*/
if ((asoc->my_rwnd == 0) &&
(calc < stcb->asoc.my_rwnd_control_len)) {
/*-
* If our rwnd == 0 && the overhead is greater than the
* data onqueue, we clamp the rwnd to 1. This lets us
* still accept inbound segments, but hopefully will shut
* the sender down when he finally gets the message. This
* hopefully will gracefully avoid discarding packets.
*/
asoc->my_rwnd = 1;
}
if (asoc->my_rwnd &&
(asoc->my_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_receiver)) {
/* SWS engaged, tell peer none left */
asoc->my_rwnd = 1;
} else {
/* SWS threshold */
if (asoc->my_rwnd &&
(asoc->my_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_receiver)) {
/* SWS engaged, tell peer none left */
asoc->my_rwnd = 1;
}
}
}
/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
uint32_t calc = 0, calc_w_oh;
uint32_t calc = 0, calc_save = 0, result = 0;
/*
* This is really set wrong with respect to a 1-2-m socket. Since
@ -153,24 +155,27 @@ sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
return (calc);
}
/* what is the overhead of all these rwnd's */
calc_w_oh = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
if (calc_w_oh == 0) {
/*
* If our overhead is greater than the advertised rwnd, we
* clamp the rwnd to 1. This lets us still accept inbound
* segments, but hopefully will shut the sender down when he
* finally gets the message.
*/
calc = 1;
} else {
/* SWS threshold */
if (calc &&
(calc < stcb->sctp_ep->sctp_ep.sctp_sws_receiver)) {
/* SWS engaged, tell peer none left */
calc = 1;
}
calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
calc_save = calc;
result = calc;
if ((result == 0) &&
(calc < stcb->asoc.my_rwnd_control_len)) {
/*-
* If our rwnd == 0 && the overhead is greater than the
* data onqueue, we clamp the rwnd to 1. This lets us
* still accept inbound segments, but hopefully will shut
* the sender down when he finally gets the message. This
* hopefully will gracefully avoid discarding packets.
*/
result = 1;
}
return (calc);
if (asoc->my_rwnd &&
(asoc->my_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_receiver)) {
/* SWS engaged, tell peer none left */
result = 1;
}
return (result);
}
@ -4155,10 +4160,15 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
*/
sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
sctp_streamhead);
if ((sp) && (sp->length == 0) && (sp->msg_is_complete == 0)) {
asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
asoc->locked_on_sending = NULL;
asoc->stream_queue_cnt--;
if ((sp) && (sp->length == 0)) {
/* Let cleanup code purge it */
if (sp->msg_is_complete) {
asoc->stream_queue_cnt--;
} else {
asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
asoc->locked_on_sending = NULL;
asoc->stream_queue_cnt--;
}
}
}
if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
@ -4821,10 +4831,14 @@ sctp_handle_sack(struct mbuf *m, int offset,
*/
sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
sctp_streamhead);
if ((sp) && (sp->length == 0) && (sp->msg_is_complete == 0)) {
asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
if ((sp) && (sp->length == 0)) {
asoc->locked_on_sending = NULL;
asoc->stream_queue_cnt--;
if (sp->msg_is_complete) {
asoc->stream_queue_cnt--;
} else {
asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
asoc->stream_queue_cnt--;
}
}
}
if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
@ -5218,7 +5232,7 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb,
* report where we are.
*/
struct sctp_association *asoc;
uint32_t new_cum_tsn, gap, back_out_htsn;
uint32_t new_cum_tsn, gap;
unsigned int i, cnt_gone, fwd_sz, cumack_set_flag, m_size;
struct sctp_stream_in *strm;
struct sctp_tmit_chunk *chk, *at;
@ -5242,7 +5256,6 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb,
/* Already got there ... */
return;
}
back_out_htsn = asoc->highest_tsn_inside_map;
if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map,
MAX_TSN)) {
asoc->highest_tsn_inside_map = new_cum_tsn;
@ -5263,8 +5276,7 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb,
gap = new_cum_tsn + (MAX_TSN - asoc->mapping_array_base_tsn) + 1;
}
if (gap > m_size) {
asoc->highest_tsn_inside_map = back_out_htsn;
if (gap >= m_size) {
if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) {
sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
}
@ -5299,46 +5311,40 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb,
SCTP_PEER_FAULTY, oper);
return;
}
if (asoc->highest_tsn_inside_map >
asoc->mapping_array_base_tsn) {
gap = asoc->highest_tsn_inside_map -
asoc->mapping_array_base_tsn;
} else {
gap = asoc->highest_tsn_inside_map +
(MAX_TSN - asoc->mapping_array_base_tsn) + 1;
}
SCTP_STAT_INCR(sctps_fwdtsn_map_over);
slide_out:
memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
cumack_set_flag = 1;
}
SCTP_TCB_LOCK_ASSERT(stcb);
for (i = 0; i <= gap; i++) {
SCTP_SET_TSN_PRESENT(asoc->mapping_array, i);
}
/*
* Now after marking all, slide thing forward but no sack please.
*/
sctp_sack_check(stcb, 0, 0, abort_flag);
if (*abort_flag)
return;
asoc->mapping_array_base_tsn = new_cum_tsn + 1;
asoc->cumulative_tsn = asoc->highest_tsn_inside_map = new_cum_tsn;
if (cumack_set_flag) {
/*
* fwd-tsn went outside my gap array - not a common
* occurance. Do the same thing we do when a cookie-echo
* arrives.
*/
asoc->highest_tsn_inside_map = new_cum_tsn - 1;
asoc->mapping_array_base_tsn = new_cum_tsn;
asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) {
sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
}
asoc->last_echo_tsn = asoc->highest_tsn_inside_map;
} else {
SCTP_TCB_LOCK_ASSERT(stcb);
if ((compare_with_wrap(((uint32_t) asoc->cumulative_tsn + gap), asoc->highest_tsn_inside_map, MAX_TSN)) ||
(((uint32_t) asoc->cumulative_tsn + gap) == asoc->highest_tsn_inside_map)) {
goto slide_out;
} else {
for (i = 0; i <= gap; i++) {
SCTP_SET_TSN_PRESENT(asoc->mapping_array, i);
}
}
/*
* Now after marking all, slide thing forward but no sack
* please.
*/
sctp_sack_check(stcb, 0, 0, abort_flag);
if (*abort_flag)
return;
}
/*************************************************************/
/* 2. Clear up re-assembly queue */
/*************************************************************/
/*
* First service it if pd-api is up, just in case we can progress it
* forward
@ -5469,8 +5475,9 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb,
sizeof(struct sctp_strseq),
(uint8_t *) & strseqbuf);
offset += sizeof(struct sctp_strseq);
if (stseq == NULL)
if (stseq == NULL) {
break;
}
/* Convert */
xx = (unsigned char *)&stseq[i];
st = ntohs(stseq[i].stream);
@ -5479,13 +5486,8 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb,
stseq[i].sequence = st;
/* now process */
if (stseq[i].stream >= asoc->streamincnt) {
/*
* It is arguable if we should continue.
* Since the peer sent bogus stream info we
* may be in deep trouble.. a return may be
* a better choice?
*/
continue;
/* screwed up streams, stop! */
break;
}
strm = &asoc->strmin[stseq[i].stream];
if (compare_with_wrap(stseq[i].sequence,


@ -610,6 +610,7 @@ sctp_handle_abort(struct sctp_abort_chunk *cp,
#ifdef SCTP_ASOCLOG_OF_TSNS
sctp_print_out_track_log(stcb);
#endif
stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n");
@ -690,6 +691,8 @@ sctp_handle_shutdown(struct sctp_shutdown_chunk *cp,
}
SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net,
SCTP_FROM_SCTP_INPUT + SCTP_LOC_7);
/* start SHUTDOWN timer */
sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep,
stcb, net);
@ -2219,6 +2222,7 @@ sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
(SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) |
SCTP_PCB_FLAGS_DONT_WAKE);
inp->sctp_features = (*inp_p)->sctp_features;
inp->sctp_socket = so;
inp->sctp_frag_point = (*inp_p)->sctp_frag_point;
inp->partial_delivery_point = (*inp_p)->partial_delivery_point;
@ -2482,6 +2486,8 @@ sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp,
/* process according to association state */
if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
/* unexpected SHUTDOWN-COMPLETE... so ignore... */
SCTPDBG(SCTP_DEBUG_INPUT2,
"sctp_handle_shutdown_complete: not in SCTP_STATE_SHUTDOWN_ACK_SENT --- ignore\n");
SCTP_TCB_UNLOCK(stcb);
return;
}
@ -2499,6 +2505,9 @@ sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp,
sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_22);
SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
/* free the TCB */
SCTPDBG(SCTP_DEBUG_INPUT2,
"sctp_handle_shutdown_complete: calls free-asoc\n");
(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_23);
return;
}
@ -2721,7 +2730,7 @@ process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
break;
case SCTP_ASCONF_ACK:
/* resend last asconf ack */
sctp_send_asconf_ack(stcb, 1);
sctp_send_asconf_ack(stcb);
break;
case SCTP_FORWARD_CUM_TSN:
send_forward_tsn(stcb, &stcb->asoc);
@ -3481,6 +3490,7 @@ __attribute__((noinline))
int got_auth = 0;
uint32_t auth_offset = 0, auth_len = 0;
int auth_skipped = 0;
int asconf_cnt = 0;
SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n",
iphlen, *offset, length, stcb);
@ -3555,18 +3565,35 @@ __attribute__((noinline))
* need to look inside to find the association
*/
if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) {
struct sctp_chunkhdr *asconf_ch = ch;
uint32_t asconf_offset = 0, asconf_len = 0;
/* inp's refcount may be reduced */
SCTP_INP_INCR_REF(inp);
stcb = sctp_findassociation_ep_asconf(m, iphlen,
*offset, sh, &inp, netp);
asconf_offset = *offset;
do {
asconf_len = ntohs(asconf_ch->chunk_length);
if (asconf_len < sizeof(struct sctp_asconf_paramhdr))
break;
stcb = sctp_findassociation_ep_asconf(m, iphlen,
*offset, sh, &inp, netp);
if (stcb != NULL)
break;
asconf_offset += SCTP_SIZE32(asconf_len);
asconf_ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, asconf_offset,
sizeof(struct sctp_chunkhdr), chunk_buf);
} while (asconf_ch != NULL && asconf_ch->chunk_type == SCTP_ASCONF);
if (stcb == NULL) {
/*
* reduce inp's refcount if not reduced in
* sctp_findassociation_ep_asconf().
*/
SCTP_INP_DECR_REF(inp);
} else {
locked_tcb = stcb;
}
/* now go back and verify any auth chunk to be sure */
if (auth_skipped && (stcb != NULL)) {
struct sctp_auth_chunk *auth;
@ -3783,7 +3810,8 @@ __attribute__((noinline))
return (NULL);
}
}
if ((num_chunks > 1) ||
if ((chk_length > SCTP_LARGEST_INIT_ACCEPTED) ||
(num_chunks > 1) ||
(sctp_strict_init && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
*offset = length;
if (locked_tcb) {
@ -3878,12 +3906,21 @@ __attribute__((noinline))
if ((stcb == NULL) || (chk_length < sizeof(struct sctp_sack_chunk))) {
SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on sack chunk, too small\n");
ignore_sack:
*offset = length;
if (locked_tcb) {
SCTP_TCB_UNLOCK(locked_tcb);
}
return (NULL);
}
if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
/*-
* If we have sent a shutdown-ack, we will pay no
* attention to a sack sent in to us since
* we don't care anymore.
*/
goto ignore_sack;
}
sack = (struct sctp_sack_chunk *)ch;
nonce_sum_flag = ch->chunk_flags & SCTP_SACK_NONCE_SUM;
cum_ack = ntohl(sack->sack.cum_tsn_ack);
@ -4246,7 +4283,8 @@ __attribute__((noinline))
}
stcb->asoc.overall_error_count = 0;
sctp_handle_asconf(m, *offset,
(struct sctp_asconf_chunk *)ch, stcb);
(struct sctp_asconf_chunk *)ch, stcb, asconf_cnt == 0);
asconf_cnt++;
}
break;
case SCTP_ASCONF_ACK:
@ -4466,6 +4504,10 @@ __attribute__((noinline))
return (NULL);
}
} /* while */
if (asconf_cnt > 0 && stcb != NULL) {
sctp_send_asconf_ack(stcb);
}
return (stcb);
}
@ -4572,13 +4614,15 @@ sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset,
sctp_auditing(0, inp, stcb, net);
#endif
SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d\n",
m, iphlen, offset);
SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d stcb:%p\n",
m, iphlen, offset, stcb);
if (stcb) {
/* always clear this before beginning a packet */
stcb->asoc.authenticated = 0;
stcb->asoc.seen_a_sack_this_pkt = 0;
SCTPDBG(SCTP_DEBUG_INPUT1, "stcb:%p state:%x\n",
stcb, stcb->asoc.state);
if ((stcb->asoc.state & SCTP_STATE_WAS_ABORTED) ||
(stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
/*-
@ -4770,9 +4814,12 @@ sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset,
un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue) ||
/* For retransmission to new primary destination (by micchie) */
sctp_is_mobility_feature_on(inp, SCTP_MOBILITY_DO_FASTHANDOFF) ||
((un_sent) &&
(stcb->asoc.peers_rwnd > 0 ||
(stcb->asoc.peers_rwnd <= 0 && stcb->asoc.total_flight == 0)))) {
sctp_mobility_feature_off(inp, SCTP_MOBILITY_DO_FASTHANDOFF);
SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n");
sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC);
SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n");
@ -4881,43 +4928,34 @@ sctp_input(i_pak, off)
goto bad;
}
/* validate SCTP checksum */
if ((sctp_no_csum_on_loopback == 0) || !SCTP_IS_IT_LOOPBACK(m)) {
/*
* we do NOT validate things from the loopback if the sysctl
* is set to 1.
*/
check = sh->checksum; /* save incoming checksum */
if ((check == 0) && (sctp_no_csum_on_loopback)) {
/*
* special hook for where we got a local address
* somehow routed across a non IFT_LOOP type
* interface
*/
if (ip->ip_src.s_addr == ip->ip_dst.s_addr)
goto sctp_skip_csum_4;
}
sh->checksum = 0; /* prepare for calc */
calc_check = sctp_calculate_sum(m, &mlen, iphlen);
if (calc_check != check) {
SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x m:%p mlen:%d iphlen:%d\n",
calc_check, check, m, mlen, iphlen);
stcb = sctp_findassociation_addr(m, iphlen,
offset - sizeof(*ch),
sh, ch, &inp, &net,
vrf_id);
if ((inp) && (stcb)) {
sctp_send_packet_dropped(stcb, net, m, iphlen, 1);
sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR);
} else if ((inp != NULL) && (stcb == NULL)) {
refcount_up = 1;
}
SCTP_STAT_INCR(sctps_badsum);
SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors);
goto bad;
}
sh->checksum = calc_check;
check = sh->checksum; /* save incoming checksum */
if ((check == 0) && (sctp_no_csum_on_loopback) &&
((ip->ip_src.s_addr == ip->ip_dst.s_addr) ||
(SCTP_IS_IT_LOOPBACK(m)))
) {
goto sctp_skip_csum_4;
}
sh->checksum = 0; /* prepare for calc */
calc_check = sctp_calculate_sum(m, &mlen, iphlen);
if (calc_check != check) {
SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x m:%p mlen:%d iphlen:%d\n",
calc_check, check, m, mlen, iphlen);
stcb = sctp_findassociation_addr(m, iphlen,
offset - sizeof(*ch),
sh, ch, &inp, &net,
vrf_id);
if ((inp) && (stcb)) {
sctp_send_packet_dropped(stcb, net, m, iphlen, 1);
sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR);
} else if ((inp != NULL) && (stcb == NULL)) {
refcount_up = 1;
}
SCTP_STAT_INCR(sctps_badsum);
SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors);
goto bad;
}
sh->checksum = calc_check;
sctp_skip_csum_4:
/* destination port of 0 is illegal, based on RFC2960. */
if (sh->dest_port == 0) {


@ -2606,7 +2606,9 @@ sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
uint8_t dest_is_loop,
uint8_t dest_is_priv,
int addr_wanted,
sa_family_t fam)
sa_family_t fam,
sctp_route_t * ro
)
{
struct sctp_ifa *ifa, *sifa;
int num_eligible_addr = 0;
@ -2619,6 +2621,27 @@ sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
dest_is_priv, fam);
if (sifa == NULL)
continue;
/*
* Check if the IPv6 address matches to next-hop. In the
* mobile case, old IPv6 address may be not deleted from the
* interface. Then, the interface has previous and new
* addresses. We should use one corresponding to the
* next-hop. (by micchie)
*/
if (stcb && fam == AF_INET6 &&
sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
if (sctp_v6src_match_nexthop(&sifa->address.sin6, ro)
== 0) {
continue;
}
}
/* Avoid topologically incorrect IPv4 address */
if (stcb && fam == AF_INET &&
sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
if (sctp_v4src_match_nexthop(sifa, ro) == 0) {
continue;
}
}
if (stcb) {
if ((non_asoc_addr_ok == 0) &&
sctp_is_addr_restricted(stcb, sifa)) {
@ -2753,7 +2776,7 @@ sctp_choose_boundall(struct sctp_inpcb *inp,
SCTPDBG(SCTP_DEBUG_OUTPUT2, "cur_addr_num:%d\n", cur_addr_num);
sctp_ifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, stcb, non_asoc_addr_ok, dest_is_loop,
dest_is_priv, cur_addr_num, fam);
dest_is_priv, cur_addr_num, fam, ro);
/* if sctp_ifa is NULL something changed??, fall to plan b. */
if (sctp_ifa) {
@ -2806,7 +2829,7 @@ sctp_choose_boundall(struct sctp_inpcb *inp,
cur_addr_num = 0;
}
sifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, stcb, non_asoc_addr_ok, dest_is_loop,
dest_is_priv, cur_addr_num, fam);
dest_is_priv, cur_addr_num, fam, ro);
if (sifa == NULL)
continue;
if (net) {
@ -3018,6 +3041,7 @@ sctp_source_address_selection(struct sctp_inpcb *inp,
}
SCTPDBG(SCTP_DEBUG_OUTPUT2, "Select source addr for:");
SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)to);
SCTP_IPI_ADDR_LOCK();
if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
/*
* Bound all case
@ -3025,6 +3049,7 @@ sctp_source_address_selection(struct sctp_inpcb *inp,
answer = sctp_choose_boundall(inp, stcb, net, ro, vrf_id,
dest_is_priv, dest_is_loop,
non_asoc_addr_ok, fam);
SCTP_IPI_ADDR_UNLOCK();
return (answer);
}
/*
@ -3041,6 +3066,7 @@ sctp_source_address_selection(struct sctp_inpcb *inp,
dest_is_priv,
dest_is_loop, fam);
}
SCTP_IPI_ADDR_UNLOCK();
return (answer);
}
@ -3528,7 +3554,8 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
}
} else {
/* PMTU check versus smallest asoc MTU goes here */
if (ro->ro_rt != NULL) {
if ((ro->ro_rt != NULL) &&
(net->ro._s_addr)) {
uint32_t mtu;
mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
@ -3541,7 +3568,7 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
net->mtu = mtu;
}
} else {
} else if (ro->ro_rt == NULL) {
/* route was freed */
if (net->ro._s_addr &&
net->src_addr_selected) {
@ -3772,7 +3799,8 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
}
net->src_addr_selected = 0;
}
if (ro->ro_rt != NULL) {
if ((ro->ro_rt != NULL) &&
(net->ro._s_addr)) {
uint32_t mtu;
mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
@ -7893,7 +7921,6 @@ sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net)
return;
}
chk->copy_by_ref = 0;
chk->send_size = sizeof(struct sctp_chunkhdr);
chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK;
chk->rec.chunk_id.can_take_data = 1;
@ -8000,79 +8027,81 @@ sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net)
}
void
sctp_send_asconf_ack(struct sctp_tcb *stcb, uint32_t retrans)
sctp_send_asconf_ack(struct sctp_tcb *stcb)
{
/*
* formulate and queue a asconf-ack back to sender. the asconf-ack
* must be stored in the tcb.
*/
struct sctp_tmit_chunk *chk;
struct sctp_asconf_ack *ack, *latest_ack;
struct mbuf *m_ack, *m;
struct sctp_nets *net = NULL;
SCTP_TCB_LOCK_ASSERT(stcb);
/* is there a asconf-ack mbuf chain to send? */
if (stcb->asoc.last_asconf_ack_sent == NULL) {
/* Get the latest ASCONF-ACK */
latest_ack = TAILQ_LAST(&stcb->asoc.asconf_ack_sent, sctp_asconf_ackhead);
if (latest_ack == NULL) {
return;
}
/* copy the asconf_ack */
m_ack = SCTP_M_COPYM(stcb->asoc.last_asconf_ack_sent, 0, M_COPYALL,
M_DONTWAIT);
if (m_ack == NULL) {
/* couldn't copy it */
return;
}
sctp_alloc_a_chunk(stcb, chk);
if (chk == NULL) {
/* no memory */
if (m_ack)
sctp_m_freem(m_ack);
return;
}
chk->copy_by_ref = 0;
/* figure out where it goes to */
if (retrans) {
if (latest_ack->last_sent_to != NULL &&
latest_ack->last_sent_to == stcb->asoc.last_control_chunk_from) {
/* we're doing a retransmission */
if (stcb->asoc.used_alt_asconfack > 2) {
/* tried alternate nets already, go back */
chk->whoTo = NULL;
} else {
/* need to try and alternate net */
chk->whoTo = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0);
stcb->asoc.used_alt_asconfack++;
}
if (chk->whoTo == NULL) {
net = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0);
if (net == NULL) {
/* no alternate */
if (stcb->asoc.last_control_chunk_from == NULL)
chk->whoTo = stcb->asoc.primary_destination;
net = stcb->asoc.primary_destination;
else
chk->whoTo = stcb->asoc.last_control_chunk_from;
stcb->asoc.used_alt_asconfack = 0;
net = stcb->asoc.last_control_chunk_from;
}
} else {
/* normal case */
if (stcb->asoc.last_control_chunk_from == NULL)
chk->whoTo = stcb->asoc.primary_destination;
net = stcb->asoc.primary_destination;
else
chk->whoTo = stcb->asoc.last_control_chunk_from;
stcb->asoc.used_alt_asconfack = 0;
net = stcb->asoc.last_control_chunk_from;
}
chk->data = m_ack;
chk->send_size = 0;
/* Get size */
m = m_ack;
while (m) {
chk->send_size += SCTP_BUF_LEN(m);
m = SCTP_BUF_NEXT(m);
latest_ack->last_sent_to = net;
atomic_add_int(&latest_ack->last_sent_to->ref_count, 1);
TAILQ_FOREACH(ack, &stcb->asoc.asconf_ack_sent, next) {
if (ack->data == NULL) {
continue;
}
/* copy the asconf_ack */
m_ack = SCTP_M_COPYM(ack->data, 0, M_COPYALL, M_DONTWAIT);
if (m_ack == NULL) {
/* couldn't copy it */
return;
}
sctp_alloc_a_chunk(stcb, chk);
if (chk == NULL) {
/* no memory */
if (m_ack)
sctp_m_freem(m_ack);
return;
}
chk->copy_by_ref = 0;
chk->whoTo = net;
chk->data = m_ack;
chk->send_size = 0;
/* Get size */
m = m_ack;
chk->send_size = ack->len;
chk->rec.chunk_id.id = SCTP_ASCONF_ACK;
chk->rec.chunk_id.can_take_data = 1;
chk->sent = SCTP_DATAGRAM_UNSENT;
chk->snd_count = 0;
chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; /* XXX */
chk->asoc = &stcb->asoc;
atomic_add_int(&chk->whoTo->ref_count, 1);
TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
chk->asoc->ctrl_queue_cnt++;
}
chk->rec.chunk_id.id = SCTP_ASCONF_ACK;
chk->rec.chunk_id.can_take_data = 1;
chk->sent = SCTP_DATAGRAM_UNSENT;
chk->snd_count = 0;
chk->flags = 0;
chk->asoc = &stcb->asoc;
atomic_add_int(&chk->whoTo->ref_count, 1);
TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
chk->asoc->ctrl_queue_cnt++;
return;
}
@ -9516,11 +9545,7 @@ sctp_send_shutdown_complete2(struct mbuf *m, int iphlen, struct sctphdr *sh,
comp_cp->shut_cmp.ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
/* add checksum */
if ((sctp_no_csum_on_loopback) && SCTP_IS_IT_LOOPBACK(mout)) {
comp_cp->sh.checksum = 0;
} else {
comp_cp->sh.checksum = sctp_calculate_sum(mout, NULL, offset_out);
}
comp_cp->sh.checksum = sctp_calculate_sum(mout, NULL, offset_out);
if (iph_out != NULL) {
sctp_route_t ro;
int ret;
@ -9779,8 +9804,7 @@ sctp_send_hb(struct sctp_tcb *stcb, int user_req, struct sctp_nets *u_net)
sctp_free_remote_addr(chk->whoTo);
chk->whoTo = NULL;
}
SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
SCTP_DECR_CHK_COUNT();
sctp_free_a_chunk((struct sctp_tcb *)NULL, chk);
return (-1);
}
}
@ -9905,8 +9929,13 @@ sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
break;
}
switch (ch->chunk_type) {
case SCTP_PACKET_DROPPED:
case SCTP_ABORT_ASSOCIATION:
/* we don't respond with an PKT-DROP to an ABORT */
/*-
* we don't respond with an PKT-DROP to an ABORT
* or PKT-DROP
*/
sctp_free_a_chunk(stcb, chk);
return;
default:
break;
@ -10395,6 +10424,9 @@ sctp_send_abort(struct mbuf *m, int iphlen, struct sctphdr *sh, uint32_t vtag,
abm = (struct sctp_abort_msg *)((caddr_t)ip6_out + iphlen_out);
} else {
/* Currently not supported */
if (err_cause)
sctp_m_freem(err_cause);
sctp_m_freem(mout);
return;
}
@ -10436,11 +10468,7 @@ sctp_send_abort(struct mbuf *m, int iphlen, struct sctphdr *sh, uint32_t vtag,
}
/* add checksum */
if ((sctp_no_csum_on_loopback) && SCTP_IS_IT_LOOPBACK(m)) {
abm->sh.checksum = 0;
} else {
abm->sh.checksum = sctp_calculate_sum(mout, NULL, iphlen_out);
}
abm->sh.checksum = sctp_calculate_sum(mout, NULL, iphlen_out);
if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
/* no mbuf's */
sctp_m_freem(mout);
@ -10546,11 +10574,7 @@ sctp_send_operr_to(struct mbuf *m, int iphlen, struct mbuf *scm, uint32_t vtag,
m_copyback(scm, len, padlen, (caddr_t)&cpthis);
len += padlen;
}
if ((sctp_no_csum_on_loopback) && SCTP_IS_IT_LOOPBACK(m)) {
val = 0;
} else {
val = sctp_calculate_sum(scm, NULL, 0);
}
val = sctp_calculate_sum(scm, NULL, 0);
mout = sctp_get_mbuf_for_msg(sizeof(struct ip6_hdr), 1, M_DONTWAIT, 1, MT_DATA);
if (mout == NULL) {
sctp_m_freem(scm);
@ -10714,14 +10738,6 @@ sctp_copy_it_in(struct sctp_tcb *stcb,
int resv_in_first;
*error = 0;
/* Unless E_EOR mode is on, we must make a send FIT in one call. */
if (((user_marks_eor == 0) && non_blocking) &&
(uio->uio_resid > (int)SCTP_SB_LIMIT_SND(stcb->sctp_socket))) {
/* It will NEVER fit */
SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
*error = EMSGSIZE;
goto out_now;
}
/* Now can we send this? */
if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
(SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
@ -10847,6 +10863,7 @@ sctp_lower_sosend(struct socket *so,
struct sctp_nets *net;
struct sctp_association *asoc;
struct sctp_inpcb *t_inp;
int user_marks_eor;
int create_lock_applied = 0;
int nagle_applies = 0;
int some_on_control = 0;
@ -10854,6 +10871,7 @@ sctp_lower_sosend(struct socket *so,
int hold_tcblock = 0;
int non_blocking = 0;
int temp_flags = 0;
uint32_t local_add_more;
error = 0;
net = NULL;
@ -10869,6 +10887,7 @@ sctp_lower_sosend(struct socket *so,
SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
return (EINVAL);
}
user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
atomic_add_int(&inp->total_sends, 1);
if (uio) {
if (uio->uio_resid < 0) {
@ -11068,12 +11087,7 @@ sctp_lower_sosend(struct socket *so,
}
}
if (stcb == NULL) {
if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
error = ENOTCONN;
goto out_unlocked;
} else if (addr == NULL) {
if (addr == NULL) {
SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
error = ENOENT;
goto out_unlocked;
@ -11226,6 +11240,7 @@ sctp_lower_sosend(struct socket *so,
if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT)) {
if (sndlen > asoc->smallest_mtu) {
SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
error = EMSGSIZE;
goto out_unlocked;
}
@ -11444,11 +11459,32 @@ sctp_lower_sosend(struct socket *so,
error = EFAULT;
goto out_unlocked;
}
/* Unless E_EOR mode is on, we must make a send FIT in one call. */
if ((user_marks_eor == 0) &&
(uio->uio_resid > (int)SCTP_SB_LIMIT_SND(stcb->sctp_socket))) {
/* It will NEVER fit */
SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
error = EMSGSIZE;
goto out_unlocked;
}
if (user_marks_eor) {
local_add_more = sctp_add_more_threshold;
} else {
/*-
* For non-eeor the whole message must fit in
* the socket send buffer.
*/
local_add_more = uio->uio_resid;
}
len = 0;
if ((max_len < sctp_add_more_threshold) && (SCTP_SB_LIMIT_SND(so) > sctp_add_more_threshold)) {
if (((max_len < local_add_more) &&
(SCTP_SB_LIMIT_SND(so) > local_add_more)) ||
((stcb->asoc.chunks_on_out_queue + stcb->asoc.stream_queue_cnt) > sctp_max_chunks_on_queue)) {
/* No room right no ! */
SOCKBUF_LOCK(&so->so_snd);
while (SCTP_SB_LIMIT_SND(so) < (stcb->asoc.total_output_queue_size + sctp_add_more_threshold)) {
while ((SCTP_SB_LIMIT_SND(so) < (stcb->asoc.total_output_queue_size + sctp_add_more_threshold)) ||
((stcb->asoc.stream_queue_cnt + stcb->asoc.chunks_on_out_queue) > sctp_max_chunks_on_queue)) {
if (sctp_logging_level & SCTP_BLK_LOGGING_ENABLE) {
sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA,
so, asoc, uio->uio_resid);
@ -11505,7 +11541,6 @@ sctp_lower_sosend(struct socket *so,
struct sctp_stream_queue_pending *sp;
struct sctp_stream_out *strm;
uint32_t sndout, initial_out;
int user_marks_eor;
initial_out = uio->uio_resid;
@ -11520,7 +11555,6 @@ sctp_lower_sosend(struct socket *so,
SCTP_TCB_SEND_UNLOCK(stcb);
strm = &stcb->asoc.strmout[srcv->sinfo_stream];
user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
if (strm->last_msg_incomplete == 0) {
do_a_copy_in:
sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error, non_blocking);
@ -11937,6 +11971,7 @@ sctp_lower_sosend(struct socket *so,
}
sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
asoc->primary_destination);
sctp_feature_off(inp, SCTP_PCB_FLAGS_NODELAY);
}
}
}
@ -12152,3 +12187,81 @@ sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
return (m);
}
int
sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t * ro)
{
struct nd_prefix *pfx = NULL;
struct nd_pfxrouter *pfxrtr = NULL;
struct sockaddr_in6 gw6;
if (ro == NULL || ro->ro_rt == NULL || src6->sin6_family != AF_INET6)
return (0);
/* get prefix entry of address */
LIST_FOREACH(pfx, &nd_prefix, ndpr_entry) {
if (pfx->ndpr_stateflags & NDPRF_DETACHED)
continue;
if (IN6_ARE_MASKED_ADDR_EQUAL(&pfx->ndpr_prefix.sin6_addr,
&src6->sin6_addr, &pfx->ndpr_mask))
break;
}
/* no prefix entry in the prefix list */
if (pfx == NULL) {
SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefix entry for ");
SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
return (0);
}
SCTPDBG(SCTP_DEBUG_OUTPUT2, "v6src_match_nexthop(), Prefix entry is ");
SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
/* search installed gateway from prefix entry */
for (pfxrtr = pfx->ndpr_advrtrs.lh_first; pfxrtr; pfxrtr =
pfxrtr->pfr_next) {
memset(&gw6, 0, sizeof(struct sockaddr_in6));
gw6.sin6_family = AF_INET6;
gw6.sin6_len = sizeof(struct sockaddr_in6);
memcpy(&gw6.sin6_addr, &pfxrtr->router->rtaddr,
sizeof(struct in6_addr));
SCTPDBG(SCTP_DEBUG_OUTPUT2, "prefix router is ");
SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&gw6);
SCTPDBG(SCTP_DEBUG_OUTPUT2, "installed router is ");
SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
if (sctp_cmpaddr((struct sockaddr *)&gw6,
ro->ro_rt->rt_gateway)) {
SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is installed\n");
return (1);
}
}
SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is not installed\n");
return (0);
}
int
sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t * ro)
{
struct sockaddr_in *sin, *mask;
struct ifaddr *ifa;
struct in_addr srcnetaddr, gwnetaddr;
if (ro == NULL || ro->ro_rt == NULL ||
sifa->address.sa.sa_family != AF_INET) {
return (0);
}
ifa = (struct ifaddr *)sifa->ifa;
mask = (struct sockaddr_in *)(ifa->ifa_netmask);
sin = (struct sockaddr_in *)&sifa->address.sin;
srcnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: src address is ");
SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", srcnetaddr.s_addr);
sin = (struct sockaddr_in *)ro->ro_rt->rt_gateway;
gwnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: nexthop is ");
SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", gwnetaddr.s_addr);
if (srcnetaddr.s_addr == gwnetaddr.s_addr) {
return (1);
}
return (0);
}


@ -69,7 +69,10 @@ sctp_source_address_selection(struct sctp_inpcb *inp,
sctp_route_t * ro, struct sctp_nets *net,
int non_asoc_addr_ok, uint32_t vrf_id);
int
sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t * ro);
int
sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t * ro);
void sctp_send_initiate(struct sctp_inpcb *, struct sctp_tcb *);
@ -106,7 +109,7 @@ sctp_send_shutdown_complete2(struct mbuf *, int, struct sctphdr *,
void sctp_send_asconf(struct sctp_tcb *, struct sctp_nets *);
void sctp_send_asconf_ack(struct sctp_tcb *, uint32_t);
void sctp_send_asconf_ack(struct sctp_tcb *);
int sctp_get_frag_point(struct sctp_tcb *, struct sctp_association *);


@ -1996,11 +1996,13 @@ sctp_move_pcb_and_assoc(struct sctp_inpcb *old_inp, struct sctp_inpcb *new_inp,
struct sctppcbhead *head;
struct sctp_laddr *laddr, *oladdr;
atomic_add_int(&stcb->asoc.refcnt, 1);
SCTP_TCB_UNLOCK(stcb);
SCTP_INP_INFO_WLOCK();
SCTP_INP_WLOCK(old_inp);
SCTP_INP_WLOCK(new_inp);
SCTP_TCB_LOCK(stcb);
atomic_subtract_int(&stcb->asoc.refcnt, 1);
new_inp->sctp_ep.time_of_secret_change =
old_inp->sctp_ep.time_of_secret_change;
@ -2404,6 +2406,26 @@ sctp_inpcb_bind(struct socket *so, struct sockaddr *addr,
sctp_feature_on(inp, SCTP_PCB_FLAGS_DO_ASCONF);
sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF);
}
/*
* set the automatic mobility_base from kernel flag (by
* micchie)
*/
if (sctp_mobility_base == 0) {
sctp_mobility_feature_off(inp, SCTP_MOBILITY_BASE);
} else {
sctp_mobility_feature_on(inp, SCTP_MOBILITY_BASE);
}
/*
* set the automatic mobility_fasthandoff from kernel flag
* (by micchie)
*/
if (sctp_mobility_fasthandoff == 0) {
sctp_mobility_feature_off(inp, SCTP_MOBILITY_FASTHANDOFF);
sctp_mobility_feature_off(inp, SCTP_MOBILITY_DO_FASTHANDOFF);
} else {
sctp_mobility_feature_on(inp, SCTP_MOBILITY_FASTHANDOFF);
sctp_mobility_feature_off(inp, SCTP_MOBILITY_DO_FASTHANDOFF);
}
} else {
/*
* bind specific, make sure flags is off and add a new
@ -2645,18 +2667,13 @@ sctp_inpcb_free(struct sctp_inpcb *inp, int immediate, int from)
* send()/close or connect/send/close. And
* it wants the data to get across first.
*/
if (asoc->asoc.total_output_queue_size == 0) {
/*
* Just abandon things in the front
* states
*/
/* Just abandon things in the front states */
if (sctp_free_assoc(inp, asoc, SCTP_PCBFREE_NOFORCE,
SCTP_FROM_SCTP_PCB + SCTP_LOC_2) == 0) {
cnt_in_sd++;
}
continue;
if (sctp_free_assoc(inp, asoc, SCTP_PCBFREE_NOFORCE,
SCTP_FROM_SCTP_PCB + SCTP_LOC_2) == 0) {
cnt_in_sd++;
}
continue;
}
/* Disconnect the socket please */
asoc->sctp_socket = NULL;
@ -2721,7 +2738,7 @@ sctp_inpcb_free(struct sctp_inpcb *inp, int immediate, int from)
asoc->asoc.primary_destination);
sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, asoc->sctp_ep, asoc,
asoc->asoc.primary_destination);
sctp_chunk_output(inp, asoc, SCTP_OUTPUT_FROM_SHUT_TMR);
sctp_chunk_output(inp, asoc, SCTP_OUTPUT_FROM_CLOSING);
}
} else {
/* mark into shutdown pending */
@ -2782,6 +2799,8 @@ sctp_inpcb_free(struct sctp_inpcb *inp, int immediate, int from)
cnt_in_sd++;
}
continue;
} else {
sctp_chunk_output(inp, asoc, SCTP_OUTPUT_FROM_CLOSING);
}
}
cnt_in_sd++;
@ -3751,6 +3770,7 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
struct sctp_laddr *laddr;
struct sctp_tmit_chunk *chk;
struct sctp_asconf_addr *aparam;
struct sctp_asconf_ack *aack;
struct sctp_stream_reset_list *liste;
struct sctp_queued_to_read *sq;
struct sctp_stream_queue_pending *sp;
@ -4233,9 +4253,16 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
TAILQ_REMOVE(&asoc->asconf_queue, aparam, next);
SCTP_FREE(aparam, SCTP_M_ASC_ADDR);
}
if (asoc->last_asconf_ack_sent != NULL) {
sctp_m_freem(asoc->last_asconf_ack_sent);
asoc->last_asconf_ack_sent = NULL;
while (!TAILQ_EMPTY(&asoc->asconf_ack_sent)) {
aack = TAILQ_FIRST(&asoc->asconf_ack_sent);
TAILQ_REMOVE(&asoc->asconf_ack_sent, aack, next);
if (aack->last_sent_to != NULL) {
sctp_free_remote_addr(aack->last_sent_to);
}
if (aack->data != NULL) {
sctp_m_freem(aack->data);
}
SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_asconf_ack, aack);
}
/* clean up auth stuff */
if (asoc->local_hmacs)
@ -4746,6 +4773,11 @@ sctp_pcb_init()
sizeof(struct sctp_stream_queue_pending),
(sctp_max_number_of_assoc * sctp_chunkscale));
SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_asconf_ack, "sctp_asconf_ack",
sizeof(struct sctp_asconf_ack),
(sctp_max_number_of_assoc * sctp_chunkscale));
/* Master Lock INIT for info structure */
SCTP_INP_INFO_LOCK_INIT();
SCTP_STATLOG_INIT_LOCK();
@ -4830,6 +4862,8 @@ sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m,
uint8_t hmacs_store[SCTP_PARAM_BUFFER_SIZE];
struct sctp_auth_hmac_algo *hmacs = NULL;
uint16_t hmacs_len = 0;
uint8_t saw_asconf = 0;
uint8_t saw_asconf_ack = 0;
uint8_t chunks_store[SCTP_PARAM_BUFFER_SIZE];
struct sctp_auth_chunk_list *chunks = NULL;
uint16_t num_chunks = 0;
@ -5107,8 +5141,8 @@ sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m,
(struct sctp_paramhdr *)&ai, sizeof(ai));
aip = (struct sctp_adaptation_layer_indication *)phdr;
if (aip) {
sctp_ulp_notify(SCTP_NOTIFY_ADAPTATION_INDICATION,
stcb, ntohl(aip->indication), NULL);
stcb->asoc.peers_adaptation = ntohl(aip->indication);
stcb->asoc.adaptation_needed = 1;
}
}
} else if (ptype == SCTP_SET_PRIM_ADDR) {
@ -5279,6 +5313,12 @@ sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m,
for (i = 0; i < num_chunks; i++) {
(void)sctp_auth_add_chunk(chunks->chunk_types[i],
stcb->asoc.peer_auth_chunks);
/* record asconf/asconf-ack if listed */
if (chunks->chunk_types[i] == SCTP_ASCONF)
saw_asconf = 1;
if (chunks->chunk_types[i] == SCTP_ASCONF_ACK)
saw_asconf_ack = 1;
}
got_chklist = 1;
} else if ((ptype == SCTP_HEARTBEAT_INFO) ||
@ -5341,6 +5381,9 @@ sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m,
!stcb->asoc.peer_supports_auth) {
/* peer supports asconf but not auth? */
return (-32);
} else if ((stcb->asoc.peer_supports_asconf) && (stcb->asoc.peer_supports_auth) &&
((saw_asconf == 0) || (saw_asconf_ack == 0))) {
return (-33);
}
/* concatenate the full random key */
#ifdef SCTP_AUTH_DRAFT_04
@ -5376,7 +5419,7 @@ sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m,
#endif
else {
/* failed to get memory for the key */
return (-33);
return (-34);
}
if (stcb->asoc.authinfo.peer_random != NULL)
sctp_free_key(stcb->asoc.authinfo.peer_random);


@ -184,6 +184,7 @@ struct sctp_epinfo {
sctp_zone_t ipi_zone_chunk;
sctp_zone_t ipi_zone_readq;
sctp_zone_t ipi_zone_strmoq;
sctp_zone_t ipi_zone_asconf_ack;
struct mtx ipi_ep_mtx;
struct mtx it_mtx;
@ -356,6 +357,7 @@ struct sctp_inpcb {
struct socket *sctp_socket;
uint32_t sctp_flags; /* INP state flag set */
uint32_t sctp_features; /* Feature flags */
uint32_t sctp_mobility_features; /* Mobility Feature flags */
struct sctp_pcb sctp_ep;/* SCTP ep data */
/* head of the hash of all associations */
struct sctpasochead *sctp_tcbhash;


@ -110,11 +110,22 @@ sctp_do_peeloff(struct socket *head, struct socket *so, sctp_assoc_t assoc_id)
(SCTP_PCB_COPY_FLAGS & inp->sctp_flags));
n_inp->sctp_socket = so;
n_inp->sctp_features = inp->sctp_features;
n_inp->sctp_mobility_features = inp->sctp_mobility_features;
n_inp->sctp_frag_point = inp->sctp_frag_point;
n_inp->partial_delivery_point = inp->partial_delivery_point;
n_inp->sctp_context = inp->sctp_context;
n_inp->inp_starting_point_for_iterator = NULL;
/* copy in the authentication parameters from the original endpoint */
if (n_inp->sctp_ep.local_hmacs)
sctp_free_hmaclist(n_inp->sctp_ep.local_hmacs);
n_inp->sctp_ep.local_hmacs =
sctp_copy_hmaclist(inp->sctp_ep.local_hmacs);
if (n_inp->sctp_ep.local_auth_chunks)
sctp_free_chunklist(n_inp->sctp_ep.local_auth_chunks);
n_inp->sctp_ep.local_auth_chunks =
sctp_copy_chunklist(inp->sctp_ep.local_auth_chunks);
(void)sctp_copy_skeylist(&inp->sctp_ep.shared_keys,
&n_inp->sctp_ep.shared_keys);
/*
* Now we must move it from one hash table to another and get the
* stcb in the right place.
@ -169,6 +180,7 @@ sctp_get_peeloff(struct socket *head, sctp_assoc_t assoc_id, int *error)
SCTP_PCB_FLAGS_IN_TCPPOOL | /* Turn on Blocking IO */
(SCTP_PCB_COPY_FLAGS & inp->sctp_flags));
n_inp->sctp_features = inp->sctp_features;
n_inp->sctp_mobility_features = inp->sctp_mobility_features;
n_inp->sctp_frag_point = inp->sctp_frag_point;
n_inp->partial_delivery_point = inp->partial_delivery_point;
n_inp->sctp_context = inp->sctp_context;


@ -547,6 +547,16 @@ struct sctp_cc_functions {
struct sctp_tcb *stcb, struct sctp_nets *net);
};
/* used to save ASCONF-ACK chunks for retransmission */
TAILQ_HEAD(sctp_asconf_ackhead, sctp_asconf_ack);
struct sctp_asconf_ack {
TAILQ_ENTRY(sctp_asconf_ack) next;
uint32_t serial_number;
struct sctp_nets *last_sent_to;
struct mbuf *data;
uint16_t len;
};
/*
* Here we have information about each individual association that we track.
* We probably in production would be more dynamic. But for ease of
@ -622,7 +632,7 @@ struct sctp_association {
struct sctp_iterator *stcb_starting_point_for_iterator;
/* ASCONF save the last ASCONF-ACK so we can resend it if necessary */
struct mbuf *last_asconf_ack_sent;
struct sctp_asconf_ackhead asconf_ack_sent;
/*
* pointer to last stream reset queued to control queue by us with
@ -870,7 +880,7 @@ struct sctp_association {
uint32_t refcnt;
uint32_t chunks_on_out_queue; /* total chunks floating around,
* locked by send socket buffer */
uint32_t peers_adaptation;
uint16_t peer_hmac_id; /* peer HMAC id to send */
/*
@ -1001,6 +1011,8 @@ struct sctp_association {
uint8_t saw_sack_with_frags;
uint8_t in_restart_hash;
uint8_t assoc_up_sent;
uint8_t adaptation_needed;
uint8_t adaptation_sent;
/* CMT variables */
uint8_t cmt_dac_pkts_rcvd;
uint8_t sctp_cmt_on_off;


@ -95,6 +95,8 @@ uint32_t sctp_cmt_on_off = 0;
uint32_t sctp_cmt_use_dac = 0;
uint32_t sctp_cmt_pf = 0;
uint32_t sctp_max_retran_chunk = SCTPCTL_MAX_RETRAN_CHUNK_DEFAULT;
uint32_t sctp_mobility_base = SCTP_DEFAULT_MOBILITY_BASE;
uint32_t sctp_mobility_fasthandoff = SCTP_DEFAULT_MOBILITY_FASTHANDOFF;
/* JRS - Variable for default congestion control module */
uint32_t sctp_default_cc_module = SCTPCTL_DEFAULT_CC_MODULE_DEFAULT;
@ -708,6 +710,16 @@ SYSCTL_INT(_net_inet_sctp, OID_AUTO, sctp_logging, CTLFLAG_RW,
&sctp_logging_level, 0,
SCTPCTL_LOGGING_LEVEL_DESC);
#if defined(__FreeBSD__) || defined(SCTP_APPLE_MOBILITY_BASE)
SYSCTL_INT(_net_inet_sctp, OID_AUTO, mobility_base, CTLFLAG_RW,
&sctp_mobility_base, 0, "Enable SCTP Mobility");
#endif
#if defined(__FreeBSD__) || defined(SCTP_APPLE_MOBILITY_FASTHANDOFF)
SYSCTL_INT(_net_inet_sctp, OID_AUTO, mobility_fasthandoff, CTLFLAG_RW,
&sctp_mobility_fasthandoff, 0, "Enable SCTP fast handoff");
#endif
#ifdef SCTP_DEBUG
SYSCTL_INT(_net_inet_sctp, OID_AUTO, debug, CTLFLAG_RW,


@ -421,19 +421,33 @@ __FBSDID("$FreeBSD$");
#define SCTPCTL_DEFAULT_FRAG_INTERLEAVE_MAX 2
#define SCTPCTL_DEFAULT_FRAG_INTERLEAVE_DEFAULT 1
/* mobility_base: Enable SCTP mobility support */
#define SCTPCTL_MOBILITY_BASE 55
#define SCTPCTL_MOBILITY_BASE_DESC "Enable SCTP base mobility"
#define SCTPCTL_MOBILITY_BASE_MIN 0
#define SCTPCTL_MOBILITY_BASE_MAX 1
#define SCTPCTL_MOBILITY_BASE_DEFAULT SCTP_DEFAULT_MOBILITY_BASE
/* mobility_fasthandoff: Enable SCTP fast handoff support */
#define SCTPCTL_MOBILITY_FASTHANDOFF 56
#define SCTPCTL_MOBILITY_FASTHANDOFF_DESC "Enable SCTP fast handoff"
#define SCTPCTL_MOBILITY_FASTHANDOFF_MIN 0
#define SCTPCTL_MOBILITY_FASTHANDOFF_MAX 1
#define SCTPCTL_MOBILITY_FASTHANDOFF_DEFAULT SCTP_DEFAULT_MOBILITY_FASTHANDOFF
#ifdef SCTP_DEBUG
/* debug: Configure debug output */
#define SCTPCTL_DEBUG 55
#define SCTPCTL_DEBUG 57
#define SCTPCTL_DEBUG_DESC "Configure debug output"
#define SCTPCTL_DEBUG_MIN 0
#define SCTPCTL_DEBUG_MAX 0xFFFFFFFF
#define SCTPCTL_DEBUG_DEFAULT 0
#define SCTPCTL_MAXID 55
#define SCTPCTL_MAXID 57
#else
#define SCTPCTL_MAXID 56
#define SCTPCTL_MAXID 58
#endif
/*
@ -497,6 +511,8 @@ __FBSDID("$FreeBSD$");
{ "max_retran_chunk", CTLTYPE_INT }, \
{ "sctp_logging", CTLTYPE_INT }, \
{ "frag_interleave", CTLTYPE_INT }, \
{ "mobility_base", CTLTYPE_INT }, \
{ "mobility_fasthandoff", CTLTYPE_INT }, \
{ "debug", CTLTYPE_INT }, \
}
#else
@ -556,6 +572,8 @@ __FBSDID("$FreeBSD$");
{ "max_retran_chunk", CTLTYPE_INT }, \
{ "sctp_logging", CTLTYPE_INT }, \
{ "frag_interleave", CTLTYPE_INT }, \
{ "mobility_base", CTLTYPE_INT }, \
{ "mobility_fasthandoff", CTLTYPE_INT }, \
}
#endif
@ -624,6 +642,8 @@ extern uint32_t sctp_strict_data_order;
extern uint32_t sctp_min_residual;
extern uint32_t sctp_max_retran_chunk;
extern uint32_t sctp_logging_level;
extern uint32_t sctp_mobility_base;
extern uint32_t sctp_mobility_fasthandoff;
#if defined(SCTP_DEBUG)
extern uint32_t sctp_debug_on;


@ -95,6 +95,7 @@ struct sctp_sndrcvinfo {
uint16_t sinfo_stream;
uint16_t sinfo_ssn;
uint16_t sinfo_flags;
uint16_t sinfo_pr_policy;
uint32_t sinfo_ppid;
uint32_t sinfo_context;
uint32_t sinfo_timetolive;
@ -108,6 +109,7 @@ struct sctp_extrcvinfo {
uint16_t sinfo_stream;
uint16_t sinfo_ssn;
uint16_t sinfo_flags;
uint16_t sinfo_pr_policy;
uint32_t sinfo_ppid;
uint32_t sinfo_context;
uint32_t sinfo_timetolive;
@ -144,6 +146,8 @@ struct sctp_snd_all_completes {
#define SCTP_ADDR_OVER 0x0800/* Override the primary-address */
#define SCTP_SENDALL 0x1000/* Send this on all associations */
#define SCTP_EOR 0x2000/* end of message signal */
#define SCTP_PR_POLICY_VALID 0x4000 /* pr sctp policy valid */
#define INVALID_SINFO_FLAG(x) (((x) & 0xffffff00 \
& ~(SCTP_EOF | SCTP_ABORT | SCTP_UNORDERED |\
SCTP_ADDR_OVER | SCTP_SENDALL | SCTP_EOR)) != 0)


@ -583,7 +583,7 @@ sctp_bind(struct socket *so, struct sockaddr *addr, struct thread *p)
return error;
}
static void
void
sctp_close(struct socket *so)
{
struct sctp_inpcb *inp;
@ -733,7 +733,7 @@ sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
}
}
static int
int
sctp_disconnect(struct socket *so)
{
struct sctp_inpcb *inp;
@ -890,6 +890,8 @@ sctp_disconnect(struct socket *so)
SCTP_INP_RUNLOCK(inp);
(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_5);
return (0);
} else {
sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING);
}
}
SCTP_TCB_UNLOCK(stcb);
@ -1023,6 +1025,8 @@ sctp_shutdown(struct socket *so)
SCTP_RESPONSE_TO_USER_REQ,
op_err);
goto skip_unlock;
} else {
sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING);
}
}
SCTP_TCB_UNLOCK(stcb);


@ -48,6 +48,12 @@ extern struct pr_usrreqs sctp_usrreqs;
#define sctp_is_feature_on(inp, feature) (inp->sctp_features & feature)
#define sctp_is_feature_off(inp, feature) ((inp->sctp_features & feature) == 0)
/* managing mobility_feature in inpcb (by micchie) */
#define sctp_mobility_feature_on(inp, feature) (inp->sctp_mobility_features |= feature)
#define sctp_mobility_feature_off(inp, feature) (inp->sctp_mobility_features &= ~feature)
#define sctp_is_mobility_feature_on(inp, feature) (inp->sctp_mobility_features & feature)
#define sctp_is_mobility_feature_off(inp, feature) ((inp->sctp_mobility_features & feature) == 0)
#define sctp_sbspace(asoc, sb) ((long) (((sb)->sb_hiwat > (asoc)->sb_cc) ? ((sb)->sb_hiwat - (asoc)->sb_cc) : 0))
#define sctp_sbspace_failedmsgs(sb) ((long) (((sb)->sb_hiwat > (sb)->sb_cc) ? ((sb)->sb_hiwat - (sb)->sb_cc) : 0))
@ -90,19 +96,24 @@ extern struct pr_usrreqs sctp_usrreqs;
#define sctp_free_a_chunk(_stcb, _chk) { \
SCTP_TCB_LOCK_ASSERT((_stcb)); \
if ((_chk)->whoTo) { \
sctp_free_remote_addr((_chk)->whoTo); \
(_chk)->whoTo = NULL; \
} \
if (((_stcb)->asoc.free_chunk_cnt > sctp_asoc_free_resc_limit) || \
(sctppcbinfo.ipi_free_chunks > sctp_system_free_resc_limit)) { \
if(_stcb) { \
SCTP_TCB_LOCK_ASSERT((_stcb)); \
if ((_chk)->whoTo) { \
sctp_free_remote_addr((_chk)->whoTo); \
(_chk)->whoTo = NULL; \
} \
if (((_stcb)->asoc.free_chunk_cnt > sctp_asoc_free_resc_limit) || \
(sctppcbinfo.ipi_free_chunks > sctp_system_free_resc_limit)) { \
SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, (_chk)); \
SCTP_DECR_CHK_COUNT(); \
} else { \
TAILQ_INSERT_TAIL(&(_stcb)->asoc.free_chunks, (_chk), sctp_next); \
(_stcb)->asoc.free_chunk_cnt++; \
atomic_add_int(&sctppcbinfo.ipi_free_chunks, 1); \
} \
} else { \
SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, (_chk)); \
SCTP_DECR_CHK_COUNT(); \
} else { \
TAILQ_INSERT_TAIL(&(_stcb)->asoc.free_chunks, (_chk), sctp_next); \
(_stcb)->asoc.free_chunk_cnt++; \
atomic_add_int(&sctppcbinfo.ipi_free_chunks, 1); \
} \
}
@ -156,12 +167,6 @@ extern struct pr_usrreqs sctp_usrreqs;
if (val < MSIZE) { \
panic("sb_mbcnt goes negative"); \
} \
if (SCTP_BUF_IS_EXTENDED(m)) { \
val = atomic_fetchadd_int(&(sb)->sb_mbcnt,-(SCTP_BUF_EXTEND_SIZE(m))); \
if (val < SCTP_BUF_EXTEND_SIZE(m)) { \
panic("sb_mbcnt goes negative2"); \
} \
} \
if (((ctl)->do_not_ref_stcb == 0) && stcb) {\
val = atomic_fetchadd_int(&(stcb)->asoc.sb_cc,-(SCTP_BUF_LEN((m)))); \
if (val < SCTP_BUF_LEN((m))) {\
@ -181,8 +186,6 @@ extern struct pr_usrreqs sctp_usrreqs;
#define sctp_sballoc(stcb, sb, m) { \
atomic_add_int(&(sb)->sb_cc,SCTP_BUF_LEN((m))); \
atomic_add_int(&(sb)->sb_mbcnt, MSIZE); \
if (SCTP_BUF_IS_EXTENDED(m)) \
atomic_add_int(&(sb)->sb_mbcnt,SCTP_BUF_EXTEND_SIZE(m)); \
if (stcb) { \
atomic_add_int(&(stcb)->asoc.sb_cc,SCTP_BUF_LEN((m))); \
atomic_add_int(&(stcb)->asoc.my_rwnd_control_len, MSIZE); \
@ -289,6 +292,10 @@ struct sctp_inpcb;
struct sctp_tcb;
struct sctphdr;
void sctp_close(struct socket *so);
int sctp_disconnect(struct socket *so);
void sctp_ctlinput __P((int, struct sockaddr *, void *));
int sctp_ctloutput __P((struct socket *, struct sockopt *));
void sctp_input __P((struct mbuf *, int));


@ -1016,7 +1016,7 @@ sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
TAILQ_INIT(&asoc->nets);
TAILQ_INIT(&asoc->pending_reply_queue);
asoc->last_asconf_ack_sent = NULL;
TAILQ_INIT(&asoc->asconf_ack_sent);
/* Setup to fill the hb random cache at first HB */
asoc->hb_random_idx = 4;
@ -3128,7 +3128,7 @@ sctp_notify_adaptation_layer(struct sctp_tcb *stcb,
sai->sai_type = SCTP_ADAPTATION_INDICATION;
sai->sai_flags = 0;
sai->sai_length = sizeof(struct sctp_adaptation_event);
sai->sai_adaptation_ind = error;
sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
sai->sai_assoc_id = sctp_get_associd(stcb);
SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
@ -3376,22 +3376,15 @@ sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
return;
}
}
if (stcb && (stcb->asoc.assoc_up_sent == 0) && (notification != SCTP_NOTIFY_ASSOC_UP)) {
if ((notification != SCTP_NOTIFY_ASSOC_DOWN) &&
(notification != SCTP_NOTIFY_ASSOC_ABORTED) &&
(notification != SCTP_NOTIFY_SPECIAL_SP_FAIL) &&
(notification != SCTP_NOTIFY_DG_FAIL) &&
(notification != SCTP_NOTIFY_PEER_SHUTDOWN)) {
sctp_notify_assoc_change(SCTP_COMM_UP, stcb, 0, NULL);
stcb->asoc.assoc_up_sent = 1;
}
}
switch (notification) {
case SCTP_NOTIFY_ASSOC_UP:
if (stcb->asoc.assoc_up_sent == 0) {
sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL);
stcb->asoc.assoc_up_sent = 1;
}
if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
sctp_notify_adaptation_layer(stcb, error);
}
break;
case SCTP_NOTIFY_ASSOC_DOWN:
sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL);
@ -3431,10 +3424,6 @@ sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
sctp_notify_send_failed(stcb, error,
(struct sctp_tmit_chunk *)data);
break;
case SCTP_NOTIFY_ADAPTATION_INDICATION:
/* Here the error is the adaptation indication */
sctp_notify_adaptation_layer(stcb, error);
break;
case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
{
uint32_t val;


@ -124,42 +124,31 @@ sctp6_input(struct mbuf **i_pak, int *offp, int proto)
/* destination port of 0 is illegal, based on RFC2960. */
if (sh->dest_port == 0)
goto bad;
if ((sctp_no_csum_on_loopback == 0) ||
(!SCTP_IS_IT_LOOPBACK(m))) {
/*
* we do NOT validate things from the loopback if the sysctl
* is set to 1.
*/
check = sh->checksum; /* save incoming checksum */
if ((check == 0) && (sctp_no_csum_on_loopback)) {
/*
* special hook for where we got a local address
* somehow routed across a non IFT_LOOP type
* interface
*/
if (IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &ip6->ip6_dst))
goto sctp_skip_csum;
}
sh->checksum = 0; /* prepare for calc */
calc_check = sctp_calculate_sum(m, &mlen, iphlen);
if (calc_check != check) {
SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x m:%p mlen:%d iphlen:%d\n",
calc_check, check, m, mlen, iphlen);
stcb = sctp_findassociation_addr(m, iphlen, offset - sizeof(*ch),
sh, ch, &in6p, &net, vrf_id);
/* in6p's ref-count increased && stcb locked */
if ((in6p) && (stcb)) {
sctp_send_packet_dropped(stcb, net, m, iphlen, 1);
sctp_chunk_output((struct sctp_inpcb *)in6p, stcb, 2);
} else if ((in6p != NULL) && (stcb == NULL)) {
refcount_up = 1;
}
SCTP_STAT_INCR(sctps_badsum);
SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors);
goto bad;
}
sh->checksum = calc_check;
check = sh->checksum; /* save incoming checksum */
if ((check == 0) && (sctp_no_csum_on_loopback) &&
(IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &ip6->ip6_dst))) {
goto sctp_skip_csum;
}
sh->checksum = 0; /* prepare for calc */
calc_check = sctp_calculate_sum(m, &mlen, iphlen);
if (calc_check != check) {
SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x m:%p mlen:%d iphlen:%d\n",
calc_check, check, m, mlen, iphlen);
stcb = sctp_findassociation_addr(m, iphlen, offset - sizeof(*ch),
sh, ch, &in6p, &net, vrf_id);
/* in6p's ref-count increased && stcb locked */
if ((in6p) && (stcb)) {
sctp_send_packet_dropped(stcb, net, m, iphlen, 1);
sctp_chunk_output((struct sctp_inpcb *)in6p, stcb, 2);
} else if ((in6p != NULL) && (stcb == NULL)) {
refcount_up = 1;
}
SCTP_STAT_INCR(sctps_badsum);
SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors);
goto bad;
}
sh->checksum = calc_check;
sctp_skip_csum:
net = NULL;
/*
@ -653,59 +642,7 @@ sctp6_bind(struct socket *so, struct sockaddr *addr, struct thread *p)
static void
sctp6_close(struct socket *so)
{
struct sctp_inpcb *inp;
uint32_t flags;
inp = (struct sctp_inpcb *)so->so_pcb;
if (inp == 0)
return;
/*
* Inform all the lower layer assoc that we are done.
*/
sctp_must_try_again:
flags = inp->sctp_flags;
#ifdef SCTP_LOG_CLOSING
sctp_log_closing(inp, NULL, 17);
#endif
if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
(atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
if (((so->so_options & SO_LINGER) && (so->so_linger == 0)) ||
(so->so_rcv.sb_cc > 0)) {
#ifdef SCTP_LOG_CLOSING
sctp_log_closing(inp, NULL, 13);
#endif
sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT
,SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
} else {
#ifdef SCTP_LOG_CLOSING
sctp_log_closing(inp, NULL, 14);
#endif
sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_GRACEFUL_CLOSE,
SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
}
/*
* The socket is now detached, no matter what the state of
* the SCTP association.
*/
SOCK_LOCK(so);
SCTP_SB_CLEAR(so->so_snd);
/*
* same for the rcv ones, they are only here for the
* accounting/select.
*/
SCTP_SB_CLEAR(so->so_rcv);
/* Now null out the reference, we are completely detached. */
so->so_pcb = NULL;
SOCK_UNLOCK(so);
} else {
flags = inp->sctp_flags;
if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
goto sctp_must_try_again;
}
}
return;
sctp_close(so);
}
/* This could be made common with sctp_detach() since they are identical */
@ -714,115 +651,7 @@ static
int
sctp6_disconnect(struct socket *so)
{
struct sctp_inpcb *inp;
inp = (struct sctp_inpcb *)so->so_pcb;
if (inp == NULL) {
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ENOTCONN);
return (ENOTCONN);
}
SCTP_INP_RLOCK(inp);
if (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
if (SCTP_LIST_EMPTY(&inp->sctp_asoc_list)) {
/* No connection */
SCTP_INP_RUNLOCK(inp);
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ENOTCONN);
return (ENOTCONN);
} else {
int some_on_streamwheel = 0;
struct sctp_association *asoc;
struct sctp_tcb *stcb;
stcb = LIST_FIRST(&inp->sctp_asoc_list);
if (stcb == NULL) {
SCTP_INP_RUNLOCK(inp);
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
return (EINVAL);
}
SCTP_TCB_LOCK(stcb);
asoc = &stcb->asoc;
if (((so->so_options & SO_LINGER) &&
(so->so_linger == 0)) ||
(so->so_rcv.sb_cc > 0)) {
if (SCTP_GET_STATE(asoc) !=
SCTP_STATE_COOKIE_WAIT) {
/* Left with Data unread */
struct mbuf *op_err;
op_err = sctp_generate_invmanparam(SCTP_CAUSE_USER_INITIATED_ABT);
sctp_send_abort_tcb(stcb, op_err);
SCTP_STAT_INCR_COUNTER32(sctps_aborted);
}
SCTP_INP_RUNLOCK(inp);
if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
(SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
SCTP_STAT_DECR_GAUGE32(sctps_currestab);
}
if (sctp_free_assoc(inp, stcb, SCTP_DONOT_SETSCOPE,
SCTP_FROM_SCTP6_USRREQ + SCTP_LOC_2) == 0) {
SCTP_TCB_UNLOCK(stcb);
}
/* No unlock tcb assoc is gone */
return (0);
}
if (!TAILQ_EMPTY(&asoc->out_wheel)) {
/* Check to see if some data queued */
struct sctp_stream_out *outs;
TAILQ_FOREACH(outs, &asoc->out_wheel,
next_spoke) {
if (!TAILQ_EMPTY(&outs->outqueue)) {
some_on_streamwheel = 1;
break;
}
}
}
if (TAILQ_EMPTY(&asoc->send_queue) &&
TAILQ_EMPTY(&asoc->sent_queue) &&
(some_on_streamwheel == 0)) {
/* nothing queued to send, so I'm done... */
if ((SCTP_GET_STATE(asoc) !=
SCTP_STATE_SHUTDOWN_SENT) &&
(SCTP_GET_STATE(asoc) !=
SCTP_STATE_SHUTDOWN_ACK_SENT)) {
/* only send SHUTDOWN the first time */
sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
sctp_chunk_output(stcb->sctp_ep, stcb, 1);
if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
(SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
SCTP_STAT_DECR_GAUGE32(sctps_currestab);
}
SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
stcb->sctp_ep, stcb,
asoc->primary_destination);
sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
stcb->sctp_ep, stcb,
asoc->primary_destination);
}
} else {
/*
* we still got (or just got) data to send,
* so set SHUTDOWN_PENDING
*/
/*
* XXX sockets draft says that MSG_EOF
* should be sent with no data. currently,
* we will allow user data to be sent first
* and move to SHUTDOWN-PENDING
*/
asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
}
SCTP_TCB_UNLOCK(stcb);
SCTP_INP_RUNLOCK(inp);
return (0);
}
} else {
/* UDP model does not support this */
SCTP_INP_RUNLOCK(inp);
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EOPNOTSUPP);
return EOPNOTSUPP;
}
return (sctp_disconnect(so));
}