Code from the hack-session known as the IETF (and a bit of debugging afterwards):
- Fix protection code for notification generation.
- Decouple the associd from the vtag.
- Allow vtags to have less stringent uniqueness requirements (see the
  first sketch after this list):
   o Don't pre-hash them when issuing one in a cookie.
   o Allow duplicates and use addresses and ports to
     discriminate amongst the duplicates during lookup.
- Add support for the NAT draft (draft-ietf-behave-sctpnat-00). This is
  still experimental and needs more extensive testing with the
  Jason Butt ipfw changes.
- Add support for the SENDER_DRY event, needed to get DTLS in OpenSSL
  working with a set of patches from Michael Tuexen (hopefully heading to
  OpenSSL soon).
- Update the SCTP-AUTH support (by Peter Lei).
- Use macros for refcounting (see the second sketch after this list).
- Fix MTU for UDP encapsulation.
- Fix reporting back of unsent data.
- Update assoc send counter handling to be consistent with endpoint sent counter.
- Fix a bug in PR-SCTP.
- Fix it so that we only send another FWD-TSN when a SACK arrives if, and
  only if, the adv-peer-ack point has progressed. However, we still make
  sure a timer is running if we do have an adv_peer_ack point.
- Fix a PR-SCTP bug where chunks were retransmitted if they were sent
  unreliably but not yet abandoned.
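
The two sketches below are illustrative only and are not part of the
committed code; the types and function names in them are made up for the
example.

The first sketch shows the idea behind the duplicate-vtag change: once
vtags are allowed to collide, a lookup that starts from the vtag hash
chain has to fall back on the packet's addresses and ports to pick the
right association (the actual kernel lookup change is not reproduced in
full in this excerpt):

    #include <stdint.h>
    #include <stddef.h>

    /* Hypothetical, much-simplified association record; the kernel works
     * on struct sctp_tcb instead. */
    struct assoc {
            uint32_t vtag;          /* verification tag, no longer unique */
            uint32_t peer_addr;     /* peer IPv4 address (network order) */
            uint16_t peer_port;     /* peer SCTP port */
            uint16_t local_port;    /* local SCTP port */
            struct assoc *next;     /* vtag hash-chain link */
    };

    static struct assoc *
    lookup_assoc(struct assoc *chain, uint32_t vtag,
        uint32_t peer_addr, uint16_t peer_port, uint16_t local_port)
    {
            struct assoc *a;

            for (a = chain; a != NULL; a = a->next) {
                    if (a->vtag != vtag)
                            continue;       /* different tag, keep walking */
                    if (a->peer_addr == peer_addr &&
                        a->peer_port == peer_port &&
                        a->local_port == local_port)
                            return (a);     /* tag plus addresses/ports match */
            }
            return (NULL);                  /* no association for this packet */
    }

The second sketch condenses the shared-key refcounting pattern added in
sctp_auth.c below (sctp_auth_key_acquire()/sctp_auth_key_release()); the
kernel code uses atomic_add_int() and the SCTP_DECREMENT_AND_CHECK_REFCOUNT()
macro rather than the plain integer arithmetic shown here:

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical, much-simplified shared-key record; the real
     * sctp_sharedkey_t (see the sctp_auth.h diff) also carries the key
     * text and the list linkage. */
    struct shared_key {
            uint32_t refcount;      /* one reference per queued chunk using the key */
            uint8_t  deactivated;   /* set when the application deactivates the key */
            uint16_t keyid;
    };

    /* Taken when a chunk that will be signed with this key id is queued. */
    static void
    key_acquire(struct shared_key *skey)
    {
            skey->refcount++;
    }

    /* Dropped when that chunk is freed; once the last user is gone and the
     * key was deactivated, the ULP is told the key is no longer in use
     * (SCTP_NOTIFY_AUTH_FREE_KEY in the diff below). */
    static void
    key_release(struct shared_key *skey)
    {
            skey->refcount--;
            if (skey->refcount == 0 && skey->deactivated)
                    printf("key %u no longer used, notify ULP\n",
                        (unsigned)skey->keyid);
    }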

With the help of:	Michael Tuexen and Peter Lei :-)
MFC after:	 4 weeks
Randall Stewart 2008-12-06 13:19:54 +00:00
parent d27a975f72
commit 830d754d52
25 changed files with 6917 additions and 885 deletions


@ -1,5 +1,5 @@
/*-
* Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -111,6 +111,7 @@ struct sctp_paramhdr {
/* explicit EOR signalling */
#define SCTP_EXPLICIT_EOR 0x0000001b
#define SCTP_REUSE_PORT 0x0000001c /* rw */
#define SCTP_AUTH_DEACTIVATE_KEY 0x0000001d
/*
* read-only options
@ -154,6 +155,8 @@ struct sctp_paramhdr {
/* CMT ON/OFF socket option */
#define SCTP_CMT_ON_OFF 0x00001200
#define SCTP_CMT_USE_DAC 0x00001201
/* EY - NR_SACK on/off socket option */
#define SCTP_NR_SACK_ON_OFF 0x00001300
/* JRS - Pluggable Congestion Control Socket option */
#define SCTP_PLUGGABLE_CC 0x00001202
@ -293,11 +296,15 @@ struct sctp_paramhdr {
#define SCTP_CAUSE_PROTOCOL_VIOLATION 0x000d
/* Error causes from RFC5061 */
#define SCTP_CAUSE_DELETING_LAST_ADDR 0xa0
#define SCTP_CAUSE_RESOURCE_SHORTAGE 0xa1
#define SCTP_CAUSE_DELETING_SRC_ADDR 0xa2
#define SCTP_CAUSE_ILLEGAL_ASCONF_ACK 0xa3
#define SCTP_CAUSE_REQUEST_REFUSED 0xa4
#define SCTP_CAUSE_DELETING_LAST_ADDR 0x00a0
#define SCTP_CAUSE_RESOURCE_SHORTAGE 0x00a1
#define SCTP_CAUSE_DELETING_SRC_ADDR 0x00a2
#define SCTP_CAUSE_ILLEGAL_ASCONF_ACK 0x00a3
#define SCTP_CAUSE_REQUEST_REFUSED 0x00a4
/* Error causes from nat-draft */
#define SCTP_CAUSE_NAT_COLLIDING_STATE 0x00b0
#define SCTP_CAUSE_NAT_MISSING_STATE 0x00b1
/* Error causes from RFC4895 */
#define SCTP_CAUSE_UNSUPPORTED_HMACID 0x0105
@ -364,6 +371,8 @@ struct sctp_error_unrecognized_chunk {
#define SCTP_SHUTDOWN_COMPLETE 0x0e
/* RFC4895 */
#define SCTP_AUTHENTICATION 0x0f
/* EY nr_sack chunk id*/
#define SCTP_NR_SELECTIVE_ACK 0x10
/************0x40 series ***********/
/************0x80 series ***********/
/* RFC5061 */
@ -406,6 +415,9 @@ struct sctp_error_unrecognized_chunk {
/* ECN Nonce: SACK Chunk Specific Flags */
#define SCTP_SACK_NONCE_SUM 0x01
/* EY nr_sack all bit - All bit is the 2nd LSB of nr_sack chunk flags*/
/* if the All bit is set in an nr-sack chunk, then all gap acks are nr gap acks */
#define SCTP_NR_SACK_ALL_BIT 0x02
/* CMT DAC algorithm SACK flag */
#define SCTP_SACK_CMT_DAC 0x80
@ -467,6 +479,7 @@ struct sctp_error_unrecognized_chunk {
#define SCTP_PCB_FLAGS_NEEDS_MAPPED_V4 0x00800000
#define SCTP_PCB_FLAGS_MULTIPLE_ASCONFS 0x01000000
#define SCTP_PCB_FLAGS_PORTREUSE 0x02000000
#define SCTP_PCB_FLAGS_DRYEVNT 0x04000000
/*-
* mobility_features parameters (by micchie). Note
* these features are applied against the


@ -761,6 +761,9 @@ sctp_handle_asconf(struct mbuf *m, unsigned int offset,
m_result = sctp_process_asconf_set_primary(m, aph,
stcb, error);
break;
case SCTP_NAT_VTAGS:
SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: sees a NAT VTAG state parameter\n");
break;
case SCTP_SUCCESS_REPORT:
/* not valid in an ASCONF chunk */
break;
@ -1349,6 +1352,7 @@ sctp_asconf_queue_mgmt(struct sctp_tcb *stcb, struct sctp_ifa *ifa,
SCTPDBG(SCTP_DEBUG_ASCONF1, "asconf_queue_mgmt: failed to get memory!\n");
return (-1);
}
aa->special_del = 0;
/* fill in asconf address parameter fields */
/* top level elements are "networked" during send */
aa->ap.aph.ph.param_type = type;
@ -1555,6 +1559,7 @@ sctp_asconf_queue_sa_delete(struct sctp_tcb *stcb, struct sockaddr *sa)
"sctp_asconf_queue_sa_delete: failed to get memory!\n");
return (-1);
}
aa->special_del = 0;
/* fill in asconf address parameter fields */
/* top level elements are "networked" during send */
aa->ap.aph.ph.param_type = SCTP_DEL_IP_ADDRESS;
@ -2691,6 +2696,7 @@ sctp_compose_asconf(struct sctp_tcb *stcb, int *retlen, int addr_locked)
* case)
*/
if (lookup_used == 0 &&
(aa->special_del == 0) &&
aa->ap.aph.ph.param_type == SCTP_DEL_IP_ADDRESS) {
struct sctp_ipv6addr_param *lookup;
uint16_t p_size, addr_size;
@ -3234,3 +3240,195 @@ sctp_addr_mgmt_ep_sa(struct sctp_inpcb *inp, struct sockaddr *sa,
}
return (0);
}
void
sctp_asconf_send_nat_state_update(struct sctp_tcb *stcb,
struct sctp_nets *net)
{
struct sctp_asconf_addr *aa;
struct sctp_ifa *sctp_ifap;
struct sctp_asconf_tag_param *vtag;
struct sockaddr_in *to;
#ifdef INET6
struct sockaddr_in6 *to6;
#endif
if (net == NULL) {
SCTPDBG(SCTP_DEBUG_ASCONF1, "sctp_asconf_send_nat_state_update: Missing net\n");
return;
}
if (stcb == NULL) {
SCTPDBG(SCTP_DEBUG_ASCONF1, "sctp_asconf_send_nat_state_update: Missing stcb\n");
return;
}
/*
* Need to have in the asconf: - vtagparam(my_vtag/peer_vtag) -
* add(0.0.0.0) - del(0.0.0.0) - Any global addresses add(addr)
*/
SCTP_MALLOC(aa, struct sctp_asconf_addr *, sizeof(*aa),
SCTP_M_ASC_ADDR);
if (aa == NULL) {
/* didn't get memory */
SCTPDBG(SCTP_DEBUG_ASCONF1,
"sctp_asconf_send_nat_state_update: failed to get memory!\n");
return;
}
aa->special_del = 0;
/* fill in asconf address parameter fields */
/* top level elements are "networked" during send */
aa->ifa = NULL;
aa->sent = 0; /* clear sent flag */
vtag = (struct sctp_asconf_tag_param *)&aa->ap.aph;
vtag->aph.ph.param_type = SCTP_NAT_VTAGS;
vtag->aph.ph.param_length = sizeof(struct sctp_asconf_tag_param);
vtag->local_vtag = htonl(stcb->asoc.my_vtag);
vtag->remote_vtag = htonl(stcb->asoc.peer_vtag);
TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next);
SCTP_MALLOC(aa, struct sctp_asconf_addr *, sizeof(*aa),
SCTP_M_ASC_ADDR);
if (aa == NULL) {
/* didn't get memory */
SCTPDBG(SCTP_DEBUG_ASCONF1,
"sctp_asconf_send_nat_state_update: failed to get memory!\n");
return;
}
memset(aa, 0, sizeof(struct sctp_asconf_addr));
/* fill in asconf address parameter fields */
/* ADD(0.0.0.0) */
if (net->ro._l_addr.sa.sa_family == AF_INET) {
aa->ap.aph.ph.param_type = SCTP_ADD_IP_ADDRESS;
aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_addrv4_param);
aa->ap.addrp.ph.param_type = SCTP_IPV4_ADDRESS;
aa->ap.addrp.ph.param_length = sizeof(struct sctp_ipv4addr_param);
/* No need to add an address, we are using 0.0.0.0 */
TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next);
}
#ifdef INET6
else if (net->ro._l_addr.sa.sa_family == AF_INET6) {
aa->ap.aph.ph.param_type = SCTP_ADD_IP_ADDRESS;
aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_addr_param);
aa->ap.addrp.ph.param_type = SCTP_IPV6_ADDRESS;
aa->ap.addrp.ph.param_length = sizeof(struct sctp_ipv6addr_param);
/* No need to add an address, we are using 0.0.0.0 */
TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next);
}
#endif /* INET6 */
SCTP_MALLOC(aa, struct sctp_asconf_addr *, sizeof(*aa),
SCTP_M_ASC_ADDR);
if (aa == NULL) {
/* didn't get memory */
SCTPDBG(SCTP_DEBUG_ASCONF1,
"sctp_asconf_send_nat_state_update: failed to get memory!\n");
return;
}
memset(aa, 0, sizeof(struct sctp_asconf_addr));
/* fill in asconf address parameter fields */
/* ADD(0.0.0.0) */
if (net->ro._l_addr.sa.sa_family == AF_INET) {
aa->ap.aph.ph.param_type = SCTP_ADD_IP_ADDRESS;
aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_addrv4_param);
aa->ap.addrp.ph.param_type = SCTP_IPV4_ADDRESS;
aa->ap.addrp.ph.param_length = sizeof(struct sctp_ipv4addr_param);
/* No need to add an address, we are using 0.0.0.0 */
TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next);
}
#ifdef INET6
else if (net->ro._l_addr.sa.sa_family == AF_INET6) {
aa->ap.aph.ph.param_type = SCTP_DEL_IP_ADDRESS;
aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_addr_param);
aa->ap.addrp.ph.param_type = SCTP_IPV6_ADDRESS;
aa->ap.addrp.ph.param_length = sizeof(struct sctp_ipv6addr_param);
/* No need to add an address, we are using 0.0.0.0 */
TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next);
}
#endif /* INET6 */
/* Now we must hunt the addresses and add all global addresses */
if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
struct sctp_vrf *vrf = NULL;
struct sctp_ifn *sctp_ifnp;
uint32_t vrf_id;
vrf_id = stcb->sctp_ep->def_vrf_id;
vrf = sctp_find_vrf(vrf_id);
if (vrf == NULL) {
goto skip_rest;
}
SCTP_IPI_ADDR_RLOCK();
LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
if (sctp_ifap->address.sa.sa_family == AF_INET) {
to = &sctp_ifap->address.sin;
if (IN4_ISPRIVATE_ADDRESS(&to->sin_addr)) {
continue;
}
if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
continue;
}
}
#ifdef INET6
else if (sctp_ifap->address.sa.sa_family == AF_INET6) {
to6 = &sctp_ifap->address.sin6;
if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr)) {
continue;
}
if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
continue;
}
}
#endif
sctp_asconf_queue_mgmt(stcb, sctp_ifap, SCTP_ADD_IP_ADDRESS);
}
}
SCTP_IPI_ADDR_RUNLOCK();
} else {
struct sctp_laddr *laddr;
LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, sctp_nxt_addr) {
if (laddr->ifa == NULL) {
continue;
}
if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
/*
* Address being deleted by the system, dont
* list.
*/
continue;
if (laddr->action == SCTP_DEL_IP_ADDRESS) {
/*
* Address being deleted on this ep don't
* list.
*/
continue;
}
sctp_ifap = laddr->ifa;
if (sctp_ifap->address.sa.sa_family == AF_INET) {
to = &sctp_ifap->address.sin;
if (IN4_ISPRIVATE_ADDRESS(&to->sin_addr)) {
continue;
}
if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
continue;
}
}
#ifdef INET6
else if (sctp_ifap->address.sa.sa_family == AF_INET6) {
to6 = &sctp_ifap->address.sin6;
if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr)) {
continue;
}
if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
continue;
}
}
#endif
sctp_asconf_queue_mgmt(stcb, sctp_ifap, SCTP_ADD_IP_ADDRESS);
}
}
skip_rest:
/* Now we must send the asconf into the queue */
sctp_send_asconf(stcb, net, 0);
}


@ -86,6 +86,10 @@ extern void
extern void
sctp_net_immediate_retrans(struct sctp_tcb *, struct sctp_nets *);
extern void
sctp_asconf_send_nat_state_update(struct sctp_tcb *stcb,
struct sctp_nets *net);
extern int
sctp_is_addr_pending(struct sctp_tcb *, struct sctp_ifa *);


@ -1,5 +1,5 @@
/*-
* Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -359,9 +359,11 @@ sctp_set_key(uint8_t * key, uint32_t keylen)
return (new_key);
}
/*
/*-
* given two keys of variable size, compute which key is "larger/smaller"
* returns: 1 if key1 > key2 -1 if key1 < key2 0 if key1 = key2
* returns: 1 if key1 > key2
* -1 if key1 < key2
* 0 if key1 = key2
*/
static int
sctp_compare_key(sctp_key_t * key1, sctp_key_t * key2)
@ -531,13 +533,18 @@ sctp_alloc_sharedkey(void)
}
new_key->keyid = 0;
new_key->key = NULL;
new_key->refcount = 1;
new_key->deactivated = 0;
return (new_key);
}
void
sctp_free_sharedkey(sctp_sharedkey_t * skey)
{
if (skey != NULL) {
if (skey == NULL)
return;
if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&skey->refcount)) {
if (skey->key != NULL)
sctp_free_key(skey->key);
SCTP_FREE(skey, SCTP_M_AUTH_KY);
@ -556,40 +563,93 @@ sctp_find_sharedkey(struct sctp_keyhead *shared_keys, uint16_t key_id)
return (NULL);
}
void
int
sctp_insert_sharedkey(struct sctp_keyhead *shared_keys,
sctp_sharedkey_t * new_skey)
{
sctp_sharedkey_t *skey;
if ((shared_keys == NULL) || (new_skey == NULL))
return;
return (EINVAL);
/* insert into an empty list? */
if (SCTP_LIST_EMPTY(shared_keys)) {
LIST_INSERT_HEAD(shared_keys, new_skey, next);
return;
return (0);
}
/* insert into the existing list, ordered by key id */
LIST_FOREACH(skey, shared_keys, next) {
if (new_skey->keyid < skey->keyid) {
/* insert it before here */
LIST_INSERT_BEFORE(skey, new_skey, next);
return;
return (0);
} else if (new_skey->keyid == skey->keyid) {
/* replace the existing key */
/* verify this key *can* be replaced */
if ((skey->deactivated) && (skey->refcount > 1)) {
SCTPDBG(SCTP_DEBUG_AUTH1,
"can't replace shared key id %u\n",
new_skey->keyid);
return (EBUSY);
}
SCTPDBG(SCTP_DEBUG_AUTH1,
"replacing shared key id %u\n",
new_skey->keyid);
LIST_INSERT_BEFORE(skey, new_skey, next);
LIST_REMOVE(skey, next);
sctp_free_sharedkey(skey);
return;
return (0);
}
if (LIST_NEXT(skey, next) == NULL) {
/* belongs at the end of the list */
LIST_INSERT_AFTER(skey, new_skey, next);
return;
return (0);
}
}
/* shouldn't reach here */
return (0);
}
void
sctp_auth_key_acquire(struct sctp_tcb *stcb, uint16_t key_id)
{
sctp_sharedkey_t *skey;
/* find the shared key */
skey = sctp_find_sharedkey(&stcb->asoc.shared_keys, key_id);
/* bump the ref count */
if (skey) {
atomic_add_int(&skey->refcount, 1);
SCTPDBG(SCTP_DEBUG_AUTH2,
"%s: stcb %p key %u refcount acquire to %d\n",
__FUNCTION__, stcb, key_id, skey->refcount);
}
}
void
sctp_auth_key_release(struct sctp_tcb *stcb, uint16_t key_id)
{
sctp_sharedkey_t *skey;
/* find the shared key */
skey = sctp_find_sharedkey(&stcb->asoc.shared_keys, key_id);
/* decrement the ref count */
if (skey) {
sctp_free_sharedkey(skey);
SCTPDBG(SCTP_DEBUG_AUTH2,
"%s: stcb %p key %u refcount release to %d\n",
__FUNCTION__, stcb, key_id, skey->refcount);
/* see if a notification should be generated */
if ((skey->refcount <= 1) && (skey->deactivated)) {
/* notify ULP that key is no longer used */
sctp_ulp_notify(SCTP_NOTIFY_AUTH_FREE_KEY, stcb,
key_id, 0, SCTP_SO_NOT_LOCKED);
SCTPDBG(SCTP_DEBUG_AUTH2,
"%s: stcb %p key %u no longer used, %d\n",
__FUNCTION__, stcb, key_id, skey->refcount);
}
}
}
@ -623,7 +683,7 @@ sctp_copy_skeylist(const struct sctp_keyhead *src, struct sctp_keyhead *dest)
LIST_FOREACH(skey, src, next) {
new_skey = sctp_copy_sharedkey(skey);
if (new_skey != NULL) {
sctp_insert_sharedkey(dest, new_skey);
(void)sctp_insert_sharedkey(dest, new_skey);
count++;
}
}
@ -727,9 +787,9 @@ sctp_default_supported_hmaclist(void)
return (new_list);
}
/*
* HMAC algos are listed in priority/preference order find the best HMAC id
* to use for the peer based on local support
/*-
* HMAC algos are listed in priority/preference order
* find the best HMAC id to use for the peer based on local support
*/
uint16_t
sctp_negotiate_hmacid(sctp_hmaclist_t * peer, sctp_hmaclist_t * local)
@ -760,9 +820,9 @@ sctp_negotiate_hmacid(sctp_hmaclist_t * peer, sctp_hmaclist_t * local)
return (SCTP_AUTH_HMAC_ID_RSVD);
}
/*
* serialize the HMAC algo list and return space used caller must guarantee
* ptr has appropriate space
/*-
* serialize the HMAC algo list and return space used
* caller must guarantee ptr has appropriate space
*/
int
sctp_serialize_hmaclist(sctp_hmaclist_t * list, uint8_t * ptr)
@ -994,7 +1054,7 @@ sctp_hmac_final(uint16_t hmac_algo, sctp_hash_context_t * ctx,
} /* end switch */
}
/*
/*-
* Keyed-Hashing for Message Authentication: FIPS 198 (RFC 2104)
*
* Compute the HMAC digest using the desired hash key, text, and HMAC
@ -1142,9 +1202,10 @@ sctp_hmac_m(uint16_t hmac_algo, uint8_t * key, uint32_t keylen,
return (digestlen);
}
/*
/*-
* verify the HMAC digest using the desired hash key, text, and HMAC
* algorithm. Returns -1 on error, 0 on success.
* algorithm.
* Returns -1 on error, 0 on success.
*/
int
sctp_verify_hmac(uint16_t hmac_algo, uint8_t * key, uint32_t keylen,
@ -1263,10 +1324,10 @@ sctp_auth_is_supported_hmac(sctp_hmaclist_t * list, uint16_t id)
}
/*
* clear any cached key(s) if they match the given key id on an association
* the cached key(s) will be recomputed and re-cached at next use. ASSUMES
* TCB_LOCK is already held
/*-
* clear any cached key(s) if they match the given key id on an association.
* the cached key(s) will be recomputed and re-cached at next use.
* ASSUMES TCB_LOCK is already held
*/
void
sctp_clear_cachedkeys(struct sctp_tcb *stcb, uint16_t keyid)
@ -1284,9 +1345,10 @@ sctp_clear_cachedkeys(struct sctp_tcb *stcb, uint16_t keyid)
}
}
/*
/*-
* clear any cached key(s) if they match the given key id for all assocs on
* an association ASSUMES INP_WLOCK is already held
* an endpoint.
* ASSUMES INP_WLOCK is already held
*/
void
sctp_clear_cachedkeys_ep(struct sctp_inpcb *inp, uint16_t keyid)
@ -1304,8 +1366,9 @@ sctp_clear_cachedkeys_ep(struct sctp_inpcb *inp, uint16_t keyid)
}
}
/*
* delete a shared key from an association ASSUMES TCB_LOCK is already held
/*-
* delete a shared key from an association
* ASSUMES TCB_LOCK is already held
*/
int
sctp_delete_sharedkey(struct sctp_tcb *stcb, uint16_t keyid)
@ -1316,7 +1379,7 @@ sctp_delete_sharedkey(struct sctp_tcb *stcb, uint16_t keyid)
return (-1);
/* is the keyid the assoc active sending key */
if (keyid == stcb->asoc.authinfo.assoc_keyid)
if (keyid == stcb->asoc.authinfo.active_keyid)
return (-1);
/* does the key exist? */
@ -1324,6 +1387,10 @@ sctp_delete_sharedkey(struct sctp_tcb *stcb, uint16_t keyid)
if (skey == NULL)
return (-1);
/* are there other refcount holders on the key? */
if (skey->refcount > 1)
return (-1);
/* remove it */
LIST_REMOVE(skey, next);
sctp_free_sharedkey(skey); /* frees skey->key as well */
@ -1333,35 +1400,29 @@ sctp_delete_sharedkey(struct sctp_tcb *stcb, uint16_t keyid)
return (0);
}
/*
* deletes a shared key from the endpoint ASSUMES INP_WLOCK is already held
/*-
* deletes a shared key from the endpoint
* ASSUMES INP_WLOCK is already held
*/
int
sctp_delete_sharedkey_ep(struct sctp_inpcb *inp, uint16_t keyid)
{
sctp_sharedkey_t *skey;
struct sctp_tcb *stcb;
if (inp == NULL)
return (-1);
/* is the keyid the active sending key on the endpoint or any assoc */
/* is the keyid the active sending key on the endpoint */
if (keyid == inp->sctp_ep.default_keyid)
return (-1);
LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
SCTP_TCB_LOCK(stcb);
if (keyid == stcb->asoc.authinfo.assoc_keyid) {
SCTP_TCB_UNLOCK(stcb);
return (-1);
}
SCTP_TCB_UNLOCK(stcb);
}
/* does the key exist? */
skey = sctp_find_sharedkey(&inp->sctp_ep.shared_keys, keyid);
if (skey == NULL)
return (-1);
/* endpoint keys are not refcounted */
/* remove it */
LIST_REMOVE(skey, next);
sctp_free_sharedkey(skey); /* frees skey->key as well */
@ -1371,60 +1432,36 @@ sctp_delete_sharedkey_ep(struct sctp_inpcb *inp, uint16_t keyid)
return (0);
}
/*
* set the active key on an association ASSUME TCB_LOCK is already held
/*-
* set the active key on an association
* ASSUMES TCB_LOCK is already held
*/
int
sctp_auth_setactivekey(struct sctp_tcb *stcb, uint16_t keyid)
{
sctp_sharedkey_t *skey = NULL;
sctp_key_t *key = NULL;
int using_ep_key = 0;
/* find the key on the assoc */
skey = sctp_find_sharedkey(&stcb->asoc.shared_keys, keyid);
if (skey == NULL) {
/* if not on the assoc, find the key on the endpoint */
atomic_add_int(&stcb->asoc.refcnt, 1);
SCTP_TCB_UNLOCK(stcb);
SCTP_INP_RLOCK(stcb->sctp_ep);
SCTP_TCB_LOCK(stcb);
atomic_add_int(&stcb->asoc.refcnt, -1);
skey = sctp_find_sharedkey(&stcb->sctp_ep->sctp_ep.shared_keys,
keyid);
using_ep_key = 1;
}
if (skey == NULL) {
/* that key doesn't exist */
if (using_ep_key) {
SCTP_INP_RUNLOCK(stcb->sctp_ep);
}
return (-1);
}
/* get the shared key text */
key = skey->key;
/* free any existing cached key */
if (stcb->asoc.authinfo.assoc_key != NULL)
sctp_free_key(stcb->asoc.authinfo.assoc_key);
/* compute a new assoc key and cache it */
stcb->asoc.authinfo.assoc_key =
sctp_compute_hashkey(stcb->asoc.authinfo.random,
stcb->asoc.authinfo.peer_random, key);
stcb->asoc.authinfo.assoc_keyid = keyid;
#ifdef SCTP_DEBUG
if (SCTP_AUTH_DEBUG)
sctp_print_key(stcb->asoc.authinfo.assoc_key, "Assoc Key");
#endif
if (using_ep_key) {
SCTP_INP_RUNLOCK(stcb->sctp_ep);
if ((skey->deactivated) && (skey->refcount > 1)) {
/* can't reactivate a deactivated key with other refcounts */
return (-1);
}
/* set the (new) active key */
stcb->asoc.authinfo.active_keyid = keyid;
/* reset the deactivated flag */
skey->deactivated = 0;
return (0);
}
/*
* set the active key on an endpoint ASSUMES INP_WLOCK is already held
/*-
* set the active key on an endpoint
* ASSUMES INP_WLOCK is already held
*/
int
sctp_auth_setactivekey_ep(struct sctp_inpcb *inp, uint16_t keyid)
@ -1441,6 +1478,69 @@ sctp_auth_setactivekey_ep(struct sctp_inpcb *inp, uint16_t keyid)
return (0);
}
/*-
* deactivates a shared key from the association
* ASSUMES INP_WLOCK is already held
*/
int
sctp_deact_sharedkey(struct sctp_tcb *stcb, uint16_t keyid)
{
sctp_sharedkey_t *skey;
if (stcb == NULL)
return (-1);
/* is the keyid the assoc active sending key */
if (keyid == stcb->asoc.authinfo.active_keyid)
return (-1);
/* does the key exist? */
skey = sctp_find_sharedkey(&stcb->asoc.shared_keys, keyid);
if (skey == NULL)
return (-1);
/* are there other refcount holders on the key? */
if (skey->refcount == 1) {
/* no other users, send a notification for this key */
sctp_ulp_notify(SCTP_NOTIFY_AUTH_FREE_KEY, stcb, keyid, 0,
SCTP_SO_LOCKED);
}
/* mark the key as deactivated */
skey->deactivated = 1;
return (0);
}
/*-
* deactivates a shared key from the endpoint
* ASSUMES INP_WLOCK is already held
*/
int
sctp_deact_sharedkey_ep(struct sctp_inpcb *inp, uint16_t keyid)
{
sctp_sharedkey_t *skey;
if (inp == NULL)
return (-1);
/* is the keyid the active sending key on the endpoint */
if (keyid == inp->sctp_ep.default_keyid)
return (-1);
/* does the key exist? */
skey = sctp_find_sharedkey(&inp->sctp_ep.shared_keys, keyid);
if (skey == NULL)
return (-1);
/* endpoint keys are not refcounted */
/* remove it */
LIST_REMOVE(skey, next);
sctp_free_sharedkey(skey); /* frees skey->key as well */
return (0);
}
/*
* get local authentication parameters from cookie (from INIT-ACK)
*/
@ -1581,9 +1681,13 @@ sctp_auth_get_cookie_params(struct sctp_tcb *stcb, struct mbuf *m,
/* negotiate what HMAC to use for the peer */
stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
stcb->asoc.local_hmacs);
/* copy defaults from the endpoint */
/* FIX ME: put in cookie? */
stcb->asoc.authinfo.assoc_keyid = stcb->sctp_ep->sctp_ep.default_keyid;
stcb->asoc.authinfo.active_keyid = stcb->sctp_ep->sctp_ep.default_keyid;
/* copy out the shared key list (by reference) from the endpoint */
(void)sctp_copy_skeylist(&stcb->sctp_ep->sctp_ep.shared_keys,
&stcb->asoc.shared_keys);
}
/*
@ -1591,7 +1695,7 @@ sctp_auth_get_cookie_params(struct sctp_tcb *stcb, struct mbuf *m,
*/
void
sctp_fill_hmac_digest_m(struct mbuf *m, uint32_t auth_offset,
struct sctp_auth_chunk *auth, struct sctp_tcb *stcb)
struct sctp_auth_chunk *auth, struct sctp_tcb *stcb, uint16_t keyid)
{
uint32_t digestlen;
sctp_sharedkey_t *skey;
@ -1603,15 +1707,15 @@ sctp_fill_hmac_digest_m(struct mbuf *m, uint32_t auth_offset,
/* zero the digest + chunk padding */
digestlen = sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
bzero(auth->hmac, SCTP_SIZE32(digestlen));
/* is an assoc key cached? */
if (stcb->asoc.authinfo.assoc_key == NULL) {
skey = sctp_find_sharedkey(&stcb->asoc.shared_keys,
stcb->asoc.authinfo.assoc_keyid);
if (skey == NULL) {
/* not in the assoc list, so check the endpoint list */
skey = sctp_find_sharedkey(&stcb->sctp_ep->sctp_ep.shared_keys,
stcb->asoc.authinfo.assoc_keyid);
/* is the desired key cached? */
if ((keyid != stcb->asoc.authinfo.assoc_keyid) ||
(stcb->asoc.authinfo.assoc_key == NULL)) {
if (stcb->asoc.authinfo.assoc_key != NULL) {
/* free the old cached key */
sctp_free_key(stcb->asoc.authinfo.assoc_key);
}
skey = sctp_find_sharedkey(&stcb->asoc.shared_keys, keyid);
/* the only way skey is NULL is if null key id 0 is used */
if (skey != NULL)
key = skey->key;
@ -1621,6 +1725,7 @@ sctp_fill_hmac_digest_m(struct mbuf *m, uint32_t auth_offset,
stcb->asoc.authinfo.assoc_key =
sctp_compute_hashkey(stcb->asoc.authinfo.random,
stcb->asoc.authinfo.peer_random, key);
stcb->asoc.authinfo.assoc_keyid = keyid;
SCTPDBG(SCTP_DEBUG_AUTH1, "caching key id %u\n",
stcb->asoc.authinfo.assoc_keyid);
#ifdef SCTP_DEBUG
@ -1630,11 +1735,10 @@ sctp_fill_hmac_digest_m(struct mbuf *m, uint32_t auth_offset,
#endif
}
/* set in the active key id */
auth->shared_key_id = htons(stcb->asoc.authinfo.assoc_keyid);
auth->shared_key_id = htons(keyid);
/* compute and fill in the digest */
(void)sctp_compute_hmac_m(stcb->asoc.peer_hmac_id,
stcb->asoc.authinfo.assoc_key,
(void)sctp_compute_hmac_m(stcb->asoc.peer_hmac_id, stcb->asoc.authinfo.assoc_key,
m, auth_offset, auth->hmac);
}
@ -1671,9 +1775,11 @@ sctp_bzero_m(struct mbuf *m, uint32_t m_offset, uint32_t size)
}
}
/*
* process the incoming Authentication chunk return codes: -1 on any
* authentication error 0 on authentication verification
/*-
* process the incoming Authentication chunk
* return codes:
* -1 on any authentication error
* 0 on authentication verification
*/
int
sctp_handle_auth(struct sctp_tcb *stcb, struct sctp_auth_chunk *auth,
@ -1736,12 +1842,8 @@ sctp_handle_auth(struct sctp_tcb *stcb, struct sctp_auth_chunk *auth,
if ((stcb->asoc.authinfo.recv_key == NULL) ||
(stcb->asoc.authinfo.recv_keyid != shared_key_id)) {
/* find the shared key on the assoc first */
skey = sctp_find_sharedkey(&stcb->asoc.shared_keys, shared_key_id);
if (skey == NULL) {
/* if not on the assoc, find it on the endpoint */
skey = sctp_find_sharedkey(&stcb->sctp_ep->sctp_ep.shared_keys,
shared_key_id);
}
skey = sctp_find_sharedkey(&stcb->asoc.shared_keys,
shared_key_id);
/* if the shared key isn't found, discard the chunk */
if (skey == NULL) {
SCTP_STAT_INCR(sctps_recvivalkeyid);
@ -1758,7 +1860,8 @@ sctp_handle_auth(struct sctp_tcb *stcb, struct sctp_auth_chunk *auth,
* *)stcb->asoc.authinfo.recv_keyid);
*/
sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY,
shared_key_id, stcb->asoc.authinfo.recv_keyid);
shared_key_id, stcb->asoc.authinfo.recv_keyid,
SCTP_SO_NOT_LOCKED);
/* compute a new recv assoc key and cache it */
if (stcb->asoc.authinfo.recv_key != NULL)
sctp_free_key(stcb->asoc.authinfo.recv_key);
@ -1801,7 +1904,11 @@ sctp_handle_auth(struct sctp_tcb *stcb, struct sctp_auth_chunk *auth,
*/
void
sctp_notify_authentication(struct sctp_tcb *stcb, uint32_t indication,
uint16_t keyid, uint16_t alt_keyid)
uint16_t keyid, uint16_t alt_keyid, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
SCTP_UNUSED
#endif
)
{
struct mbuf *m_notify;
struct sctp_authkey_event *auth;
@ -1851,11 +1958,11 @@ sctp_notify_authentication(struct sctp_tcb *stcb, uint32_t indication,
/* not that we need this */
control->tail_mbuf = m_notify;
sctp_add_to_readq(stcb->sctp_ep, stcb, control,
&stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
&stcb->sctp_socket->so_rcv, 1, so_locked);
}
/*
/*-
* validates the AUTHentication related parameters in an INIT/INIT-ACK
* Note: currently only used for INIT as INIT-ACK is handled inline
* with sctp_load_addresses_from_init()
@ -2027,7 +2134,11 @@ sctp_initialize_auth_params(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
}
}
/* copy defaults from the endpoint */
stcb->asoc.authinfo.assoc_keyid = inp->sctp_ep.default_keyid;
stcb->asoc.authinfo.active_keyid = inp->sctp_ep.default_keyid;
/* copy out the shared key list (by reference) from the endpoint */
(void)sctp_copy_skeylist(&inp->sctp_ep.shared_keys,
&stcb->asoc.shared_keys);
/* now set the concatenated key (random + chunks + hmacs) */
#ifdef SCTP_AUTH_DRAFT_04
@ -2135,11 +2246,13 @@ sctp_test_hmac_sha1(void)
uint32_t digestlen = 20;
int failed = 0;
/*
* test_case = 1 key =
* 0x0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b key_len = 20
* data = "Hi There" data_len = 8 digest =
* 0xb617318655057264e28bc0b6fb378c8ef146be00
/*-
* test_case = 1
* key = 0x0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b
* key_len = 20
* data = "Hi There"
* data_len = 8
* digest = 0xb617318655057264e28bc0b6fb378c8ef146be00
*/
keylen = 20;
memset(key, 0x0b, keylen);
@ -2150,10 +2263,13 @@ sctp_test_hmac_sha1(void)
text, textlen, digest, digestlen) < 0)
failed++;
/*
* test_case = 2 key = "Jefe" key_len = 4 data =
* "what do ya want for nothing?" data_len = 28 digest =
* 0xeffcdf6ae5eb2fa2d27416d5f184df9c259a7c79
/*-
* test_case = 2
* key = "Jefe"
* key_len = 4
* data = "what do ya want for nothing?"
* data_len = 28
* digest = 0xeffcdf6ae5eb2fa2d27416d5f184df9c259a7c79
*/
keylen = 4;
strcpy(key, "Jefe");
@ -2164,11 +2280,13 @@ sctp_test_hmac_sha1(void)
text, textlen, digest, digestlen) < 0)
failed++;
/*
* test_case = 3 key =
* 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa key_len = 20
* data = 0xdd repeated 50 times data_len = 50 digest
* = 0x125d7342b9ac11cd91a39af48aa17b4f63f175d3
/*-
* test_case = 3
* key = 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
* key_len = 20
* data = 0xdd repeated 50 times
* data_len = 50
* digest = 0x125d7342b9ac11cd91a39af48aa17b4f63f175d3
*/
keylen = 20;
memset(key, 0xaa, keylen);
@ -2179,11 +2297,13 @@ sctp_test_hmac_sha1(void)
text, textlen, digest, digestlen) < 0)
failed++;
/*
* test_case = 4 key =
* 0x0102030405060708090a0b0c0d0e0f10111213141516171819 key_len = 25
* data = 0xcd repeated 50 times data_len = 50 digest
* = 0x4c9007f4026250c6bc8414f9bf50c86c2d7235da
/*-
* test_case = 4
* key = 0x0102030405060708090a0b0c0d0e0f10111213141516171819
* key_len = 25
* data = 0xcd repeated 50 times
* data_len = 50
* digest = 0x4c9007f4026250c6bc8414f9bf50c86c2d7235da
*/
keylen = 25;
memcpy(key, "\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19", keylen);
@ -2194,12 +2314,14 @@ sctp_test_hmac_sha1(void)
text, textlen, digest, digestlen) < 0)
failed++;
/*
* test_case = 5 key =
* 0x0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c key_len = 20
* data = "Test With Truncation" data_len = 20 digest
* = 0x4c1a03424b55e07fe7f27be1d58bb9324a9a5a04 digest-96 =
* 0x4c1a03424b55e07fe7f27be1
/*-
* test_case = 5
* key = 0x0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c
* key_len = 20
* data = "Test With Truncation"
* data_len = 20
* digest = 0x4c1a03424b55e07fe7f27be1d58bb9324a9a5a04
* digest-96 = 0x4c1a03424b55e07fe7f27be1
*/
keylen = 20;
memset(key, 0x0c, keylen);
@ -2210,11 +2332,13 @@ sctp_test_hmac_sha1(void)
text, textlen, digest, digestlen) < 0)
failed++;
/*
* test_case = 6 key = 0xaa repeated 80 times key_len
* = 80 data = "Test Using Larger Than Block-Size Key -
* Hash Key First" data_len = 54 digest =
* 0xaa4ae5e15272d00e95705637ce8a3b55ed402112
/*-
* test_case = 6
* key = 0xaa repeated 80 times
* key_len = 80
* data = "Test Using Larger Than Block-Size Key - Hash Key First"
* data_len = 54
* digest = 0xaa4ae5e15272d00e95705637ce8a3b55ed402112
*/
keylen = 80;
memset(key, 0xaa, keylen);
@ -2225,11 +2349,13 @@ sctp_test_hmac_sha1(void)
text, textlen, digest, digestlen) < 0)
failed++;
/*
* test_case = 7 key = 0xaa repeated 80 times key_len
* = 80 data = "Test Using Larger Than Block-Size Key and
* Larger Than One Block-Size Data" data_len = 73 digest =
* 0xe8e99d0f45237d786d6bbaa7965c7808bbff1a91
/*-
* test_case = 7
* key = 0xaa repeated 80 times
* key_len = 80
* data = "Test Using Larger Than Block-Size Key and Larger Than One Block-Size Data"
* data_len = 73
* digest = 0xe8e99d0f45237d786d6bbaa7965c7808bbff1a91
*/
keylen = 80;
memset(key, 0xaa, keylen);
@ -2261,10 +2387,13 @@ sctp_test_hmac_md5(void)
uint32_t digestlen = 16;
int failed = 0;
/*
* test_case = 1 key = 0x0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b
* key_len = 16 data = "Hi There" data_len = 8 digest =
* 0x9294727a3638bb1c13f48ef8158bfc9d
/*-
* test_case = 1
* key = 0x0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b
* key_len = 16
* data = "Hi There"
* data_len = 8
* digest = 0x9294727a3638bb1c13f48ef8158bfc9d
*/
keylen = 16;
memset(key, 0x0b, keylen);
@ -2275,10 +2404,13 @@ sctp_test_hmac_md5(void)
text, textlen, digest, digestlen) < 0)
failed++;
/*
* test_case = 2 key = "Jefe" key_len = 4 data =
* "what do ya want for nothing?" data_len = 28 digest =
* 0x750c783e6ab0b503eaa86e310a5db738
/*-
* test_case = 2
* key = "Jefe"
* key_len = 4
* data = "what do ya want for nothing?"
* data_len = 28
* digest = 0x750c783e6ab0b503eaa86e310a5db738
*/
keylen = 4;
strcpy(key, "Jefe");
@ -2289,10 +2421,13 @@ sctp_test_hmac_md5(void)
text, textlen, digest, digestlen) < 0)
failed++;
/*
* test_case = 3 key = 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
* key_len = 16 data = 0xdd repeated 50 times data_len = 50
* digest = 0x56be34521d144c88dbb8c733f0e8b3f6
/*-
* test_case = 3
* key = 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
* key_len = 16
* data = 0xdd repeated 50 times
* data_len = 50
* digest = 0x56be34521d144c88dbb8c733f0e8b3f6
*/
keylen = 16;
memset(key, 0xaa, keylen);
@ -2303,11 +2438,13 @@ sctp_test_hmac_md5(void)
text, textlen, digest, digestlen) < 0)
failed++;
/*
* test_case = 4 key =
* 0x0102030405060708090a0b0c0d0e0f10111213141516171819 key_len = 25
* data = 0xcd repeated 50 times data_len = 50 digest
* = 0x697eaf0aca3a3aea3a75164746ffaa79
/*-
* test_case = 4
* key = 0x0102030405060708090a0b0c0d0e0f10111213141516171819
* key_len = 25
* data = 0xcd repeated 50 times
* data_len = 50
* digest = 0x697eaf0aca3a3aea3a75164746ffaa79
*/
keylen = 25;
memcpy(key, "\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19", keylen);
@ -2318,11 +2455,14 @@ sctp_test_hmac_md5(void)
text, textlen, digest, digestlen) < 0)
failed++;
/*
* test_case = 5 key = 0x0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c
* key_len = 16 data = "Test With Truncation" data_len = 20
* digest = 0x56461ef2342edc00f9bab995690efd4c digest-96
* 0x56461ef2342edc00f9bab995
/*-
* test_case = 5
* key = 0x0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c
* key_len = 16
* data = "Test With Truncation"
* data_len = 20
* digest = 0x56461ef2342edc00f9bab995690efd4c
* digest-96 = 0x56461ef2342edc00f9bab995
*/
keylen = 16;
memset(key, 0x0c, keylen);
@ -2333,11 +2473,13 @@ sctp_test_hmac_md5(void)
text, textlen, digest, digestlen) < 0)
failed++;
/*
* test_case = 6 key = 0xaa repeated 80 times key_len
* = 80 data = "Test Using Larger Than Block-Size Key -
* Hash Key First" data_len = 54 digest =
* 0x6b1ab7fe4bd7bf8f0b62e6ce61b9d0cd
/*-
* test_case = 6
* key = 0xaa repeated 80 times
* key_len = 80
* data = "Test Using Larger Than Block-Size Key - Hash Key First"
* data_len = 54
* digest = 0x6b1ab7fe4bd7bf8f0b62e6ce61b9d0cd
*/
keylen = 80;
memset(key, 0xaa, keylen);
@ -2348,11 +2490,13 @@ sctp_test_hmac_md5(void)
text, textlen, digest, digestlen) < 0)
failed++;
/*
* test_case = 7 key = 0xaa repeated 80 times key_len
* = 80 data = "Test Using Larger Than Block-Size Key and
* Larger Than One Block-Size Data" data_len = 73 digest =
* 0x6f630fad67cda0ee1fb1f562db3aa53e
/*-
* test_case = 7
* key = 0xaa repeated 80 times
* key_len = 80
* data = "Test Using Larger Than Block-Size Key and Larger Than One Block-Size Data"
* data_len = 73
* digest = 0x6f630fad67cda0ee1fb1f562db3aa53e
*/
keylen = 80;
memset(key, 0xaa, keylen);


@ -1,5 +1,5 @@
/*-
* Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -68,7 +68,9 @@ typedef struct sctp_key {
typedef struct sctp_shared_key {
LIST_ENTRY(sctp_shared_key) next;
sctp_key_t *key; /* key text */
uint32_t refcount; /* reference count */
uint16_t keyid; /* shared key ID */
uint8_t deactivated; /* key is deactivated */
} sctp_sharedkey_t;
LIST_HEAD(sctp_keyhead, sctp_shared_key);
@ -91,10 +93,11 @@ typedef struct sctp_authinfo {
sctp_key_t *random; /* local random key (concatenated) */
uint32_t random_len; /* local random number length for param */
sctp_key_t *peer_random;/* peer's random key (concatenated) */
sctp_key_t *assoc_key; /* cached concatenated send key */
sctp_key_t *recv_key; /* cached concatenated recv key */
uint16_t active_keyid; /* active send keyid */
uint16_t assoc_keyid; /* current send keyid (cached) */
uint16_t recv_keyid; /* last recv keyid (cached) */
sctp_key_t *assoc_key; /* cached send key */
sctp_key_t *recv_key; /* cached recv key */
} sctp_authinfo_t;
@ -117,10 +120,13 @@ extern int sctp_auth_add_chunk(uint8_t chunk, sctp_auth_chklist_t * list);
extern int sctp_auth_delete_chunk(uint8_t chunk, sctp_auth_chklist_t * list);
extern size_t sctp_auth_get_chklist_size(const sctp_auth_chklist_t * list);
extern void sctp_auth_set_default_chunks(sctp_auth_chklist_t * list);
extern int
sctp_serialize_auth_chunks(const sctp_auth_chklist_t * list, uint8_t * ptr);
extern int sctp_pack_auth_chunks(const sctp_auth_chklist_t * list, uint8_t * ptr);
extern int
extern int
sctp_serialize_auth_chunks(const sctp_auth_chklist_t * list,
uint8_t * ptr);
extern int
sctp_pack_auth_chunks(const sctp_auth_chklist_t * list,
uint8_t * ptr);
extern int
sctp_unpack_auth_chunks(const uint8_t * ptr, uint8_t num_chunks,
sctp_auth_chklist_t * list);
@ -139,14 +145,20 @@ sctp_compute_hashkey(sctp_key_t * key1, sctp_key_t * key2,
extern sctp_sharedkey_t *sctp_alloc_sharedkey(void);
extern void sctp_free_sharedkey(sctp_sharedkey_t * skey);
extern sctp_sharedkey_t *
sctp_find_sharedkey(struct sctp_keyhead *shared_keys, uint16_t key_id);
extern void
sctp_find_sharedkey(struct sctp_keyhead *shared_keys,
uint16_t key_id);
extern int
sctp_insert_sharedkey(struct sctp_keyhead *shared_keys,
sctp_sharedkey_t * new_skey);
extern int
extern int
sctp_copy_skeylist(const struct sctp_keyhead *src,
struct sctp_keyhead *dest);
/* ref counts on shared keys, by key id */
extern void sctp_auth_key_acquire(struct sctp_tcb *stcb, uint16_t keyid);
extern void sctp_auth_key_release(struct sctp_tcb *stcb, uint16_t keyid);
/* hmac list handling */
extern sctp_hmaclist_t *sctp_alloc_hmaclist(uint8_t num_hmacs);
extern void sctp_free_hmaclist(sctp_hmaclist_t * list);
@ -167,25 +179,24 @@ extern void sctp_free_authinfo(sctp_authinfo_t * authinfo);
/* keyed-HMAC functions */
extern uint32_t sctp_get_auth_chunk_len(uint16_t hmac_algo);
extern uint32_t sctp_get_hmac_digest_len(uint16_t hmac_algo);
extern uint32_t
extern uint32_t
sctp_hmac(uint16_t hmac_algo, uint8_t * key, uint32_t keylen,
uint8_t * text, uint32_t textlen, uint8_t * digest);
extern int
extern int
sctp_verify_hmac(uint16_t hmac_algo, uint8_t * key, uint32_t keylen,
uint8_t * text, uint32_t textlen, uint8_t * digest,
uint32_t digestlen);
extern uint32_t
uint8_t * text, uint32_t textlen, uint8_t * digest, uint32_t digestlen);
extern uint32_t
sctp_compute_hmac(uint16_t hmac_algo, sctp_key_t * key,
uint8_t * text, uint32_t textlen, uint8_t * digest);
extern int sctp_auth_is_supported_hmac(sctp_hmaclist_t * list, uint16_t id);
/* mbuf versions */
extern uint32_t
extern uint32_t
sctp_hmac_m(uint16_t hmac_algo, uint8_t * key, uint32_t keylen,
struct mbuf *m, uint32_t m_offset, uint8_t * digest, uint32_t trailer);
extern uint32_t
sctp_compute_hmac_m(uint16_t hmac_algo, sctp_key_t * key, struct mbuf *m,
uint32_t m_offset, uint8_t * digest);
extern uint32_t
sctp_compute_hmac_m(uint16_t hmac_algo, sctp_key_t * key,
struct mbuf *m, uint32_t m_offset, uint8_t * digest);
/*
* authentication routines
@ -196,31 +207,31 @@ extern int sctp_delete_sharedkey(struct sctp_tcb *stcb, uint16_t keyid);
extern int sctp_delete_sharedkey_ep(struct sctp_inpcb *inp, uint16_t keyid);
extern int sctp_auth_setactivekey(struct sctp_tcb *stcb, uint16_t keyid);
extern int sctp_auth_setactivekey_ep(struct sctp_inpcb *inp, uint16_t keyid);
extern int sctp_deact_sharedkey(struct sctp_tcb *stcb, uint16_t keyid);
extern int sctp_deact_sharedkey_ep(struct sctp_inpcb *inp, uint16_t keyid);
extern void
extern void
sctp_auth_get_cookie_params(struct sctp_tcb *stcb, struct mbuf *m,
uint32_t offset, uint32_t length);
extern void
extern void
sctp_fill_hmac_digest_m(struct mbuf *m, uint32_t auth_offset,
struct sctp_auth_chunk *auth,
struct sctp_tcb *stcb);
struct sctp_auth_chunk *auth, struct sctp_tcb *stcb, uint16_t key_id);
extern struct mbuf *
sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
struct sctp_auth_chunk **auth_ret,
uint32_t * offset, struct sctp_tcb *stcb,
uint8_t chunk);
extern int
struct sctp_auth_chunk **auth_ret, uint32_t * offset,
struct sctp_tcb *stcb, uint8_t chunk);
extern int
sctp_handle_auth(struct sctp_tcb *stcb, struct sctp_auth_chunk *ch,
struct mbuf *m, uint32_t offset);
extern void
extern void
sctp_notify_authentication(struct sctp_tcb *stcb,
uint32_t indication, uint16_t keyid,
uint16_t alt_keyid);
extern int
sctp_validate_init_auth_params(struct mbuf *m, int offset, int limit);
extern void
sctp_initialize_auth_params(struct sctp_inpcb *inp, struct sctp_tcb *stcb);
uint32_t indication, uint16_t keyid, uint16_t alt_keyid, int so_locked);
extern int
sctp_validate_init_auth_params(struct mbuf *m, int offset,
int limit);
extern void
sctp_initialize_auth_params(struct sctp_inpcb *inp,
struct sctp_tcb *stcb);
/* test functions */
extern void sctp_test_hmac_sha1(void);


@ -1,5 +1,5 @@
/*-
* Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -313,7 +313,6 @@ __FBSDID("$FreeBSD$");
/* Minimum number of bytes read by user before we
* consider doing a rwnd update
*/
#define SCTP_MIN_READ_BEFORE_CONSIDERING 3000
/*
* default HMAC for cookies, etc... use one of the AUTH HMAC id's
@ -382,6 +381,14 @@ __FBSDID("$FreeBSD$");
* hit this value) */
#define SCTP_DATAGRAM_RESEND 4
#define SCTP_DATAGRAM_ACKED 10010
/* EY
* If a tsn is nr-gapped, it is first tagged as NR_MARKED and then NR_ACKED
* When yet another nr-sack is received, if a particular TSN's sent tag
* is observed to be NR_ACKED after gap-ack info is processed, this implies
* that particular TSN is reneged
*/
#define SCTP_DATAGRAM_NR_ACKED 10020
#define SCTP_DATAGRAM_NR_MARKED 20005
#define SCTP_DATAGRAM_MARKED 20010
#define SCTP_FORWARD_TSN_SKIP 30010
@ -465,6 +472,9 @@ __FBSDID("$FreeBSD$");
#define SCTP_SET_PRIM_ADDR 0xc004
#define SCTP_SUCCESS_REPORT 0xc005
#define SCTP_ULP_ADAPTATION 0xc006
/* behave-nat-draft */
#define SCTP_HAS_NAT_SUPPORT 0xc007
#define SCTP_NAT_VTAGS 0xc008
/* Notification error codes */
#define SCTP_NOTIFY_DATAGRAM_UNSENT 0x0001
@ -553,7 +563,13 @@ __FBSDID("$FreeBSD$");
#define SCTP_INITIAL_MAPPING_ARRAY 16
/* how much we grow the mapping array each call */
#define SCTP_MAPPING_ARRAY_INCR 32
/* EY 05/13/08 - nr_sack version of the previous 3 constants */
/* Maximum the nr mapping array will grow to (TSN mapping array) */
#define SCTP_NR_MAPPING_ARRAY 512
/* size of the initial malloc on the nr mapping array */
#define SCTP_INITIAL_NR_MAPPING_ARRAY 16
/* how much we grow the nr mapping array each call */
#define SCTP_NR_MAPPING_ARRAY_INCR 32
/*
* Here we define the timer types used by the implementation as arguments in
* the set/get timer type calls.
@ -607,7 +623,6 @@ __FBSDID("$FreeBSD$");
* Number of ticks before the soxwakeup() event that is delayed is sent AFTER
* the accept() call
*/
#define SCTP_EVENTWAKEUP_WAIT_TICKS 3000
/*
* Of course we really don't collect stale cookies, being folks of discerning
@ -616,7 +631,6 @@ __FBSDID("$FreeBSD$");
* up...this is an implementation dependent treatment. In ours we do not ask
* for an extension of time, but just retry this many times...
*/
#define SCTP_MAX_STALE_COOKIES_I_COLLECT 10
/* max number of TSN's dup'd that I will hold */
#define SCTP_MAX_DUP_TSNS 20
@ -625,11 +639,8 @@ __FBSDID("$FreeBSD$");
* Here we define the types used when setting the retry amounts.
*/
/* constants for type of set */
#define SCTP_MAXATTEMPT_INIT 2
#define SCTP_MAXATTEMPT_SEND 3
/* Maximum TSN's we will summarize in a drop report */
#define SCTP_MAX_DROP_REPORT 16
/* How many drop re-attempts we make on INIT/COOKIE-ECHO */
#define SCTP_RETRY_DROPPED_THRESH 4
@ -638,7 +649,6 @@ __FBSDID("$FreeBSD$");
* And the max we will keep a history of in the tcb which MUST be lower than
* 256.
*/
#define SCTP_MAX_DROP_SAVE_REPORT 16
/*
* Here we define the default timers and the default number of attemts we
@ -711,15 +721,8 @@ __FBSDID("$FreeBSD$");
#define SCTP_DEF_MAX_PATH_RTX 5
#define SCTP_DEF_PMTU_RAISE_SEC 600 /* 10 min between raise attempts */
#define SCTP_DEF_PMTU_MIN 600
#define SCTP_MSEC_IN_A_SEC 1000
#define SCTP_USEC_IN_A_SEC 1000000
#define SCTP_NSEC_IN_A_SEC 1000000000
#define SCTP_MAX_OUTSTANDING_DG 10000
/* How many streams I request initially by default */
#define SCTP_OSTREAM_INITIAL 10
@ -727,9 +730,7 @@ __FBSDID("$FreeBSD$");
* How many smallest_mtu's need to increase before a window update sack is
* sent (should be a power of 2).
*/
#define SCTP_SEG_TO_RWND_UPD 32
/* Send window update (incr * this > hiwat). Should be a power of 2 */
#define SCTP_SCALE_OF_RWND_TO_UPD 4
#define SCTP_MINIMAL_RWND (4096) /* minimal rwnd */
#define SCTP_ADDRMAX 24
@ -786,15 +787,6 @@ __FBSDID("$FreeBSD$");
/* amount peer is obligated to have in rwnd or I will abort */
#define SCTP_MIN_RWND 1500
#define SCTP_WINDOW_MIN 1500 /* smallest rwnd can be */
#define SCTP_WINDOW_MAX 1048576 /* biggest I can grow rwnd to. My playing
* around suggests a value greater than 64k
* does not do much, I guess via the kernel
* limitations on the stream/socket. */
/* I can handle a 1meg re-assembly */
#define SCTP_DEFAULT_MAXMSGREASM 1048576
#define SCTP_DEFAULT_MAXSEGMENT 65535
#define SCTP_CHUNK_BUFFER_SIZE 512
@ -813,33 +805,35 @@ __FBSDID("$FreeBSD$");
/*
* SCTP upper layer notifications
*/
#define SCTP_NOTIFY_ASSOC_UP 1
#define SCTP_NOTIFY_ASSOC_DOWN 2
#define SCTP_NOTIFY_INTERFACE_DOWN 3
#define SCTP_NOTIFY_INTERFACE_UP 4
#define SCTP_NOTIFY_DG_FAIL 5
#define SCTP_NOTIFY_STRDATA_ERR 6
#define SCTP_NOTIFY_ASSOC_ABORTED 7
#define SCTP_NOTIFY_PEER_OPENED_STREAM 8
#define SCTP_NOTIFY_STREAM_OPENED_OK 9
#define SCTP_NOTIFY_ASSOC_RESTART 10
#define SCTP_NOTIFY_HB_RESP 11
#define SCTP_NOTIFY_ASCONF_SUCCESS 12
#define SCTP_NOTIFY_ASCONF_FAILED 13
#define SCTP_NOTIFY_PEER_SHUTDOWN 14
#define SCTP_NOTIFY_ASCONF_ADD_IP 15
#define SCTP_NOTIFY_ASCONF_DELETE_IP 16
#define SCTP_NOTIFY_ASCONF_SET_PRIMARY 17
#define SCTP_NOTIFY_ASSOC_UP 1
#define SCTP_NOTIFY_ASSOC_DOWN 2
#define SCTP_NOTIFY_INTERFACE_DOWN 3
#define SCTP_NOTIFY_INTERFACE_UP 4
#define SCTP_NOTIFY_DG_FAIL 5
#define SCTP_NOTIFY_STRDATA_ERR 6
#define SCTP_NOTIFY_ASSOC_ABORTED 7
#define SCTP_NOTIFY_PEER_OPENED_STREAM 8
#define SCTP_NOTIFY_STREAM_OPENED_OK 9
#define SCTP_NOTIFY_ASSOC_RESTART 10
#define SCTP_NOTIFY_HB_RESP 11
#define SCTP_NOTIFY_ASCONF_SUCCESS 12
#define SCTP_NOTIFY_ASCONF_FAILED 13
#define SCTP_NOTIFY_PEER_SHUTDOWN 14
#define SCTP_NOTIFY_ASCONF_ADD_IP 15
#define SCTP_NOTIFY_ASCONF_DELETE_IP 16
#define SCTP_NOTIFY_ASCONF_SET_PRIMARY 17
#define SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION 18
#define SCTP_NOTIFY_INTERFACE_CONFIRMED 20
#define SCTP_NOTIFY_STR_RESET_RECV 21
#define SCTP_NOTIFY_STR_RESET_SEND 22
#define SCTP_NOTIFY_STR_RESET_FAILED_OUT 23
#define SCTP_NOTIFY_STR_RESET_FAILED_IN 24
#define SCTP_NOTIFY_AUTH_NEW_KEY 25
#define SCTP_NOTIFY_AUTH_KEY_CONFLICT 26
#define SCTP_NOTIFY_SPECIAL_SP_FAIL 27
#define SCTP_NOTIFY_MAX 27
#define SCTP_NOTIFY_INTERFACE_CONFIRMED 20
#define SCTP_NOTIFY_STR_RESET_RECV 21
#define SCTP_NOTIFY_STR_RESET_SEND 22
#define SCTP_NOTIFY_STR_RESET_FAILED_OUT 23
#define SCTP_NOTIFY_STR_RESET_FAILED_IN 24
#define SCTP_NOTIFY_AUTH_NEW_KEY 25
#define SCTP_NOTIFY_AUTH_FREE_KEY 26
#define SCTP_NOTIFY_SPECIAL_SP_FAIL 27
#define SCTP_NOTIFY_NO_PEER_AUTH 28
#define SCTP_NOTIFY_SENDER_DRY 29
#define SCTP_NOTIFY_MAX 29
/* This is the value for messages that are NOT completely
* copied down where we will start to split the message.
@ -970,7 +964,6 @@ __FBSDID("$FreeBSD$");
#endif /* !IPPROTO_SCTP */
#define SCTP_MAX_DATA_BUNDLING 256
#define SCTP_MAX_CONTROL_BUNDLING 20
/* modular comparison */
/* True if a > b (mod = M) */


@ -121,6 +121,14 @@ struct sctp_asconf_addr_param { /* an ASCONF address parameter */
struct sctp_ipv6addr_param addrp; /* max storage size */
} SCTP_PACKED;
struct sctp_asconf_tag_param { /* an ASCONF NAT-Vtag parameter */
struct sctp_asconf_paramhdr aph; /* asconf "parameter" */
uint32_t local_vtag;
uint32_t remote_vtag;
} SCTP_PACKED;
struct sctp_asconf_addrv4_param { /* an ASCONF address (v4) parameter */
struct sctp_asconf_paramhdr aph; /* asconf "parameter" */
struct sctp_ipv4addr_param addrp; /* max storage size */
@ -206,6 +214,15 @@ struct sctp_state_cookie { /* this is our definition... */
*/
} SCTP_PACKED;
/* Used for NAT state error cause */
struct sctp_missing_nat_state {
uint16_t cause;
uint16_t length;
uint8_t data[0];
} SCTP_PACKED;
struct sctp_inv_mandatory_param {
uint16_t cause;
uint16_t length;
@ -268,6 +285,30 @@ struct sctp_sack_chunk {
} SCTP_PACKED;
/* EY Following 3 structs define NR Selective Ack (NR_SACK) chunk */
struct sctp_nr_gap_ack_block {
uint16_t start; /* NR Gap Ack block start */
uint16_t end; /* NR Gap Ack block end */
} SCTP_PACKED;
struct sctp_nr_sack {
uint32_t cum_tsn_ack; /* cumulative TSN Ack */
uint32_t a_rwnd; /* updated a_rwnd of sender */
uint16_t num_gap_ack_blks; /* number of Gap Ack blocks */
uint16_t num_nr_gap_ack_blks; /* number of NR Gap Ack blocks */
uint16_t num_dup_tsns; /* number of duplicate TSNs */
uint16_t reserved; /* not currently used */
/* struct sctp_gap_ack_block's follow */
/* struct sctp_nr_gap_ack_block's follow */
/* uint32_t duplicate_tsn's follow */
} SCTP_PACKED;
struct sctp_nr_sack_chunk {
struct sctp_chunkhdr ch;
struct sctp_nr_sack nr_sack;
} SCTP_PACKED;
/* Heartbeat Request (HEARTBEAT) */
struct sctp_heartbeat {
struct sctp_heartbeat_info_param hb_info;

File diff suppressed because it is too large.


@ -99,6 +99,16 @@ void
sctp_handle_sack(struct mbuf *m, int offset, struct sctp_sack_chunk *, struct sctp_tcb *,
struct sctp_nets *, int *, int, uint32_t);
/* EY does "exactly" the same as sctp_express_handle_sack */
void
sctp_express_handle_nr_sack(struct sctp_tcb *stcb, uint32_t cumack,
uint32_t rwnd, int nonce_sum_flag, int *abort_now);
/* EY nr_sack version of sctp_handle_sack */
void
sctp_handle_nr_sack(struct mbuf *m, int offset, struct sctp_nr_sack_chunk *, struct sctp_tcb *,
struct sctp_nets *, int *, int, uint32_t);
/* draft-ietf-tsvwg-usctp */
void
sctp_handle_forward_tsn(struct sctp_tcb *,


@ -1,5 +1,5 @@
/*-
* Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -316,6 +316,8 @@ sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb,
asoc->streamoutcnt = asoc->pre_open_streams;
/* init tsn's */
asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1;
/* EY - nr_sack: initialize highest tsn in nr_mapping_array */
asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map;
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
}
@ -323,6 +325,11 @@ sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb,
asoc->str_reset_seq_in = asoc->asconf_seq_in + 1;
asoc->mapping_array_base_tsn = ntohl(init->initial_tsn);
/*
* EY 05/13/08 - nr_sack: initialize nr_mapping array's base tsn
* like above
*/
asoc->nr_mapping_array_base_tsn = ntohl(init->initial_tsn);
asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->asconf_seq_in;
asoc->last_echo_tsn = asoc->asconf_seq_in;
asoc->advanced_peer_ack_point = asoc->last_acked_seq;
@ -393,6 +400,7 @@ sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
struct mbuf *op_err;
int retval, abort_flag;
uint32_t initack_limit;
int nat_friendly = 0;
/* First verify that we have no illegal param's */
abort_flag = 0;
@ -400,7 +408,7 @@ sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
op_err = sctp_arethere_unrecognized_parameters(m,
(offset + sizeof(struct sctp_init_chunk)),
&abort_flag, (struct sctp_chunkhdr *)cp);
&abort_flag, (struct sctp_chunkhdr *)cp, &nat_friendly);
if (abort_flag) {
/* Send an abort and notify peer */
sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_CAUSE_PROTOCOL_VIOLATION, op_err, SCTP_SO_NOT_LOCKED);
@ -408,6 +416,7 @@ sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
return (-1);
}
asoc = &stcb->asoc;
asoc->peer_supports_nat = (uint8_t) nat_friendly;
/* process the peer's parameters in the INIT-ACK */
retval = sctp_process_init((struct sctp_init_chunk *)cp, stcb, net);
if (retval < 0) {
@ -637,6 +646,69 @@ sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp,
}
}
static int
sctp_handle_nat_colliding_state(struct sctp_tcb *stcb)
{
/*
* return 0 means we want you to proceed with the abort non-zero
* means no abort processing
*/
struct sctpasochead *head;
if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
/* generate a new vtag and send init */
LIST_REMOVE(stcb, sctp_asocs);
stcb->asoc.my_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
/*
* put it in the bucket in the vtag hash of assoc's for the
* system
*/
LIST_INSERT_HEAD(head, stcb, sctp_asocs);
sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
return (1);
}
if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
/*
* treat like a case where the cookie expired i.e.: - dump
* current cookie. - generate a new vtag. - resend init.
*/
/* generate a new vtag and send init */
LIST_REMOVE(stcb, sctp_asocs);
stcb->asoc.state &= ~SCTP_STATE_COOKIE_ECHOED;
stcb->asoc.state |= SCTP_STATE_COOKIE_WAIT;
sctp_stop_all_cookie_timers(stcb);
sctp_toss_old_cookies(stcb, &stcb->asoc);
stcb->asoc.my_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
/*
* put it in the bucket in the vtag hash of assoc's for the
* system
*/
LIST_INSERT_HEAD(head, stcb, sctp_asocs);
sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
return (1);
}
return (0);
}
static int
sctp_handle_nat_missing_state(struct sctp_tcb *stcb,
struct sctp_nets *net)
{
/*
* return 0 means we want you to proceed with the abort non-zero
* means no abort processing
*/
if (stcb->asoc.peer_supports_auth == 0) {
SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_nat_missing_state: Peer does not support AUTH, cannot send an asconf\n");
return (0);
}
sctp_asconf_send_nat_state_update(stcb, net);
return (1);
}
static void
sctp_handle_abort(struct sctp_abort_chunk *cp,
struct sctp_tcb *stcb, struct sctp_nets *net)
@ -645,11 +717,40 @@ sctp_handle_abort(struct sctp_abort_chunk *cp,
struct socket *so;
#endif
uint16_t len;
SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n");
if (stcb == NULL)
return;
len = ntohs(cp->ch.chunk_length);
if (len > sizeof(struct sctp_chunkhdr)) {
/*
* Need to check the cause codes for our two magic nat
* aborts which don't kill the assoc necessarily.
*/
struct sctp_abort_chunk *cpnext;
struct sctp_missing_nat_state *natc;
uint16_t cause;
cpnext = cp;
cpnext++;
natc = (struct sctp_missing_nat_state *)cpnext;
cause = ntohs(natc->cause);
if (cause == SCTP_CAUSE_NAT_COLLIDING_STATE) {
SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags:%x\n",
cp->ch.chunk_flags);
if (sctp_handle_nat_colliding_state(stcb)) {
return;
}
} else if (cause == SCTP_CAUSE_NAT_MISSING_STATE) {
SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags:%x\n",
cp->ch.chunk_flags);
if (sctp_handle_nat_missing_state(stcb, net)) {
return;
}
}
}
/* stop any receive timers */
sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
/* notify user of the abort and clean up... */
@ -926,6 +1027,9 @@ sctp_process_unrecog_param(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr)
case SCTP_SUPPORTED_CHUNK_EXT:
break;
/* draft-ietf-tsvwg-addip-sctp */
case SCTP_HAS_NAT_SUPPORT:
stcb->asoc.peer_supports_nat = 0;
break;
case SCTP_ECN_NONCE_SUPPORTED:
stcb->asoc.peer_supports_ecn_nonce = 0;
stcb->asoc.ecn_nonce_allowed = 0;
@ -990,6 +1094,20 @@ sctp_handle_error(struct sctp_chunkhdr *ch,
SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %d back? We have a bug :/ (or do they?)\n",
error_type);
break;
case SCTP_CAUSE_NAT_COLLIDING_STATE:
SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags:%x\n",
ch->chunk_flags);
if (sctp_handle_nat_colliding_state(stcb)) {
return (0);
}
break;
case SCTP_CAUSE_NAT_MISSING_STATE:
SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags:%x\n",
ch->chunk_flags);
if (sctp_handle_nat_missing_state(stcb, net)) {
return (0);
}
break;
case SCTP_CAUSE_STALE_COOKIE:
/*
* We only act if we have echoed a cookie and are
@ -1022,9 +1140,9 @@ sctp_handle_error(struct sctp_chunkhdr *ch,
return (-1);
}
/* blast back to INIT state */
sctp_toss_old_cookies(stcb, &stcb->asoc);
asoc->state &= ~SCTP_STATE_COOKIE_ECHOED;
asoc->state |= SCTP_STATE_COOKIE_WAIT;
sctp_stop_all_cookie_timers(stcb);
sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
}
@ -1213,6 +1331,14 @@ sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset,
return (0);
}
static struct sctp_tcb *
sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
struct sctp_inpcb *inp, struct sctp_nets **netp,
struct sockaddr *init_src, int *notification,
int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
uint32_t vrf_id, uint16_t port);
/*
* handle a state cookie for an existing association m: input packet mbuf
@ -1223,19 +1349,23 @@ sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset,
static struct sctp_tcb *
sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net,
struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets **netp,
struct sockaddr *init_src, int *notification, sctp_assoc_t * sac_assoc_id,
uint32_t vrf_id)
uint32_t vrf_id, int auth_skipped, uint32_t auth_offset, uint32_t auth_len, uint16_t port)
{
struct sctp_association *asoc;
struct sctp_init_chunk *init_cp, init_buf;
struct sctp_init_ack_chunk *initack_cp, initack_buf;
struct sctp_nets *net;
struct mbuf *op_err;
struct sctp_paramhdr *ph;
int chk_length;
int init_offset, initack_offset, i;
int retval;
int spec_flag = 0;
uint32_t how_indx;
net = *netp;
/* I know that the TCB is non-NULL from the caller */
asoc = &stcb->asoc;
for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) {
@ -1247,9 +1377,6 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
}
if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
/* SHUTDOWN came in after sending INIT-ACK */
struct mbuf *op_err;
struct sctp_paramhdr *ph;
sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
0, M_DONTWAIT, 1, MT_DATA);
@ -1457,9 +1584,50 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
asoc->cookie_how[how_indx] = 6;
return (NULL);
}
if (ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag &&
(ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag ||
init_cp->init.initiate_tag == 0)) {
/*
* If the peer supports NAT and the association is already
* established, send back an ABORT (colliding state).
*/
if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) &&
(asoc->peer_supports_nat) &&
((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
(asoc->peer_vtag == 0)))) {
/*
* Special case - the peer supports NAT. We may have handed out
* the same tag on two INITs since only one of them became
* established, i.e. we get an INIT from host-1 behind the NAT
* and respond with tag-a, then get an INIT from host-2 behind
* the NAT and hand out tag-a again. We then bring up host-1's
* (or host-2's) assoc, and the cookie from host-2 (or host-1)
* arrives. Now we have colliding state and must send an abort
* with the colliding state indication.
*/
op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
0, M_DONTWAIT, 1, MT_DATA);
if (op_err == NULL) {
/* FOOBAR */
return (NULL);
}
/* pre-reserve some space */
#ifdef INET6
SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
#else
SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
#endif
SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
/* Set the len */
SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr);
ph = mtod(op_err, struct sctp_paramhdr *);
ph->param_type = htons(SCTP_CAUSE_NAT_COLLIDING_STATE);
ph->param_length = htons(sizeof(struct sctp_paramhdr));
sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
return (NULL);
}
if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
(asoc->peer_vtag == 0))) {
/*
* case B in Section 5.2.4 Table 2: MXAA or MOAA my info
* should be ok, re-accept peer info
@ -1612,6 +1780,17 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
cookie->tie_tag_peer_vtag != 0) {
struct sctpasochead *head;
if (asoc->peer_supports_nat) {
/*
* This is a gross gross hack. just call the
* cookie_new code since we are allowing a duplicate
* association. I hope this works...
*/
return (sctp_process_cookie_new(m, iphlen, offset, sh, cookie, cookie_len,
inp, netp, init_src, notification,
auth_skipped, auth_offset, auth_len,
vrf_id, port));
}
/*
* case A in Section 5.2.4 Table 2: XXMM (peer restarted)
*/
@ -1660,6 +1839,12 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
memset(asoc->mapping_array, 0,
asoc->mapping_array_size);
}
/* EY 05/13/08 - nr_sack version of the above if statement */
if (asoc->nr_mapping_array && SCTP_BASE_SYSCTL(sctp_nr_sack_on_off)
&& asoc->peer_supports_nr_sack) {
memset(asoc->nr_mapping_array, 0,
asoc->nr_mapping_array_size);
}
SCTP_TCB_UNLOCK(stcb);
SCTP_INP_INFO_WLOCK();
SCTP_INP_WLOCK(stcb->sctp_ep);
@ -1689,14 +1874,6 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
*/
LIST_INSERT_HEAD(head, stcb, sctp_asocs);
/* Is this the first restart? */
if (stcb->asoc.in_restart_hash == 0) {
/* Ok add it to assoc_id vtag hash */
head = &SCTP_BASE_INFO(sctp_restarthash)[SCTP_PCBHASH_ASOC(stcb->asoc.assoc_id,
SCTP_BASE_INFO(hashrestartmark))];
LIST_INSERT_HEAD(head, stcb, sctp_tcbrestarhash);
stcb->asoc.in_restart_hash = 1;
}
/* process the INIT info (peer's info) */
SCTP_TCB_SEND_UNLOCK(stcb);
SCTP_INP_WUNLOCK(stcb->sctp_ep);
@ -1746,7 +1923,7 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
* cookie-echo chunk length: length of the cookie chunk to: where the init
* was from returns a new TCB
*/
static struct sctp_tcb *
struct sctp_tcb *
sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
struct sctp_inpcb *inp, struct sctp_nets **netp,
@ -1886,7 +2063,7 @@ sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
}
/* process the INIT-ACK info (my info) */
old_tag = asoc->my_vtag;
asoc->assoc_id = asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
@ -2089,6 +2266,18 @@ sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
return (stcb);
}
/*
* CODE LIKE THIS NEEDS TO RUN IF the peer supports the NAT extension, i.e.
* we NEED to make sure we are not already using the vtag. If so we
* need to send back an ABORT-TRY-AGAIN-WITH-NEW-TAG with no middle box bit!
head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(tag,
SCTP_BASE_INFO(hashasocmark))];
LIST_FOREACH(stcb, head, sctp_asocs) {
if ((stcb->asoc.my_vtag == tag) && (stcb->rport == rport) && (inp == stcb->sctp_ep)) {
-- SEND ABORT - TRY AGAIN --
}
}
*/
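A compilable rendering of the duplicate-tag test the comment sketches: before handing out a tag, scan the bucket for an association that already uses the same tag for the same peer port on the same endpoint. The table and lookup below are toy user-space stand-ins for the kernel's vtag hash and TCB list.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct assoc {
    uint32_t my_vtag;
    uint16_t rport;
    const void *ep;     /* owning endpoint, standing in for stcb->sctp_ep */
};

/* Is `tag` already in use for this peer port on this endpoint? */
static bool
vtag_in_use(const struct assoc *tbl, size_t n, uint32_t tag,
    uint16_t rport, const void *ep)
{
    size_t i;

    for (i = 0; i < n; i++) {
        if (tbl[i].my_vtag == tag && tbl[i].rport == rport &&
            tbl[i].ep == ep)
            return (true);
    }
    return (false);
}

int
main(void)
{
    int ep1, ep2;
    struct assoc tbl[] = {
        { 0xdeadbeef, 6001, &ep1 },
        { 0xdeadbeef, 6001, &ep2 },     /* same tag on another endpoint is allowed */
    };

    printf("%d\n", vtag_in_use(tbl, 2, 0xdeadbeef, 6001, &ep1));   /* 1: collision */
    printf("%d\n", vtag_in_use(tbl, 2, 0xdeadbeef, 6002, &ep1));   /* 0: different peer port */
    return (0);
}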
/*
* handles a COOKIE-ECHO message stcb: modified to either a new or left as
@ -2422,8 +2611,8 @@ sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
/* this is abnormal... cookie-echo on existing TCB */
had_a_existing_tcb = 1;
*stcb = sctp_process_cookie_existing(m, iphlen, offset, sh,
cookie, cookie_len, *inp_p, *stcb, *netp, to,
&notification, &sac_restart_id, vrf_id);
cookie, cookie_len, *inp_p, *stcb, netp, to,
&notification, &sac_restart_id, vrf_id, auth_skipped, auth_offset, auth_len, port);
}
if (*stcb == NULL) {
@ -2544,8 +2733,6 @@ sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
sctp_free_chunklist(inp->sctp_ep.local_auth_chunks);
inp->sctp_ep.local_auth_chunks =
sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks);
(void)sctp_copy_skeylist(&(*inp_p)->sctp_ep.shared_keys,
&inp->sctp_ep.shared_keys);
/*
* Now we must move it from one hash table to
@ -3040,6 +3227,10 @@ process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
/* resend the sack */
sctp_send_sack(stcb);
break;
/* EY for nr_sacks */
case SCTP_NR_SELECTIVE_ACK:
sctp_send_nr_sack(stcb); /* EY resend the nr-sack */
break;
case SCTP_HEARTBEAT_REQUEST:
/* resend a demand HB */
if ((stcb->asoc.overall_error_count + 3) < stcb->asoc.max_send_times) {
@ -3296,6 +3487,17 @@ sctp_handle_stream_reset_response(struct sctp_tcb *stcb,
stcb->asoc.tsn_last_delivered = stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn);
memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
/*
* EY 05/13/08 - nr_sack: to keep
* nr_mapping array consistent
* with mapping_array
*/
if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack) {
stcb->asoc.highest_tsn_inside_nr_map = stcb->asoc.highest_tsn_inside_map;
stcb->asoc.nr_mapping_array_base_tsn = stcb->asoc.mapping_array_base_tsn;
memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.nr_mapping_array_size);
}
stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn);
stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn;
@ -3402,6 +3604,15 @@ sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb,
stcb->asoc.tsn_last_delivered = stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
stcb->asoc.mapping_array_base_tsn = stcb->asoc.highest_tsn_inside_map + 1;
memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
/*
* EY 05/13/08 -nr_sack: to keep nr_mapping array consistent
* with mapping array
*/
if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack) {
stcb->asoc.highest_tsn_inside_nr_map = stcb->asoc.highest_tsn_inside_map;
stcb->asoc.nr_mapping_array_base_tsn = stcb->asoc.highest_tsn_inside_map + 1;
memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.nr_mapping_array_size);
}
atomic_add_int(&stcb->asoc.sending_seq, 1);
/* save off historical data for retrans */
stcb->asoc.last_sending_seq[1] = stcb->asoc.last_sending_seq[0];
@ -3940,7 +4151,7 @@ __attribute__((noinline))
if (asconf_len < sizeof(struct sctp_asconf_paramhdr))
break;
stcb = sctp_findassociation_ep_asconf(m, iphlen,
*offset, sh, &inp, netp);
*offset, sh, &inp, netp, vrf_id);
if (stcb != NULL)
break;
asconf_offset += SCTP_SIZE32(asconf_len);
@ -4043,6 +4254,8 @@ __attribute__((noinline))
* process all control chunks...
*/
if (((ch->chunk_type == SCTP_SELECTIVE_ACK) ||
/* EY */
(ch->chunk_type == SCTP_NR_SELECTIVE_ACK) ||
(ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) &&
(SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
/* implied cookie-ack.. we must have lost the ack */
@ -4328,6 +4541,11 @@ __attribute__((noinline))
sctp_handle_sack(m, *offset,
sack, stcb, *netp, &abort_now, chk_length, a_rwnd);
}
if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
(stcb->asoc.stream_queue_cnt == 0)) {
sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
}
if (abort_now) {
/* ABORT signal from sack processing */
*offset = length;
@ -4335,6 +4553,102 @@ __attribute__((noinline))
}
}
break;
/*
* EY - nr_sack: If the received chunk is an
* nr_sack chunk
*/
case SCTP_NR_SELECTIVE_ACK:
SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_NR_SACK\n");
SCTP_STAT_INCR(sctps_recvsacks);
{
struct sctp_nr_sack_chunk *nr_sack;
int abort_now = 0;
uint32_t a_rwnd, cum_ack;
uint16_t num_seg, num_nr_seg;
int nonce_sum_flag, all_bit;
if ((stcb == NULL) || (chk_length < sizeof(struct sctp_nr_sack_chunk))) {
SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on nr_sack chunk, too small\n");
ignore_nr_sack:
*offset = length;
if (locked_tcb) {
SCTP_TCB_UNLOCK(locked_tcb);
}
return (NULL);
}
/*
* EY - nr_sacks have not been negotiated but
* the peer end sent an nr_sack; discard it
* via the unknown chunk handling
*/
if (!(SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)) {
goto unknown_chunk;
}
if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
/*-
* If we have sent a shutdown-ack, we will pay no
* attention to a sack sent in to us since
* we don't care anymore.
*/
goto ignore_nr_sack;
}
nr_sack = (struct sctp_nr_sack_chunk *)ch;
nonce_sum_flag = ch->chunk_flags & SCTP_SACK_NONCE_SUM;
all_bit = ch->chunk_flags & SCTP_NR_SACK_ALL_BIT;
cum_ack = ntohl(nr_sack->nr_sack.cum_tsn_ack);
num_seg = ntohs(nr_sack->nr_sack.num_gap_ack_blks);
/*
* EY - if the All bit is set, then there are as
* many gaps as nr_gaps
*/
if (all_bit) {
num_seg = ntohs(nr_sack->nr_sack.num_nr_gap_ack_blks);
}
num_nr_seg = ntohs(nr_sack->nr_sack.num_nr_gap_ack_blks);
a_rwnd = (uint32_t) ntohl(nr_sack->nr_sack.a_rwnd);
SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_NR_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n",
cum_ack,
num_seg,
a_rwnd
);
stcb->asoc.seen_a_sack_this_pkt = 1;
if ((stcb->asoc.pr_sctp_cnt == 0) &&
(num_seg == 0) &&
((compare_with_wrap(cum_ack, stcb->asoc.last_acked_seq, MAX_TSN)) ||
(cum_ack == stcb->asoc.last_acked_seq)) &&
(stcb->asoc.saw_sack_with_frags == 0) &&
(!TAILQ_EMPTY(&stcb->asoc.sent_queue))
) {
/*
* We have a SIMPLE sack having no
* prior segments and data on sent
* queue to be acked.. Use the
* faster path sack processing. We
* also allow window update sacks
* with no missing segments to go
* this way too.
*/
sctp_express_handle_nr_sack(stcb, cum_ack, a_rwnd, nonce_sum_flag,
&abort_now);
} else {
if (netp && *netp)
sctp_handle_nr_sack(m, *offset,
nr_sack, stcb, *netp, &abort_now, chk_length, a_rwnd);
}
if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
(stcb->asoc.stream_queue_cnt == 0)) {
sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
}
if (abort_now) {
/* ABORT signal from sack processing */
*offset = length;
return (NULL);
}
}
break;
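A small sketch of the flag handling above: when the All bit is set, the gap-block count is taken from the nr-gap-block count, since every gap block is then also non-renegable. The fixed-part layout and the flag value (the All bit as the second least-significant flag bit) are assumptions for illustration, not the kernel's struct sctp_nr_sack_chunk.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

#define NR_SACK_ALL_BIT 0x02    /* assumed All-bit position in the chunk flags */

/* Assumed fixed part of an NR-SACK chunk; all fields in network byte order. */
struct nr_sack_fixed {
    uint32_t cum_tsn_ack;
    uint32_t a_rwnd;
    uint16_t num_gap_ack_blks;
    uint16_t num_nr_gap_ack_blks;
    uint16_t num_dup_tsns;
    uint16_t reserved;
};

int
main(void)
{
    struct nr_sack_fixed ns;
    uint8_t flags = NR_SACK_ALL_BIT;
    uint16_t num_seg, num_nr_seg;

    memset(&ns, 0, sizeof(ns));
    ns.cum_tsn_ack = htonl(1000);
    ns.a_rwnd = htonl(65536);
    ns.num_gap_ack_blks = htons(0);
    ns.num_nr_gap_ack_blks = htons(3);

    num_seg = ntohs(ns.num_gap_ack_blks);
    num_nr_seg = ntohs(ns.num_nr_gap_ack_blks);
    if (flags & NR_SACK_ALL_BIT)
        num_seg = num_nr_seg;   /* All bit: every gap block is also non-renegable */
    printf("cum_ack %u rwnd %u gaps %u nr-gaps %u\n",
        (unsigned)ntohl(ns.cum_tsn_ack), (unsigned)ntohl(ns.a_rwnd),
        num_seg, num_nr_seg);
    return (0);
}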
case SCTP_HEARTBEAT_REQUEST:
SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n");
if ((stcb) && netp && *netp) {
@ -5242,6 +5556,18 @@ sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset,
return;
}
#if 0
static void
sctp_print_mbuf_chain(struct mbuf *m)
{
for (; m; m = SCTP_BUF_NEXT(m)) {
printf("%p: m_len = %ld\n", m, SCTP_BUF_LEN(m));
if (SCTP_BUF_IS_EXTENDED(m))
printf("%p: extend_size = %d\n", m, SCTP_BUF_EXTEND_SIZE(m));
}
}
#endif
void
sctp_input_with_port(i_pak, off, port)


@ -500,3 +500,24 @@ sctp_get_mbuf_for_msg(unsigned int space_needed,
#define MD5_Final MD5Final
#endif
#define SCTP_DECREMENT_AND_CHECK_REFCOUNT(addr) (atomic_fetchadd_int(addr, -1) == 1)
#if defined(INVARIANTS)
#define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \
{ \
int32_t oldval; \
oldval = atomic_fetchadd_int(addr, -val); \
if (oldval < val) { \
panic("Counter goes negative"); \
} \
}
#else
#define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \
{ \
int32_t oldval; \
oldval = atomic_fetchadd_int(addr, -val); \
if (oldval < val) { \
*addr = 0; \
} \
}
#endif
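The same decrement-and-test idea in portable user-space form: an atomic fetch-and-subtract returns the value before the decrement, so seeing 1 means this caller just dropped the last reference and may free the object. A minimal sketch with C11 atomics standing in for atomic_fetchadd_int().

#include <stdatomic.h>
#include <stdio.h>

struct obj {
    atomic_int refcount;
};

/* Returns non-zero when the last reference was released. */
static int
obj_release(struct obj *o)
{
    return (atomic_fetch_sub(&o->refcount, 1) == 1);
}

int
main(void)
{
    struct obj o;

    atomic_init(&o.refcount, 2);
    printf("first release frees: %d\n", obj_release(&o));   /* 0 */
    printf("second release frees: %d\n", obj_release(&o));  /* 1 */
    return (0);
}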

File diff suppressed because it is too large


@ -88,7 +88,7 @@ sctp_send_initiate_ack(struct sctp_inpcb *, struct sctp_tcb *,
struct mbuf *
sctp_arethere_unrecognized_parameters(struct mbuf *, int, int *,
struct sctp_chunkhdr *);
struct sctp_chunkhdr *, int *);
void sctp_queue_op_err(struct sctp_tcb *, struct mbuf *);
int
@ -150,6 +150,9 @@ void send_forward_tsn(struct sctp_tcb *, struct sctp_association *);
void sctp_send_sack(struct sctp_tcb *);
/* EY 05/07/08 if nr_sacks used, the following function will be called instead of sctp_send_sack */
void sctp_send_nr_sack(struct sctp_tcb *);
int sctp_send_hb(struct sctp_tcb *, int, struct sctp_nets *);
void sctp_send_ecn_echo(struct sctp_tcb *, struct sctp_nets *, uint32_t);


@ -1,5 +1,5 @@
/*-
* Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -226,10 +226,7 @@ sctp_find_vrf(uint32_t vrf_id)
void
sctp_free_vrf(struct sctp_vrf *vrf)
{
int ret;
ret = atomic_fetchadd_int(&vrf->refcount, -1);
if (ret == 1) {
if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&vrf->refcount)) {
if (vrf->vrf_addr_hash) {
SCTP_HASH_FREE(vrf->vrf_addr_hash, vrf->vrf_addr_hashmark);
vrf->vrf_addr_hash = NULL;
@ -244,10 +241,7 @@ sctp_free_vrf(struct sctp_vrf *vrf)
void
sctp_free_ifn(struct sctp_ifn *sctp_ifnp)
{
int ret;
ret = atomic_fetchadd_int(&sctp_ifnp->refcount, -1);
if (ret == 1) {
if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&sctp_ifnp->refcount)) {
/* We zero'd the count */
if (sctp_ifnp->vrf) {
sctp_free_vrf(sctp_ifnp->vrf);
@ -272,10 +266,7 @@ sctp_update_ifn_mtu(uint32_t ifn_index, uint32_t mtu)
void
sctp_free_ifa(struct sctp_ifa *sctp_ifap)
{
int ret;
ret = atomic_fetchadd_int(&sctp_ifap->refcount, -1);
if (ret == 1) {
if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&sctp_ifap->refcount)) {
/* We zero'd the count */
if (sctp_ifap->ifn_p) {
sctp_free_ifn(sctp_ifap->ifn_p);
@ -1295,7 +1286,7 @@ sctp_findassociation_ep_addr(struct sctp_inpcb **inp_p, struct sockaddr *remote,
*/
struct sctp_tcb *
sctp_findassociation_ep_asocid(struct sctp_inpcb *inp, sctp_assoc_t asoc_id, int want_lock)
sctp_findasoc_ep_asocid_locked(struct sctp_inpcb *inp, sctp_assoc_t asoc_id, int want_lock)
{
/*
* Use my the assoc_id to find a endpoint
@ -1304,33 +1295,29 @@ sctp_findassociation_ep_asocid(struct sctp_inpcb *inp, sctp_assoc_t asoc_id, int
struct sctp_tcb *stcb;
uint32_t id;
if (asoc_id == 0 || inp == NULL) {
if (inp == NULL) {
SCTP_PRINTF("TSNH ep_associd\n");
return (NULL);
}
if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
SCTP_PRINTF("TSNH ep_associd0\n");
return (NULL);
}
SCTP_INP_INFO_RLOCK();
id = (uint32_t) asoc_id;
head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(id,
SCTP_BASE_INFO(hashasocmark))];
head = &inp->sctp_asocidhash[SCTP_PCBHASH_ASOC(id, inp->hashasocidmark)];
if (head == NULL) {
/* invalid id TSNH */
SCTP_INP_INFO_RUNLOCK();
SCTP_PRINTF("TSNH ep_associd1\n");
return (NULL);
}
LIST_FOREACH(stcb, head, sctp_asocs) {
SCTP_INP_RLOCK(stcb->sctp_ep);
if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
SCTP_INP_RUNLOCK(stcb->sctp_ep);
SCTP_INP_INFO_RUNLOCK();
return (NULL);
}
LIST_FOREACH(stcb, head, sctp_tcbasocidhash) {
if (stcb->asoc.assoc_id == id) {
/* candidate */
if (inp != stcb->sctp_ep) {
/*
* some other guy has the same id active (id
* collision ??).
*/
SCTP_INP_RUNLOCK(stcb->sctp_ep);
SCTP_PRINTF("TSNH ep_associd2\n");
continue;
}
if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
@ -1339,58 +1326,25 @@ sctp_findassociation_ep_asocid(struct sctp_inpcb *inp, sctp_assoc_t asoc_id, int
if (want_lock) {
SCTP_TCB_LOCK(stcb);
}
SCTP_INP_RUNLOCK(stcb->sctp_ep);
SCTP_INP_INFO_RUNLOCK();
return (stcb);
}
SCTP_INP_RUNLOCK(stcb->sctp_ep);
}
/* Ok if we missed here, lets try the restart hash */
head = &SCTP_BASE_INFO(sctp_restarthash)[SCTP_PCBHASH_ASOC(id, SCTP_BASE_INFO(hashrestartmark))];
if (head == NULL) {
/* invalid id TSNH */
SCTP_INP_INFO_RUNLOCK();
return (NULL);
}
LIST_FOREACH(stcb, head, sctp_tcbrestarhash) {
SCTP_INP_RLOCK(stcb->sctp_ep);
if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
SCTP_INP_RUNLOCK(stcb->sctp_ep);
continue;
}
if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
continue;
}
if (want_lock) {
SCTP_TCB_LOCK(stcb);
}
if (stcb->asoc.assoc_id == id) {
/* candidate */
SCTP_INP_RUNLOCK(stcb->sctp_ep);
if (inp != stcb->sctp_ep) {
/*
* some other guy has the same id active (id
* collision ??).
*/
if (want_lock) {
SCTP_TCB_UNLOCK(stcb);
}
continue;
}
SCTP_INP_INFO_RUNLOCK();
return (stcb);
} else {
SCTP_INP_RUNLOCK(stcb->sctp_ep);
}
if (want_lock) {
SCTP_TCB_UNLOCK(stcb);
}
}
SCTP_INP_INFO_RUNLOCK();
return (NULL);
}
struct sctp_tcb *
sctp_findassociation_ep_asocid(struct sctp_inpcb *inp, sctp_assoc_t asoc_id, int want_lock)
{
struct sctp_tcb *stcb;
SCTP_INP_RLOCK(inp);
stcb = sctp_findasoc_ep_asocid_locked(inp, asoc_id, want_lock);
SCTP_INP_RUNLOCK(inp);
return (stcb);
}
static struct sctp_inpcb *
sctp_endpoint_probe(struct sockaddr *nam, struct sctppcbhead *head,
uint16_t lport, uint32_t vrf_id)
@ -1434,6 +1388,7 @@ sctp_endpoint_probe(struct sockaddr *nam, struct sctppcbhead *head,
if (head == NULL)
return (NULL);
LIST_FOREACH(inp, head, sctp_hash) {
SCTP_INP_RLOCK(inp);
if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
@ -1695,8 +1650,11 @@ sctp_pcb_findep(struct sockaddr *nam, int find_tcp_pool, int have_lock,
* If the TCP model exists it could be that the main listening
* endpoint is gone but there exists a connected socket for this guy
* yet. If so we can return the first one that we find. This may NOT
* be the correct one but the sctp_findassociation_ep_addr has
* further code to look at all TCP models.
* be the correct one so the caller should be wary of the returned
* INP. Currently the only caller that sets this flag is in bindx,
* where we are verifying that a user CAN bind the address. He
* either has bound it already, or someone else has, or it is open
* to bind, so this is good enough.
*/
if (inp == NULL && find_tcp_pool) {
head = &SCTP_BASE_INFO(sctp_tcpephash)[SCTP_PCBHASH_ALLADDR(lport, SCTP_BASE_INFO(hashtcpmark))];
@ -1844,11 +1802,67 @@ sctp_findassociation_special_addr(struct mbuf *m, int iphlen, int offset,
return (NULL);
}
static int
sctp_does_stcb_own_this_addr(struct sctp_tcb *stcb, struct sockaddr *to)
{
struct sctp_nets *net;
/*
* Simple question, the ports match, does the tcb own the to
* address?
*/
if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL)) {
/* of course */
return (1);
}
/* have to look at all bound addresses */
TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
if (net->ro._l_addr.sa.sa_family != to->sa_family) {
/* not the same family, can't be a match */
continue;
}
switch (to->sa_family) {
case AF_INET:
{
struct sockaddr_in *sin, *rsin;
sin = (struct sockaddr_in *)&net->ro._l_addr;
rsin = (struct sockaddr_in *)to;
if (sin->sin_addr.s_addr ==
rsin->sin_addr.s_addr) {
/* found it */
return (1);
}
break;
}
#ifdef INET6
case AF_INET6:
{
struct sockaddr_in6 *sin6, *rsin6;
sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
rsin6 = (struct sockaddr_in6 *)to;
if (SCTP6_ARE_ADDR_EQUAL(sin6,
rsin6)) {
/* Update the endpoint pointer */
return (1);
}
break;
}
#endif
default:
/* TSNH */
break;
}
}
/* Nope, do not have the address ;-( */
return (0);
}
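A user-space sketch of the per-family address comparison used above, with memcmp() standing in for SCTP6_ARE_ADDR_EQUAL() (so IPv6 scope handling is ignored); the addresses are illustrative.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/* Family-aware "same address?" check, mirroring the loop above. */
static bool
addr_equal(const struct sockaddr *a, const struct sockaddr *b)
{
    if (a->sa_family != b->sa_family)
        return (false);
    switch (a->sa_family) {
    case AF_INET: {
        const struct sockaddr_in *x = (const struct sockaddr_in *)a;
        const struct sockaddr_in *y = (const struct sockaddr_in *)b;

        return (x->sin_addr.s_addr == y->sin_addr.s_addr);
    }
    case AF_INET6: {
        const struct sockaddr_in6 *x = (const struct sockaddr_in6 *)a;
        const struct sockaddr_in6 *y = (const struct sockaddr_in6 *)b;

        return (memcmp(&x->sin6_addr, &y->sin6_addr,
            sizeof(x->sin6_addr)) == 0);
    }
    default:
        return (false);
    }
}

int
main(void)
{
    struct sockaddr_in a, b;

    memset(&a, 0, sizeof(a));
    memset(&b, 0, sizeof(b));
    a.sin_family = b.sin_family = AF_INET;
    inet_pton(AF_INET, "192.0.2.1", &a.sin_addr);
    inet_pton(AF_INET, "192.0.2.1", &b.sin_addr);
    printf("%d\n", addr_equal((struct sockaddr *)&a, (struct sockaddr *)&b));
    return (0);
}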
static struct sctp_tcb *
sctp_findassoc_by_vtag(struct sockaddr *from, uint32_t vtag,
sctp_findassoc_by_vtag(struct sockaddr *from, struct sockaddr *to, uint32_t vtag,
struct sctp_inpcb **inp_p, struct sctp_nets **netp, uint16_t rport,
uint16_t lport, int skip_src_check)
uint16_t lport, int skip_src_check, uint32_t vrf_id, uint32_t remote_tag)
{
/*
* Use my vtag to hash. If we find it we then verify the source addr
@ -1880,18 +1894,10 @@ sctp_findassoc_by_vtag(struct sockaddr *from, uint32_t vtag,
if (stcb->asoc.my_vtag == vtag) {
/* candidate */
if (stcb->rport != rport) {
/*
* we could remove this if vtags are unique
* across the system.
*/
SCTP_TCB_UNLOCK(stcb);
continue;
}
if (stcb->sctp_ep->sctp_lport != lport) {
/*
* we could remove this if vtags are unique
* across the system.
*/
SCTP_TCB_UNLOCK(stcb);
continue;
}
@ -1899,8 +1905,33 @@ sctp_findassoc_by_vtag(struct sockaddr *from, uint32_t vtag,
SCTP_TCB_UNLOCK(stcb);
continue;
}
/* RRS:Need toaddr check here */
if (sctp_does_stcb_own_this_addr(stcb, to) == 0) {
/* Endpoint does not own this address */
SCTP_TCB_UNLOCK(stcb);
continue;
}
if (remote_tag) {
/*
* If we have both vtags thats all we match
* on
*/
if (stcb->asoc.peer_vtag == remote_tag) {
/*
* If both tags match we consider it
* conclusive and check NO
* source/destination addresses
*/
goto conclusive;
}
}
if (skip_src_check) {
*netp = NULL; /* unknown */
conclusive:
if (from) {
net = sctp_findnet(stcb, from);
} else {
*netp = NULL; /* unknown */
}
if (inp_p)
*inp_p = stcb->sctp_ep;
SCTP_INP_INFO_RUNLOCK();
@ -1985,14 +2016,8 @@ sctp_findassociation_addr(struct mbuf *m, int iphlen, int offset,
/* Currently not supported. */
return (NULL);
}
if (sh->v_tag) {
/* we only go down this path if vtag is non-zero */
retval = sctp_findassoc_by_vtag(from, ntohl(sh->v_tag),
inp_p, netp, sh->src_port, sh->dest_port, 0);
if (retval) {
return (retval);
}
}
switch (iph->ip_v) {
case IPVERSION:
{
@ -2032,6 +2057,14 @@ sctp_findassociation_addr(struct mbuf *m, int iphlen, int offset,
/* TSNH */
break;
}
if (sh->v_tag) {
/* we only go down this path if vtag is non-zero */
retval = sctp_findassoc_by_vtag(from, to, ntohl(sh->v_tag),
inp_p, netp, sh->src_port, sh->dest_port, 0, vrf_id, 0);
if (retval) {
return (retval);
}
}
find_tcp_pool = 0;
if ((ch->chunk_type != SCTP_INITIATION) &&
(ch->chunk_type != SCTP_INITIATION_ACK) &&
@ -2084,7 +2117,7 @@ sctp_findassociation_addr(struct mbuf *m, int iphlen, int offset,
*/
struct sctp_tcb *
sctp_findassociation_ep_asconf(struct mbuf *m, int iphlen, int offset,
struct sctphdr *sh, struct sctp_inpcb **inp_p, struct sctp_nets **netp)
struct sctphdr *sh, struct sctp_inpcb **inp_p, struct sctp_nets **netp, uint32_t vrf_id)
{
struct sctp_tcb *stcb;
struct sockaddr_in *sin;
@ -2094,6 +2127,7 @@ sctp_findassociation_ep_asconf(struct mbuf *m, int iphlen, int offset,
#endif
struct sockaddr_storage local_store, remote_store;
struct sockaddr *to;
struct ip *iph;
#ifdef INET6
@ -2107,7 +2141,7 @@ sctp_findassociation_ep_asconf(struct mbuf *m, int iphlen, int offset,
memset(&local_store, 0, sizeof(local_store));
memset(&remote_store, 0, sizeof(remote_store));
to = (struct sockaddr *)&local_store;
/* First get the destination address setup too. */
iph = mtod(m, struct ip *);
switch (iph->ip_v) {
@ -2202,8 +2236,8 @@ sctp_findassociation_ep_asconf(struct mbuf *m, int iphlen, int offset,
}
if (zero_address) {
stcb = sctp_findassoc_by_vtag(NULL, ntohl(sh->v_tag), inp_p,
netp, sh->src_port, sh->dest_port, 1);
stcb = sctp_findassoc_by_vtag(NULL, to, ntohl(sh->v_tag), inp_p,
netp, sh->src_port, sh->dest_port, 1, vrf_id, 0);
/*
* printf("findassociation_ep_asconf: zero lookup address
* finds stcb 0x%x\n", (uint32_t)stcb);
@ -2211,7 +2245,7 @@ sctp_findassociation_ep_asconf(struct mbuf *m, int iphlen, int offset,
} else {
stcb = sctp_findassociation_ep_addr(inp_p,
(struct sockaddr *)&remote_store, netp,
(struct sockaddr *)&local_store, NULL);
to, NULL);
}
return (stcb);
}
@ -2256,10 +2290,16 @@ sctp_inpcb_alloc(struct socket *so, uint32_t vrf_id)
/* setup socket pointers */
inp->sctp_socket = so;
inp->ip_inp.inp.inp_socket = so;
inp->sctp_associd_counter = 1;
inp->partial_delivery_point = SCTP_SB_LIMIT_RCV(so) >> SCTP_PARTIAL_DELIVERY_SHIFT;
inp->sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT;
/* init the small hash table we use to track asocid <-> tcb */
inp->sctp_asocidhash = SCTP_HASH_INIT(SCTP_STACK_VTAG_HASH_SIZE, &inp->hashasocidmark);
if (inp->sctp_asocidhash == NULL) {
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp);
SCTP_INP_INFO_WUNLOCK();
return error;
}
#ifdef IPSEC
{
struct inpcbpolicy *pcb_sp = NULL;
@ -2597,8 +2637,9 @@ sctp_inpcb_bind(struct socket *so, struct sockaddr *addr,
sin = (struct sockaddr_in *)addr;
lport = sin->sin_port;
/*
* For LOOPBACK the prison_local_ip4() call will transmute the ip address
* to the proper value.
* For LOOPBACK the prison_local_ip4() call
* will transmute the ip address to the
* proper value.
*/
if (p && prison_local_ip4(p->td_ucred, &sin->sin_addr) != 0) {
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
@ -2627,8 +2668,9 @@ sctp_inpcb_bind(struct socket *so, struct sockaddr *addr,
lport = sin6->sin6_port;
/*
* For LOOPBACK the prison_local_ip6() call will transmute the ipv6 address
* to the proper value.
* For LOOPBACK the prison_local_ip6() call
* will transmute the ipv6 address to the
* proper value.
*/
if (p && prison_local_ip6(p->td_ucred, &sin6->sin6_addr,
(SCTP_IPV6_V6ONLY(inp) != 0)) != 0) {
@ -3373,6 +3415,10 @@ sctp_inpcb_free(struct sctp_inpcb *inp, int immediate, int from)
(void)SCTP_OS_TIMER_STOP(&inp->sctp_ep.signature_change.timer);
inp->sctp_ep.signature_change.type = SCTP_TIMER_TYPE_NONE;
/* Clear the read queue */
if ((inp->sctp_asocidhash) != NULL) {
SCTP_HASH_FREE(inp->sctp_asocidhash, inp->hashasocidmark);
inp->sctp_asocidhash = NULL;
}
/* sa_ignore FREED_MEMORY */
while ((sq = TAILQ_FIRST(&inp->read_queue)) != NULL) {
/* Its only abandoned if it had data left */
@ -3864,6 +3910,32 @@ sctp_add_remote_addr(struct sctp_tcb *stcb, struct sockaddr *newaddr,
}
static uint32_t
sctp_aloc_a_assoc_id(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
{
uint32_t id;
struct sctpasochead *head;
struct sctp_tcb *lstcb;
try_again:
if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
/* TSNH */
return (0);
}
SCTP_INP_WLOCK(inp);
id = inp->sctp_associd_counter;
inp->sctp_associd_counter++;
lstcb = sctp_findasoc_ep_asocid_locked(inp, (sctp_assoc_t) id, 0);
if (lstcb) {
SCTP_INP_WUNLOCK(inp);
goto try_again;
}
head = &inp->sctp_asocidhash[SCTP_PCBHASH_ASOC(id, inp->hashasocidmark)];
LIST_INSERT_HEAD(head, stcb, sctp_tcbasocidhash);
stcb->asoc.in_asocid_hash = 1;
SCTP_INP_WUNLOCK(inp);
return id;
}
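The allocation loop above in miniature: bump a per-endpoint counter and probe until an unused id turns up, then record it. The array-based table below is a toy stand-in for the per-endpoint asocid hash.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_IDS 8

static uint32_t used[MAX_IDS];
static int nused;

static bool
id_in_use(uint32_t id)
{
    int i;

    for (i = 0; i < nused; i++)
        if (used[i] == id)
            return (true);
    return (false);
}

/* Same shape as the allocator above: probe the counter until a free id appears. */
static uint32_t
alloc_id(uint32_t *counter)
{
    uint32_t id;

    do {
        id = (*counter)++;
    } while (id == 0 || id_in_use(id));     /* skip 0 and anything already handed out */
    used[nused++] = id;
    return (id);
}

int
main(void)
{
    uint32_t counter = 1;

    used[nused++] = 2;                      /* pretend id 2 is already taken */
    printf("%u\n", (unsigned)alloc_id(&counter));   /* 1 */
    printf("%u\n", (unsigned)alloc_id(&counter));   /* 3, skipping 2 */
    return (0);
}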
/*
* allocate an association and add it to the endpoint. The caller must be
* careful to add all additional addresses once they are know right away or
@ -3983,8 +4055,11 @@ sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr,
bzero(stcb, sizeof(*stcb));
asoc = &stcb->asoc;
asoc->assoc_id = sctp_aloc_a_assoc_id(inp, stcb);
SCTP_TCB_LOCK_INIT(stcb);
SCTP_TCB_SEND_LOCK_INIT(stcb);
stcb->rport = rport;
/* setup back pointer's */
stcb->sctp_ep = inp;
stcb->sctp_socket = inp->sctp_socket;
@ -3992,19 +4067,20 @@ sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr,
/* failed */
SCTP_TCB_LOCK_DESTROY(stcb);
SCTP_TCB_SEND_LOCK_DESTROY(stcb);
LIST_REMOVE(stcb, sctp_tcbasocidhash);
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), stcb);
SCTP_DECR_ASOC_COUNT();
*error = err;
return (NULL);
}
/* and the port */
stcb->rport = rport;
SCTP_INP_INFO_WLOCK();
SCTP_INP_WLOCK(inp);
if (inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
/* inpcb freed while alloc going on */
SCTP_TCB_LOCK_DESTROY(stcb);
SCTP_TCB_SEND_LOCK_DESTROY(stcb);
LIST_REMOVE(stcb, sctp_tcbasocidhash);
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), stcb);
SCTP_INP_WUNLOCK(inp);
SCTP_INP_INFO_WUNLOCK();
@ -4016,12 +4092,12 @@ sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr,
SCTP_TCB_LOCK(stcb);
/* now that my_vtag is set, add it to the hash */
head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
SCTP_BASE_INFO(hashasocmark))];
head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
/* put it in the bucket in the vtag hash of assoc's for the system */
LIST_INSERT_HEAD(head, stcb, sctp_asocs);
sctp_delete_from_timewait(stcb->asoc.my_vtag);
#ifdef MICHAELS_EXPERIMENT
sctp_delete_from_timewait(stcb->asoc.my_vtag, inp->sctp_lport, stcb->rport);
#endif
SCTP_INP_INFO_WUNLOCK();
if ((err = sctp_add_remote_addr(stcb, firstaddr, SCTP_DO_SETSCOPE, SCTP_ALLOC_ASOC))) {
@ -4037,6 +4113,7 @@ sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr,
SCTP_DECR_ASOC_COUNT();
SCTP_TCB_LOCK_DESTROY(stcb);
SCTP_TCB_SEND_LOCK_DESTROY(stcb);
LIST_REMOVE(stcb, sctp_tcbasocidhash);
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), stcb);
SCTP_INP_WUNLOCK(inp);
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOBUFS);
@ -4159,7 +4236,7 @@ sctp_del_remote_addr(struct sctp_tcb *stcb, struct sockaddr *remaddr)
}
void
sctp_delete_from_timewait(uint32_t tag)
sctp_delete_from_timewait(uint32_t tag, uint16_t lport, uint16_t rport)
{
struct sctpvtaghead *chain;
struct sctp_tagblock *twait_block;
@ -4170,9 +4247,13 @@ sctp_delete_from_timewait(uint32_t tag)
if (!SCTP_LIST_EMPTY(chain)) {
LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) {
for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) {
if (twait_block->vtag_block[i].v_tag == tag) {
if ((twait_block->vtag_block[i].v_tag == tag) &&
(twait_block->vtag_block[i].lport == lport) &&
(twait_block->vtag_block[i].rport == rport)) {
twait_block->vtag_block[i].tv_sec_at_expire = 0;
twait_block->vtag_block[i].v_tag = 0;
twait_block->vtag_block[i].lport = 0;
twait_block->vtag_block[i].rport = 0;
found = 1;
break;
}
@ -4184,7 +4265,7 @@ sctp_delete_from_timewait(uint32_t tag)
}
int
sctp_is_in_timewait(uint32_t tag)
sctp_is_in_timewait(uint32_t tag, uint16_t lport, uint16_t rport)
{
struct sctpvtaghead *chain;
struct sctp_tagblock *twait_block;
@ -4196,7 +4277,9 @@ sctp_is_in_timewait(uint32_t tag)
if (!SCTP_LIST_EMPTY(chain)) {
LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) {
for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) {
if (twait_block->vtag_block[i].v_tag == tag) {
if ((twait_block->vtag_block[i].v_tag == tag) &&
(twait_block->vtag_block[i].lport == lport) &&
(twait_block->vtag_block[i].rport == rport)) {
found = 1;
break;
}
@ -4211,7 +4294,7 @@ sctp_is_in_timewait(uint32_t tag)
void
sctp_add_vtag_to_timewait(uint32_t tag, uint32_t time)
sctp_add_vtag_to_timewait(uint32_t tag, uint32_t time, uint16_t lport, uint16_t rport)
{
struct sctpvtaghead *chain;
struct sctp_tagblock *twait_block;
@ -4230,16 +4313,22 @@ sctp_add_vtag_to_timewait(uint32_t tag, uint32_t time)
twait_block->vtag_block[i].tv_sec_at_expire =
now.tv_sec + time;
twait_block->vtag_block[i].v_tag = tag;
twait_block->vtag_block[i].lport = lport;
twait_block->vtag_block[i].rport = rport;
set = 1;
} else if ((twait_block->vtag_block[i].v_tag) &&
((long)twait_block->vtag_block[i].tv_sec_at_expire < now.tv_sec)) {
/* Audit expires this guy */
twait_block->vtag_block[i].tv_sec_at_expire = 0;
twait_block->vtag_block[i].v_tag = 0;
twait_block->vtag_block[i].lport = 0;
twait_block->vtag_block[i].rport = 0;
if (set == 0) {
/* Reuse it for my new tag */
twait_block->vtag_block[i].tv_sec_at_expire = now.tv_sec + time;
twait_block->vtag_block[i].v_tag = tag;
twait_block->vtag_block[i].lport = lport;
twait_block->vtag_block[i].rport = rport;
set = 1;
}
}
@ -4267,6 +4356,8 @@ sctp_add_vtag_to_timewait(uint32_t tag, uint32_t time)
LIST_INSERT_HEAD(chain, twait_block, sctp_nxt_tagblock);
twait_block->vtag_block[0].tv_sec_at_expire = now.tv_sec + time;
twait_block->vtag_block[0].v_tag = tag;
twait_block->vtag_block[0].lport = lport;
twait_block->vtag_block[0].rport = rport;
}
}
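A compact user-space model of the time-wait change: a verification tag is now blocked only for the exact local/remote port pair it was used with, and expired or empty slots can be reclaimed. The table size and lifetime here are arbitrary.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct timewait {
    uint32_t expire;    /* absolute seconds; 0 or past means the slot is free */
    uint32_t v_tag;
    uint16_t lport;
    uint16_t rport;
};

#define NSLOTS 4
static struct timewait tw[NSLOTS];

/* A tag is only blocked for the exact port pair it was used with. */
static bool
in_timewait(uint32_t tag, uint16_t lport, uint16_t rport, uint32_t now)
{
    int i;

    for (i = 0; i < NSLOTS; i++) {
        if (tw[i].v_tag == tag && tw[i].lport == lport &&
            tw[i].rport == rport && tw[i].expire >= now)
            return (true);
    }
    return (false);
}

static void
add_timewait(uint32_t tag, uint16_t lport, uint16_t rport,
    uint32_t life, uint32_t now)
{
    int i;

    for (i = 0; i < NSLOTS; i++) {
        if (tw[i].v_tag == 0 || tw[i].expire < now) {   /* free or expired slot */
            tw[i].v_tag = tag;
            tw[i].lport = lport;
            tw[i].rport = rport;
            tw[i].expire = now + life;
            return;
        }
    }
}

int
main(void)
{
    uint32_t now = (uint32_t)time(NULL);

    add_timewait(0x1234, 5001, 6001, 60, now);
    printf("%d\n", in_timewait(0x1234, 5001, 6001, now));   /* 1: blocked */
    printf("%d\n", in_timewait(0x1234, 5001, 7001, now));   /* 0: other peer port */
    return (0);
}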
@ -4549,8 +4640,8 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
if (inp->sctp_tcbhash) {
LIST_REMOVE(stcb, sctp_tcbhash);
}
if (stcb->asoc.in_restart_hash) {
LIST_REMOVE(stcb, sctp_tcbrestarhash);
if (stcb->asoc.in_asocid_hash) {
LIST_REMOVE(stcb, sctp_tcbasocidhash);
}
/* Now lets remove it from the list of ALL associations in the EP */
LIST_REMOVE(stcb, sctp_tcblist);
@ -4561,7 +4652,7 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
}
/* pull from vtag hash */
LIST_REMOVE(stcb, sctp_asocs);
sctp_add_vtag_to_timewait(asoc->my_vtag, SCTP_TIME_WAIT);
sctp_add_vtag_to_timewait(asoc->my_vtag, inp->sctp_lport, stcb->rport, SCTP_TIME_WAIT);
/*
* Now restop the timers to be sure - this is paranoia at its finest!
@ -4602,6 +4693,8 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
}
sctp_free_remote_addr(sp->net);
sctp_free_spbufspace(stcb, asoc, sp);
if (sp->holds_key_ref)
sctp_auth_key_release(stcb, sp->auth_keyid);
/* Free the zone stuff */
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_strmoq), sp);
SCTP_DECR_STRMOQ_COUNT();
@ -4640,6 +4733,8 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
sctp_m_freem(chk->data);
chk->data = NULL;
}
if (chk->holds_key_ref)
sctp_auth_key_release(stcb, chk->auth_keyid);
ccnt++;
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk);
SCTP_DECR_CHK_COUNT();
@ -4657,6 +4752,8 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
sctp_m_freem(chk->data);
chk->data = NULL;
}
if (chk->holds_key_ref)
sctp_auth_key_release(stcb, chk->auth_keyid);
ccnt++;
sctp_free_remote_addr(chk->whoTo);
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk);
@ -4680,6 +4777,8 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
sctp_m_freem(chk->data);
chk->data = NULL;
}
if (chk->holds_key_ref)
sctp_auth_key_release(stcb, chk->auth_keyid);
ccnt++;
sctp_free_remote_addr(chk->whoTo);
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk);
@ -4703,6 +4802,8 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
sctp_m_freem(chk->data);
chk->data = NULL;
}
if (chk->holds_key_ref)
sctp_auth_key_release(stcb, chk->auth_keyid);
ccnt++;
sctp_free_remote_addr(chk->whoTo);
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk);
@ -4727,6 +4828,8 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
sctp_m_freem(chk->data);
chk->data = NULL;
}
if (chk->holds_key_ref)
sctp_auth_key_release(stcb, chk->auth_keyid);
ccnt++;
sctp_free_remote_addr(chk->whoTo);
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk);
@ -4749,6 +4852,8 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
sctp_m_freem(chk->data);
chk->data = NULL;
}
if (chk->holds_key_ref)
sctp_auth_key_release(stcb, chk->auth_keyid);
sctp_free_remote_addr(chk->whoTo);
ccnt++;
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk);
@ -5310,17 +5415,12 @@ sctp_pcb_init()
&SCTP_BASE_INFO(hashtcpmark));
SCTP_BASE_INFO(hashtblsize) = SCTP_BASE_SYSCTL(sctp_hashtblsize);
/* init the small hash table we use to track restarted asoc's */
SCTP_BASE_INFO(sctp_restarthash) = SCTP_HASH_INIT(SCTP_STACK_VTAG_HASH_SIZE,
&SCTP_BASE_INFO(hashrestartmark));
SCTP_BASE_INFO(sctp_vrfhash) = SCTP_HASH_INIT(SCTP_SIZE_OF_VRF_HASH,
&SCTP_BASE_INFO(hashvrfmark));
SCTP_BASE_INFO(vrf_ifn_hash) = SCTP_HASH_INIT(SCTP_VRF_IFN_HASH_SIZE,
&SCTP_BASE_INFO(vrf_ifn_hashmark));
/* init the zones */
/*
* FIX ME: Should check for NULL returns, but if it does fail we are
@ -5508,8 +5608,6 @@ sctp_pcb_finish(void)
SCTP_HASH_FREE(SCTP_BASE_INFO(sctp_ephash), SCTP_BASE_INFO(hashmark));
if (SCTP_BASE_INFO(sctp_tcpephash) != NULL)
SCTP_HASH_FREE(SCTP_BASE_INFO(sctp_tcpephash), SCTP_BASE_INFO(hashtcpmark));
if (SCTP_BASE_INFO(sctp_restarthash) != NULL)
SCTP_HASH_FREE(SCTP_BASE_INFO(sctp_restarthash), SCTP_BASE_INFO(hashrestartmark));
}
@ -5900,6 +5998,8 @@ sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m,
if (lsa) {
(void)sctp_set_primary_addr(stcb, sa, NULL);
}
} else if (ptype == SCTP_HAS_NAT_SUPPORT) {
stcb->asoc.peer_supports_nat = 1;
} else if (ptype == SCTP_PRSCTP_SUPPORTED) {
/* Peer supports pr-sctp */
stcb->asoc.peer_supports_prsctp = 1;
@ -6180,7 +6280,7 @@ sctp_set_primary_addr(struct sctp_tcb *stcb, struct sockaddr *sa,
}
int
sctp_is_vtag_good(struct sctp_inpcb *inp, uint32_t tag, struct timeval *now, int save_in_twait)
sctp_is_vtag_good(struct sctp_inpcb *inp, uint32_t tag, uint16_t lport, uint16_t rport, struct timeval *now, int save_in_twait)
{
/*
* This function serves two purposes. It will see if a TAG can be
@ -6188,54 +6288,45 @@ sctp_is_vtag_good(struct sctp_inpcb *inp, uint32_t tag, struct timeval *now, int
* tag. A secondary function it will do is purge out old tags that
* can be removed.
*/
struct sctpasochead *head;
struct sctpvtaghead *chain;
struct sctp_tagblock *twait_block;
struct sctpasochead *head;
struct sctp_tcb *stcb;
int i;
SCTP_INP_INFO_WLOCK();
chain = &SCTP_BASE_INFO(vtag_timewait[(tag % SCTP_STACK_VTAG_HASH_SIZE))];
/* First is the vtag in use ? */
SCTP_INP_INFO_RLOCK();
head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(tag,
SCTP_BASE_INFO(hashasocmark))];
if (head == NULL) {
goto check_restart;
/* invalid vtag */
goto skip_vtag_check;
}
LIST_FOREACH(stcb, head, sctp_asocs) {
/*
* We choose not to lock anything here. TCB's can't be
* removed since we have the read lock, so they can't be
* freed on us, same thing for the INP. I may be wrong with
* this assumption, but we will go with it for now :-)
*/
if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
continue;
}
if (stcb->asoc.my_vtag == tag) {
/*
* We should remove this if and return 0 always if
* we want vtags unique across all endpoints. For
* now within a endpoint is ok.
*/
if (inp == stcb->sctp_ep) {
/* bad tag, in use */
SCTP_INP_INFO_WUNLOCK();
return (0);
}
}
}
check_restart:
/* Now lets check the restart hash */
head = &SCTP_BASE_INFO(sctp_restarthash)[SCTP_PCBHASH_ASOC(tag,
SCTP_BASE_INFO(hashrestartmark))];
if (head == NULL) {
goto check_time_wait;
}
LIST_FOREACH(stcb, head, sctp_tcbrestarhash) {
if (stcb->asoc.assoc_id == tag) {
/* candidate */
if (inp == stcb->sctp_ep) {
/* bad tag, in use */
SCTP_INP_INFO_WUNLOCK();
return (0);
if (stcb->rport != rport) {
continue;
}
if (stcb->sctp_ep->sctp_lport != lport) {
continue;
}
/* Its a used tag set */
SCTP_INP_INFO_WUNLOCK();
return (0);
}
}
check_time_wait:
skip_vtag_check:
chain = &SCTP_BASE_INFO(vtag_timewait[(tag % SCTP_STACK_VTAG_HASH_SIZE))];
/* Now what about timed wait ? */
if (!SCTP_LIST_EMPTY(chain)) {
/*
@ -6252,8 +6343,9 @@ sctp_is_vtag_good(struct sctp_inpcb *inp, uint32_t tag, struct timeval *now, int
/* Audit expires this guy */
twait_block->vtag_block[i].tv_sec_at_expire = 0;
twait_block->vtag_block[i].v_tag = 0;
} else if (twait_block->vtag_block[i].v_tag ==
tag) {
} else if ((twait_block->vtag_block[i].v_tag == tag) &&
(twait_block->vtag_block[i].lport == lport) &&
(twait_block->vtag_block[i].rport == rport)) {
/* Bad tag, sorry :< */
SCTP_INP_INFO_WUNLOCK();
return (0);
@ -6261,6 +6353,8 @@ sctp_is_vtag_good(struct sctp_inpcb *inp, uint32_t tag, struct timeval *now, int
}
}
}
SCTP_INP_INFO_RUNLOCK();
#ifdef MICHAELS_EXPERIMENT
/*-
* Not found, ok to use the tag, add it to the time wait hash
* as well; this will prevent two successive cookies from getting
@ -6269,9 +6363,12 @@ sctp_is_vtag_good(struct sctp_inpcb *inp, uint32_t tag, struct timeval *now, int
* add this tag to the assoc hash we need to purge it from
* the t-wait hash.
*/
SCTP_INP_INFO_WLOCK();
if (save_in_twait)
sctp_add_vtag_to_timewait(tag, TICKS_TO_SEC(inp->sctp_ep.def_cookie_life));
sctp_add_vtag_to_timewait(tag, TICKS_TO_SEC(inp->sctp_ep.def_cookie_life), lport, rport);
SCTP_INP_INFO_WUNLOCK();
#endif
return (1);
}


@ -133,6 +133,8 @@ struct sctp_block_entry {
struct sctp_timewait {
uint32_t tv_sec_at_expire; /* the seconds from boot to expire */
uint32_t v_tag; /* the vtag that can not be reused */
uint16_t lport; /* the local port used in vtag */
uint16_t rport; /* the remote port used in vtag */
};
struct sctp_tagblock {
@ -148,8 +150,6 @@ struct sctp_epinfo {
struct sctppcbhead *sctp_ephash;
u_long hashmark;
struct sctpasochead *sctp_restarthash;
u_long hashrestartmark;
/*-
* The TCP model represents a substantial overhead in that we get an
* additional hash table to keep explicit connections in. The
@ -411,6 +411,10 @@ struct sctp_inpcb {
uint32_t total_recvs;
uint32_t last_abort_code;
uint32_t total_nospaces;
struct sctpasochead *sctp_asocidhash;
u_long hashasocidmark;
uint32_t sctp_associd_counter;
#ifdef SCTP_ASOCLOG_OF_TSNS
struct sctp_pcbtsn_rlog readlog[SCTP_READ_LOG_SIZE];
uint32_t readlog_index;
@ -424,7 +428,7 @@ struct sctp_tcb {
* table */
LIST_ENTRY(sctp_tcb) sctp_tcblist; /* list of all of the
* TCB's */
LIST_ENTRY(sctp_tcb) sctp_tcbrestarhash; /* next link in restart
LIST_ENTRY(sctp_tcb) sctp_tcbasocidhash; /* next link in asocid
* hash table */
LIST_ENTRY(sctp_tcb) sctp_asocs; /* vtag hash list */
struct sctp_block_entry *block_entry; /* pointer locked by socket
@ -536,13 +540,16 @@ sctp_findassociation_ep_addr(struct sctp_inpcb **,
struct sockaddr *, struct sctp_nets **, struct sockaddr *,
struct sctp_tcb *);
struct sctp_tcb *
sctp_findasoc_ep_asocid_locked(struct sctp_inpcb *inp, sctp_assoc_t asoc_id, int want_lock);
struct sctp_tcb *
sctp_findassociation_ep_asocid(struct sctp_inpcb *,
sctp_assoc_t, int);
struct sctp_tcb *
sctp_findassociation_ep_asconf(struct mbuf *, int, int,
struct sctphdr *, struct sctp_inpcb **, struct sctp_nets **);
struct sctphdr *, struct sctp_inpcb **, struct sctp_nets **, uint32_t vrf_id);
int sctp_inpcb_alloc(struct socket *so, uint32_t vrf_id);
@ -557,12 +564,12 @@ sctp_aloc_assoc(struct sctp_inpcb *, struct sockaddr *,
int sctp_free_assoc(struct sctp_inpcb *, struct sctp_tcb *, int, int);
void sctp_delete_from_timewait(uint32_t);
void sctp_delete_from_timewait(uint32_t, uint16_t, uint16_t);
int sctp_is_in_timewait(uint32_t tag);
int sctp_is_in_timewait(uint32_t tag, uint16_t lport, uint16_t rport);
void
sctp_add_vtag_to_timewait(uint32_t, uint32_t);
sctp_add_vtag_to_timewait(uint32_t tag, uint32_t time, uint16_t lport, uint16_t rport);
void sctp_add_local_addr_ep(struct sctp_inpcb *, struct sctp_ifa *, uint32_t);
@ -593,7 +600,7 @@ int
sctp_set_primary_addr(struct sctp_tcb *, struct sockaddr *,
struct sctp_nets *);
int sctp_is_vtag_good(struct sctp_inpcb *, uint32_t, struct timeval *, int);
int sctp_is_vtag_good(struct sctp_inpcb *, uint32_t, uint16_t lport, uint16_t rport, struct timeval *, int);
/* void sctp_drain(void); */


@ -1,5 +1,5 @@
/*-
* Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -356,6 +356,8 @@ struct sctp_tmit_chunk {
uint16_t send_size;
uint16_t book_size;
uint16_t mbcnt;
uint16_t auth_keyid;
uint8_t holds_key_ref; /* flag if auth keyid refcount is held */
uint8_t pad_inplace;
uint8_t do_rtt;
uint8_t book_size_scale;
@ -435,6 +437,8 @@ struct sctp_stream_queue_pending {
uint16_t stream;
uint16_t strseq;
uint16_t act_flags;
uint16_t auth_keyid;
uint8_t holds_key_ref;
uint8_t msg_is_complete;
uint8_t some_taken;
uint8_t addr_over;
@ -472,6 +476,8 @@ struct sctp_asconf_addr {
struct sctp_asconf_addr_param ap;
struct sctp_ifa *ifa; /* save the ifa for add/del ip */
uint8_t sent; /* has this been sent yet? */
uint8_t special_del; /* not to be used in lookup */
};
struct sctp_scoping {
@ -763,6 +769,12 @@ struct sctp_association {
*/
uint32_t highest_tsn_inside_map;
/* EY - new NR variables used for nr_sack based on mapping_array */
uint8_t *nr_mapping_array;
uint32_t nr_mapping_array_base_tsn;
uint32_t highest_tsn_inside_nr_map;
uint16_t nr_mapping_array_size;
uint32_t last_echo_tsn;
uint32_t last_cwr_tsn;
uint32_t fast_recovery_tsn;
@ -992,6 +1004,8 @@ struct sctp_association {
/* flag to indicate if peer can do asconf */
uint8_t peer_supports_asconf;
/* EY - flag to indicate if peer can do nr_sack */
uint8_t peer_supports_nr_sack;
/* pr-sctp support flag */
uint8_t peer_supports_prsctp;
/* peer authentication support flag */
@ -999,6 +1013,7 @@ struct sctp_association {
/* stream resets are supported by the peer */
uint8_t peer_supports_strreset;
uint8_t peer_supports_nat;
/*
* packet drop's are supported by the peer, we don't really care
* about this but we bookkeep it anyway.
@ -1028,7 +1043,9 @@ struct sctp_association {
uint8_t delayed_connection;
uint8_t ifp_had_enobuf;
uint8_t saw_sack_with_frags;
uint8_t in_restart_hash;
/* EY */
uint8_t saw_sack_with_nr_frags;
uint8_t in_asocid_hash;
uint8_t assoc_up_sent;
uint8_t adaptation_needed;
uint8_t adaptation_sent;
@ -1037,6 +1054,8 @@ struct sctp_association {
uint8_t sctp_cmt_on_off;
uint8_t iam_blocking;
uint8_t cookie_how[8];
/* EY 05/05/08 - NR_SACK variable */
uint8_t sctp_nr_sack_on_off;
/* JRS 5/21/07 - CMT PF variable */
uint8_t sctp_cmt_pf;
/*


@ -81,6 +81,8 @@ sctp_init_sysctls()
SCTP_BASE_SYSCTL(sctp_add_more_threshold) = SCTPCTL_ADD_MORE_ON_OUTPUT_DEFAULT;
SCTP_BASE_SYSCTL(sctp_nr_outgoing_streams_default) = SCTPCTL_OUTGOING_STREAMS_DEFAULT;
SCTP_BASE_SYSCTL(sctp_cmt_on_off) = SCTPCTL_CMT_ON_OFF_DEFAULT;
/* EY */
SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) = SCTPCTL_NR_SACK_ON_OFF_DEFAULT;
SCTP_BASE_SYSCTL(sctp_cmt_use_dac) = SCTPCTL_CMT_USE_DAC_DEFAULT;
SCTP_BASE_SYSCTL(sctp_cmt_pf) = SCTPCTL_CMT_PF_DEFAULT;
SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) = SCTPCTL_CWND_MAXBURST_DEFAULT;
@ -109,6 +111,7 @@ sctp_init_sysctls()
SCTP_BASE_SYSCTL(sctp_udp_tunneling_for_client_enable) = SCTPCTL_UDP_TUNNELING_FOR_CLIENT_ENABLE_DEFAULT;
SCTP_BASE_SYSCTL(sctp_udp_tunneling_port) = SCTPCTL_UDP_TUNNELING_PORT_DEFAULT;
SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) = SCTPCTL_SACK_IMMEDIATELY_ENABLE_DEFAULT;
SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly) = SCTPCTL_NAT_FRIENDLY_DEFAULT;
#if defined(SCTP_DEBUG)
SCTP_BASE_SYSCTL(sctp_debug_on) = SCTPCTL_DEBUG_DEFAULT;
#endif
@ -574,6 +577,8 @@ sysctl_sctp_check(SYSCTL_HANDLER_ARGS)
RANGECHK(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTPCTL_ADD_MORE_ON_OUTPUT_MIN, SCTPCTL_ADD_MORE_ON_OUTPUT_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_nr_outgoing_streams_default), SCTPCTL_OUTGOING_STREAMS_MIN, SCTPCTL_OUTGOING_STREAMS_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_cmt_on_off), SCTPCTL_CMT_ON_OFF_MIN, SCTPCTL_CMT_ON_OFF_MAX);
/* EY */
RANGECHK(SCTP_BASE_SYSCTL(sctp_nr_sack_on_off), SCTPCTL_NR_SACK_ON_OFF_MIN, SCTPCTL_NR_SACK_ON_OFF_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_cmt_use_dac), SCTPCTL_CMT_USE_DAC_MIN, SCTPCTL_CMT_USE_DAC_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_cmt_pf), SCTPCTL_CMT_PF_MIN, SCTPCTL_CMT_PF_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst), SCTPCTL_CWND_MAXBURST_MIN, SCTPCTL_CWND_MAXBURST_MAX);
@ -601,6 +606,8 @@ sysctl_sctp_check(SYSCTL_HANDLER_ARGS)
#endif
RANGECHK(SCTP_BASE_SYSCTL(sctp_udp_tunneling_for_client_enable), SCTPCTL_UDP_TUNNELING_FOR_CLIENT_ENABLE_MIN, SCTPCTL_UDP_TUNNELING_FOR_CLIENT_ENABLE_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_enable_sack_immediately), SCTPCTL_SACK_IMMEDIATELY_ENABLE_MIN, SCTPCTL_SACK_IMMEDIATELY_ENABLE_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly), SCTPCTL_NAT_FRIENDLY_MIN, SCTPCTL_NAT_FRIENDLY_MAX);
#ifdef SCTP_DEBUG
RANGECHK(SCTP_BASE_SYSCTL(sctp_debug_on), SCTPCTL_DEBUG_MIN, SCTPCTL_DEBUG_MAX);
#endif
@ -767,6 +774,11 @@ SYSCTL_PROC(_net_inet_sctp, OID_AUTO, cmt_on_off, CTLTYPE_INT | CTLFLAG_RW,
&SCTP_BASE_SYSCTL(sctp_cmt_on_off), 0, sysctl_sctp_check, "IU",
SCTPCTL_CMT_ON_OFF_DESC);
/* EY */
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, nr_sack_on_off, CTLTYPE_INT | CTLFLAG_RW,
&SCTP_BASE_SYSCTL(sctp_nr_sack_on_off), 0, sysctl_sctp_check, "IU",
SCTPCTL_NR_SACK_ON_OFF_DESC);
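A user-space sketch of flipping the new knob, assuming the OID above surfaces as net.inet.sctp.nr_sack_on_off and is int-sized; writing it normally requires privilege.

#include <stdio.h>
#include <sys/types.h>
#include <sys/sysctl.h>

int
main(void)
{
    unsigned int on = 1, cur;
    size_t len = sizeof(cur);

    if (sysctlbyname("net.inet.sctp.nr_sack_on_off", &cur, &len, NULL, 0) == 0)
        printf("nr_sack_on_off is currently %u\n", cur);
    /* enabling it system-wide */
    if (sysctlbyname("net.inet.sctp.nr_sack_on_off", NULL, NULL, &on, sizeof(on)) != 0)
        perror("sysctlbyname");
    return (0);
}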
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, cmt_use_dac, CTLTYPE_INT | CTLFLAG_RW,
&SCTP_BASE_SYSCTL(sctp_cmt_use_dac), 0, sysctl_sctp_check, "IU",
SCTPCTL_CMT_USE_DAC_DESC);
@ -880,6 +892,10 @@ SYSCTL_PROC(_net_inet_sctp, OID_AUTO, enable_sack_immediately, CTLTYPE_INT | CTL
&SCTP_BASE_SYSCTL(sctp_enable_sack_immediately), 0, sysctl_sctp_check, "IU",
SCTPCTL_SACK_IMMEDIATELY_ENABLE_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, nat_friendly_init, CTLTYPE_INT | CTLFLAG_RW,
&SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly), 0, sysctl_sctp_check, "IU",
SCTPCTL_NAT_FRIENDLY_DESC);
#ifdef SCTP_DEBUG
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, debug, CTLTYPE_INT | CTLFLAG_RW,
&SCTP_BASE_SYSCTL(sctp_debug_on), 0, sysctl_sctp_check, "IU",


@ -74,6 +74,8 @@ struct sctp_sysctl {
uint32_t sctp_nr_outgoing_streams_default;
uint32_t sctp_cmt_on_off;
uint32_t sctp_cmt_use_dac;
/* EY 5/5/08 - nr_sack flag variable */
uint32_t sctp_nr_sack_on_off;
uint32_t sctp_cmt_pf;
uint32_t sctp_use_cwnd_based_maxburst;
uint32_t sctp_early_fr;
@ -95,6 +97,7 @@ struct sctp_sysctl {
uint32_t sctp_default_frag_interleave;
uint32_t sctp_mobility_base;
uint32_t sctp_mobility_fasthandoff;
uint32_t sctp_inits_include_nat_friendly;
#if defined(SCTP_LOCAL_TRACE_BUF)
struct sctp_log sctp_log;
#endif
@ -322,6 +325,12 @@ struct sctp_sysctl {
#define SCTPCTL_CMT_ON_OFF_MAX 1
#define SCTPCTL_CMT_ON_OFF_DEFAULT 0
/* EY - nr_sack_on_off: NR_SACK on/off flag */
#define SCTPCTL_NR_SACK_ON_OFF_DESC "NR_SACK on/off flag"
#define SCTPCTL_NR_SACK_ON_OFF_MIN 0
#define SCTPCTL_NR_SACK_ON_OFF_MAX 1
#define SCTPCTL_NR_SACK_ON_OFF_DEFAULT 0
/* cmt_use_dac: CMT DAC on/off flag */
#define SCTPCTL_CMT_USE_DAC_DESC "CMT DAC on/off flag"
#define SCTPCTL_CMT_USE_DAC_MIN 0
@ -466,6 +475,13 @@ struct sctp_sysctl {
#define SCTPCTL_SACK_IMMEDIATELY_ENABLE_MAX 1
#define SCTPCTL_SACK_IMMEDIATELY_ENABLE_DEFAULT SCTPCTL_SACK_IMMEDIATELY_ENABLE_MIN
/* nat_friendly_init: Enable sending of the nat-friendly SCTP option on INITs */
#define SCTPCTL_NAT_FRIENDLY_INITS "Enable sending of the nat-friendly SCTP option on INITs."
#define SCTPCTL_NAT_FRIENDLY_INITS_MIN 0
#define SCTPCTL_NAT_FRIENDLY_INITS_MAX 1
#define SCTPCTL_NAT_FRIENDLY_INITS_DEFAULT SCTPCTL_NAT_FRIENDLY_INITS_MIN
#if defined(SCTP_DEBUG)
/* debug: Configure debug output */
#define SCTPCTL_DEBUG_DESC "Configure debug output"


@ -49,7 +49,7 @@ __FBSDID("$FreeBSD$");
#include <netinet/sctp_input.h>
#include <netinet/sctp.h>
#include <netinet/sctp_uio.h>
#include <netinet/udp.h>
void
@ -769,8 +769,8 @@ sctp_mark_all_for_resend(struct sctp_tcb *stcb,
(SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
&stcb->asoc.sent_queue, SCTP_SO_NOT_LOCKED);
}
continue;
}
continue;
}
if (PR_SCTP_RTX_ENABLED(chk->flags)) {
/* Has it been retransmitted tv_sec times? */
@ -781,8 +781,8 @@ sctp_mark_all_for_resend(struct sctp_tcb *stcb,
(SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
&stcb->asoc.sent_queue, SCTP_SO_NOT_LOCKED);
}
continue;
}
continue;
}
if (chk->sent < SCTP_DATAGRAM_RESEND) {
sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
@ -1088,7 +1088,11 @@ sctp_t3rxt_timer(struct sctp_inpcb *inp,
* more, request a RTT update
*/
if (sctp_send_hb(stcb, 1, net) < 0)
return 1;
/*
* Less than 0 means we lost
* the assoc
*/
return (1);
}
}
}
@ -1146,7 +1150,8 @@ sctp_t3rxt_timer(struct sctp_inpcb *inp,
* manually.
*/
if (sctp_send_hb(stcb, 1, net) < 0)
return 1;
/* A return value less than 0 means we lost the association */
return (1);
}
/*
* Special case for cookie-echo'ed case, we don't do output but must
@ -1789,6 +1794,9 @@ sctp_pathmtu_timer(struct sctp_inpcb *inp,
}
if (net->ro._s_addr) {
mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._s_addr.sa, net->ro.ro_rt);
if (net->port) {
mtu -= sizeof(struct udphdr);
}
if (mtu > next_mtu) {
net->mtu = next_mtu;
}
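The UDP-encapsulation adjustment above in one line of arithmetic: when packets are tunneled over UDP, the usable SCTP MTU shrinks by the 8-byte UDP header. A trivial sketch:

#include <stdio.h>
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/udp.h>

int
main(void)
{
    unsigned int route_mtu = 1500;      /* what the route reports */
    unsigned int sctp_mtu = route_mtu;

    /* UDP encapsulation adds a struct udphdr (8 bytes) to every packet. */
    sctp_mtu -= sizeof(struct udphdr);
    printf("route MTU %u -> usable SCTP MTU %u\n", route_mtu, sctp_mtu);
    return (0);
}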


@ -56,6 +56,7 @@ struct sctp_event_subscribe {
uint8_t sctp_partial_delivery_event;
uint8_t sctp_adaptation_layer_event;
uint8_t sctp_authentication_event;
uint8_t sctp_sender_dry_event;
uint8_t sctp_stream_reset_events;
};
@ -139,17 +140,18 @@ struct sctp_snd_all_completes {
};
/* Flags that go into the sinfo->sinfo_flags field */
#define SCTP_EOF 0x0100/* Start shutdown procedures */
#define SCTP_ABORT 0x0200/* Send an ABORT to peer */
#define SCTP_UNORDERED 0x0400/* Message is un-ordered */
#define SCTP_ADDR_OVER 0x0800/* Override the primary-address */
#define SCTP_SENDALL 0x1000/* Send this on all associations */
#define SCTP_EOR 0x2000/* end of message signal */
#define SCTP_PR_POLICY_VALID 0x4000 /* pr sctp policy valid */
#define SCTP_EOF 0x0100 /* Start shutdown procedures */
#define SCTP_ABORT 0x0200 /* Send an ABORT to peer */
#define SCTP_UNORDERED 0x0400 /* Message is un-ordered */
#define SCTP_ADDR_OVER 0x0800 /* Override the primary-address */
#define SCTP_SENDALL 0x1000 /* Send this on all associations */
#define SCTP_EOR 0x2000 /* end of message signal */
#define SCTP_SACK_IMMEDIATELY 0x4000 /* Set I-Bit */
#define INVALID_SINFO_FLAG(x) (((x) & 0xffffff00 \
& ~(SCTP_EOF | SCTP_ABORT | SCTP_UNORDERED |\
SCTP_ADDR_OVER | SCTP_SENDALL | SCTP_EOR)) != 0)
SCTP_ADDR_OVER | SCTP_SENDALL | SCTP_EOR |\
SCTP_SACK_IMMEDIATELY)) != 0)
/* for the endpoint */
/* The lower byte is an enumeration of PR-SCTP policies */
@ -346,6 +348,16 @@ struct sctp_authkey_event {
/* indication values */
#define SCTP_AUTH_NEWKEY 0x0001
#define SCTP_AUTH_NO_AUTH 0x0002
#define SCTP_AUTH_FREE_KEY 0x0003
struct sctp_sender_dry_event {
uint16_t sender_dry_type;
uint16_t sender_dry_flags;
uint32_t sender_dry_length;
sctp_assoc_t sender_dry_assoc_id;
};
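A user-space sketch of consuming the new event: subscribe via the sctp_sender_dry_event flag, then watch for notifications whose type is SCTP_SENDER_DRY_EVENT. It assumes the usual one-to-many notification API (SCTP_EVENTS, MSG_NOTIFICATION) and does no real association setup.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

int
main(void)
{
    struct sctp_event_subscribe events;
    union {
        union sctp_notification sn;
        char raw[2048];
    } buf;
    struct iovec iov;
    struct msghdr msg;
    ssize_t n;
    int fd;

    fd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
    if (fd < 0) {
        perror("socket");
        return (1);
    }
    memset(&events, 0, sizeof(events));
    events.sctp_sender_dry_event = 1;       /* the new subscription flag */
    if (setsockopt(fd, IPPROTO_SCTP, SCTP_EVENTS, &events, sizeof(events)) < 0)
        perror("setsockopt(SCTP_EVENTS)");

    /* ... bind/connect and send data elsewhere, then wait for the event ... */
    iov.iov_base = buf.raw;
    iov.iov_len = sizeof(buf.raw);
    memset(&msg, 0, sizeof(msg));
    msg.msg_iov = &iov;
    msg.msg_iovlen = 1;
    n = recvmsg(fd, &msg, 0);
    if (n > 0 && (msg.msg_flags & MSG_NOTIFICATION) &&
        buf.sn.sn_header.sn_type == SCTP_SENDER_DRY_EVENT)
        printf("everything queued has been acked (assoc %u)\n",
            (unsigned)buf.sn.sn_sender_dry_event.sender_dry_assoc_id);
    close(fd);
    return (0);
}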
/*
@ -386,6 +398,7 @@ union sctp_notification {
struct sctp_adaption_event sn_adaption_event;
struct sctp_pdapi_event sn_pdapi_event;
struct sctp_authkey_event sn_auth_event;
struct sctp_sender_dry_event sn_sender_dry_event;
struct sctp_stream_reset_event sn_strreset_event;
};
@ -401,7 +414,7 @@ union sctp_notification {
#define SCTP_PARTIAL_DELIVERY_EVENT 0x0007
#define SCTP_AUTHENTICATION_EVENT 0x0008
#define SCTP_STREAM_RESET_EVENT 0x0009
#define SCTP_SENDER_DRY_EVENT 0x000a
/*
* socket option structs
@ -539,6 +552,7 @@ struct sctp_assoc_value {
};
struct sctp_assoc_ids {
uint32_t gaids_number_of_ids;
sctp_assoc_t gaids_assoc_id[0];
};
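/*
 * Editorial sketch, not part of this commit: subscribing to the new
 * SCTP_SENDER_DRY_EVENT through the existing SCTP_EVENTS socket option
 * and recognizing the notification when it arrives.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <string.h>
#include <netinet/in.h>
#include <netinet/sctp.h>
#include <netinet/sctp_uio.h>

static int
subscribe_sender_dry(int fd)
{
	struct sctp_event_subscribe ev;

	memset(&ev, 0, sizeof(ev));
	ev.sctp_sender_dry_event = 1;	/* field added by this commit */
	return (setsockopt(fd, IPPROTO_SCTP, SCTP_EVENTS, &ev, sizeof(ev)));
}

static int
is_sender_dry(const union sctp_notification *snp)
{
	/* DTLS over SCTP uses this to learn that the send queue drained */
	return (snp->sn_header.sn_type == SCTP_SENDER_DRY_EVENT);
}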

View File

@ -1,5 +1,5 @@
/*-
* Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -49,6 +49,7 @@ __FBSDID("$FreeBSD$");
#include <netinet/sctp_auth.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_cc_functions.h>
#include <netinet/udp.h>
@ -201,6 +202,9 @@ sctp_notify_mbuf(struct sctp_inpcb *inp,
/* Adjust destination size limit */
if (net->mtu > nxtsz) {
net->mtu = nxtsz;
if (net->port) {
net->mtu -= sizeof(struct udphdr);
}
}
/* now what about the ep? */
if (stcb->asoc.smallest_mtu > nxtsz) {
@ -507,8 +511,10 @@ sctp_attach(struct socket *so, int proto, struct thread *p)
struct inpcb *ip_inp;
int error;
uint32_t vrf_id = SCTP_DEFAULT_VRFID;
#ifdef IPSEC
uint32_t flags;
#endif
inp = (struct sctp_inpcb *)so->so_pcb;
@ -1704,6 +1710,29 @@ sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize,
*optsize = sizeof(*av);
}
break;
/* EY - get socket option for nr_sacks */
case SCTP_NR_SACK_ON_OFF:
{
struct sctp_assoc_value *av;
SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off)) {
SCTP_FIND_STCB(inp, stcb, av->assoc_id);
if (stcb) {
av->assoc_value = stcb->asoc.sctp_nr_sack_on_off;
SCTP_TCB_UNLOCK(stcb);
} else {
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
error = ENOTCONN;
}
} else {
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
error = ENOPROTOOPT;
}
*optsize = sizeof(*av);
}
break;
/* JRS - Get socket option for pluggable congestion control */
case SCTP_PLUGGABLE_CC:
{
@ -1767,7 +1796,7 @@ sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize,
SCTP_CHECK_AND_CAST(ids, optval, struct sctp_assoc_ids, *optsize);
at = 0;
limit = *optsize / sizeof(sctp_assoc_t);
limit = (*optsize - sizeof(uint32_t)) / sizeof(sctp_assoc_t);
SCTP_INP_RLOCK(inp);
LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
if (at < limit) {
@ -1779,7 +1808,8 @@ sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize,
}
}
SCTP_INP_RUNLOCK(inp);
*optsize = at * sizeof(sctp_assoc_t);
ids->gaids_number_of_ids = at;
*optsize = ((at * sizeof(sctp_assoc_t)) + sizeof(uint32_t));
}
break;
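/*
 * Editorial sketch, not part of this commit: with gaids_number_of_ids and
 * the flexible id array added above, a caller sizes the buffer for the
 * count word plus room for the ids and reads back how many were filled
 * in.  The option name SCTP_GET_ASSOC_ID_LIST and the headers below are
 * assumptions of this illustration.
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>
#include <netinet/sctp_uio.h>
#include <stdio.h>

static void
print_assoc_ids(int fd)
{
	char buf[sizeof(struct sctp_assoc_ids) + 32 * sizeof(sctp_assoc_t)];
	struct sctp_assoc_ids *ids = (struct sctp_assoc_ids *)buf;
	socklen_t len = sizeof(buf);
	uint32_t i;

	if (getsockopt(fd, IPPROTO_SCTP, SCTP_GET_ASSOC_ID_LIST, ids, &len) != 0)
		return;
	for (i = 0; i < ids->gaids_number_of_ids; i++)
		printf("association id %u\n", (unsigned)ids->gaids_assoc_id[i]);
}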
case SCTP_CONTEXT:
@ -1961,6 +1991,9 @@ sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize,
if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT))
events->sctp_authentication_event = 1;
if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_DRYEVNT))
events->sctp_sender_dry_event = 1;
if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT))
events->sctp_stream_reset_events = 1;
SCTP_INP_RUNLOCK(inp);
@ -2532,7 +2565,7 @@ sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize,
if (stcb) {
/* get the active key on the assoc */
scact->scact_keynumber = stcb->asoc.authinfo.assoc_keyid;
scact->scact_keynumber = stcb->asoc.authinfo.active_keyid;
SCTP_TCB_UNLOCK(stcb);
} else {
/* get the endpoint active key */
@ -2789,6 +2822,27 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
}
}
break;
/* EY nr_sack_on_off socket option */
case SCTP_NR_SACK_ON_OFF:
{
struct sctp_assoc_value *av;
SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off)) {
SCTP_FIND_STCB(inp, stcb, av->assoc_id);
if (stcb) {
stcb->asoc.sctp_nr_sack_on_off = (uint8_t) av->assoc_value;
SCTP_TCB_UNLOCK(stcb);
} else {
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
error = ENOTCONN;
}
} else {
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
error = ENOPROTOOPT;
}
}
break;
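/*
 * Editorial sketch, not part of this commit: enabling NR-SACK for one
 * association from userland.  The option is only honoured while the
 * nr_sack_on_off sysctl is enabled; otherwise the kernel returns
 * ENOPROTOOPT, as the case above shows.
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>
#include <netinet/sctp_uio.h>

static int
enable_nr_sack(int fd, sctp_assoc_t assoc_id)
{
	struct sctp_assoc_value av;

	av.assoc_id = assoc_id;
	av.assoc_value = 1;	/* 1 = on, 0 = off */
	return (setsockopt(fd, IPPROTO_SCTP, SCTP_NR_SACK_ON_OFF,
	    &av, sizeof(av)));
}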
/* JRS - Set socket option for pluggable congestion control */
case SCTP_PLUGGABLE_CC:
{
@ -3012,7 +3066,7 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
}
shared_key->key = key;
shared_key->keyid = sca->sca_keynumber;
sctp_insert_sharedkey(shared_keys, shared_key);
error = sctp_insert_sharedkey(shared_keys, shared_key);
SCTP_TCB_UNLOCK(stcb);
} else {
/* set it on the endpoint */
@ -3046,7 +3100,7 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
}
shared_key->key = key;
shared_key->keyid = sca->sca_keynumber;
sctp_insert_sharedkey(shared_keys, shared_key);
error = sctp_insert_sharedkey(shared_keys, shared_key);
SCTP_INP_WUNLOCK(inp);
}
break;
@ -3108,22 +3162,29 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
{
struct sctp_authkeyid *scact;
SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, optsize);
SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid,
optsize);
SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id);
/* set the active key in the right place */
if (stcb) {
/* set the active key on the assoc */
if (sctp_auth_setactivekey(stcb, scact->scact_keynumber)) {
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
if (sctp_auth_setactivekey(stcb,
scact->scact_keynumber)) {
SCTP_LTRACE_ERR_RET(inp, NULL, NULL,
SCTP_FROM_SCTP_USRREQ,
EINVAL);
error = EINVAL;
}
SCTP_TCB_UNLOCK(stcb);
} else {
/* set the active key on the endpoint */
SCTP_INP_WLOCK(inp);
if (sctp_auth_setactivekey_ep(inp, scact->scact_keynumber)) {
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
if (sctp_auth_setactivekey_ep(inp,
scact->scact_keynumber)) {
SCTP_LTRACE_ERR_RET(inp, NULL, NULL,
SCTP_FROM_SCTP_USRREQ,
EINVAL);
error = EINVAL;
}
SCTP_INP_WUNLOCK(inp);
@ -3134,20 +3195,58 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
{
struct sctp_authkeyid *scdel;
SCTP_CHECK_AND_CAST(scdel, optval, struct sctp_authkeyid, optsize);
SCTP_CHECK_AND_CAST(scdel, optval, struct sctp_authkeyid,
optsize);
SCTP_FIND_STCB(inp, stcb, scdel->scact_assoc_id);
/* delete the key from the right place */
if (stcb) {
if (sctp_delete_sharedkey(stcb, scdel->scact_keynumber)) {
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
if (sctp_delete_sharedkey(stcb,
scdel->scact_keynumber)) {
SCTP_LTRACE_ERR_RET(inp, NULL, NULL,
SCTP_FROM_SCTP_USRREQ,
EINVAL);
error = EINVAL;
}
SCTP_TCB_UNLOCK(stcb);
} else {
SCTP_INP_WLOCK(inp);
if (sctp_delete_sharedkey_ep(inp, scdel->scact_keynumber)) {
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
if (sctp_delete_sharedkey_ep(inp,
scdel->scact_keynumber)) {
SCTP_LTRACE_ERR_RET(inp, NULL, NULL,
SCTP_FROM_SCTP_USRREQ,
EINVAL);
error = EINVAL;
}
SCTP_INP_WUNLOCK(inp);
}
break;
}
case SCTP_AUTH_DEACTIVATE_KEY:
{
struct sctp_authkeyid *keyid;
SCTP_CHECK_AND_CAST(keyid, optval, struct sctp_authkeyid,
optsize);
SCTP_FIND_STCB(inp, stcb, keyid->scact_assoc_id);
/* deactivate the key from the right place */
if (stcb) {
if (sctp_deact_sharedkey(stcb,
keyid->scact_keynumber)) {
SCTP_LTRACE_ERR_RET(inp, NULL, NULL,
SCTP_FROM_SCTP_USRREQ,
EINVAL);
error = EINVAL;
}
SCTP_TCB_UNLOCK(stcb);
} else {
SCTP_INP_WLOCK(inp);
if (sctp_deact_sharedkey_ep(inp,
keyid->scact_keynumber)) {
SCTP_LTRACE_ERR_RET(inp, NULL, NULL,
SCTP_FROM_SCTP_USRREQ,
EINVAL);
error = EINVAL;
}
SCTP_INP_WUNLOCK(inp);
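/*
 * Editorial sketch, not part of this commit: a plausible userland key
 * rollover using the new SCTP_AUTH_DEACTIVATE_KEY option together with
 * the existing SCTP_AUTH_ACTIVE_KEY and SCTP_AUTH_DELETE_KEY options.
 * In practice the delete would usually wait until the key is reported
 * free; it is done inline here only for brevity.
 */
#include <sys/socket.h>
#include <string.h>
#include <netinet/in.h>
#include <netinet/sctp.h>
#include <netinet/sctp_uio.h>

static int
rollover_auth_key(int fd, sctp_assoc_t assoc_id,
    uint16_t new_key, uint16_t old_key)
{
	struct sctp_authkeyid k;

	memset(&k, 0, sizeof(k));
	k.scact_assoc_id = assoc_id;

	/* make the new key the active one */
	k.scact_keynumber = new_key;
	if (setsockopt(fd, IPPROTO_SCTP, SCTP_AUTH_ACTIVE_KEY, &k, sizeof(k)) != 0)
		return (-1);

	/* let the old key's references drain, then remove it */
	k.scact_keynumber = old_key;
	if (setsockopt(fd, IPPROTO_SCTP, SCTP_AUTH_DEACTIVATE_KEY, &k, sizeof(k)) != 0)
		return (-1);
	return (setsockopt(fd, IPPROTO_SCTP, SCTP_AUTH_DELETE_KEY, &k, sizeof(k)));
}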
@ -3414,6 +3513,12 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTHEVNT);
}
if (events->sctp_sender_dry_event) {
sctp_feature_on(inp, SCTP_PCB_FLAGS_DRYEVNT);
} else {
sctp_feature_off(inp, SCTP_PCB_FLAGS_DRYEVNT);
}
if (events->sctp_stream_reset_events) {
sctp_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT);
} else {
@ -4123,6 +4228,7 @@ sctp_connect(struct socket *so, struct sockaddr *addr, struct thread *p)
#ifdef INET6
if (addr->sa_family == AF_INET6) {
struct sockaddr_in6 *sin6p;
if (addr->sa_len != sizeof(struct sockaddr_in6)) {
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
return (EINVAL);
@ -4136,6 +4242,7 @@ sctp_connect(struct socket *so, struct sockaddr *addr, struct thread *p)
#endif
if (addr->sa_family == AF_INET) {
struct sockaddr_in *sinp;
if (addr->sa_len != sizeof(struct sockaddr_in)) {
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
return (EINVAL);

View File

@ -1,5 +1,5 @@
/*-
* Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -86,6 +86,10 @@ extern struct pr_usrreqs sctp_usrreqs;
}
#define sctp_free_a_strmoq(_stcb, _strmoq) { \
if ((_strmoq)->holds_key_ref) { \
sctp_auth_key_release(stcb, sp->auth_keyid); \
(_strmoq)->holds_key_ref = 0; \
} \
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_strmoq), (_strmoq)); \
SCTP_DECR_STRMOQ_COUNT(); \
}
@ -94,11 +98,15 @@ extern struct pr_usrreqs sctp_usrreqs;
(_strmoq) = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_strmoq), struct sctp_stream_queue_pending); \
if ((_strmoq)) { \
SCTP_INCR_STRMOQ_COUNT(); \
(_strmoq)->holds_key_ref = 0; \
} \
}
#define sctp_free_a_chunk(_stcb, _chk) { \
if ((_chk)->holds_key_ref) {\
sctp_auth_key_release((_stcb), (_chk)->auth_keyid); \
(_chk)->holds_key_ref = 0; \
} \
if(_stcb) { \
SCTP_TCB_LOCK_ASSERT((_stcb)); \
if ((_chk)->whoTo) { \
@ -126,21 +134,22 @@ extern struct pr_usrreqs sctp_usrreqs;
if ((_chk)) { \
SCTP_INCR_CHK_COUNT(); \
(_chk)->whoTo = NULL; \
(_chk)->holds_key_ref = 0; \
} \
} else { \
(_chk) = TAILQ_FIRST(&(_stcb)->asoc.free_chunks); \
TAILQ_REMOVE(&(_stcb)->asoc.free_chunks, (_chk), sctp_next); \
atomic_subtract_int(&SCTP_BASE_INFO(ipi_free_chunks), 1); \
(_chk)->holds_key_ref = 0; \
SCTP_STAT_INCR(sctps_cached_chk); \
(_stcb)->asoc.free_chunk_cnt--; \
} \
}
#define sctp_free_remote_addr(__net) { \
if ((__net)) { \
if (atomic_fetchadd_int(&(__net)->ref_count, -1) == 1) { \
if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&(__net)->ref_count)) { \
(void)SCTP_OS_TIMER_STOP(&(__net)->rxt_timer.timer); \
(void)SCTP_OS_TIMER_STOP(&(__net)->pmtu_timer.timer); \
(void)SCTP_OS_TIMER_STOP(&(__net)->fr_timer.timer); \
@ -160,64 +169,18 @@ extern struct pr_usrreqs sctp_usrreqs;
} \
}
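/*
 * Editorial note, not part of this commit: SCTP_DECREMENT_AND_CHECK_REFCOUNT
 * above replaces the open-coded atomic_fetchadd_int(&ref_count, -1) == 1
 * test.  Matching the line it replaces, a plausible definition (the real
 * one lives in another SCTP header) would be:
 */
#define SCTP_DECREMENT_AND_CHECK_REFCOUNT(addr) \
	(atomic_fetchadd_int((addr), -1) == 1)	/* true on the last reference */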
#ifdef INVARIANTS
#define sctp_sbfree(ctl, stcb, sb, m) { \
uint32_t val; \
val = atomic_fetchadd_int(&(sb)->sb_cc,-(SCTP_BUF_LEN((m)))); \
if (val < SCTP_BUF_LEN((m))) { \
panic("sb_cc goes negative"); \
} \
val = atomic_fetchadd_int(&(sb)->sb_mbcnt,-(MSIZE)); \
if (val < MSIZE) { \
panic("sb_mbcnt goes negative"); \
} \
SCTP_SAVE_ATOMIC_DECREMENT(&(sb)->sb_cc, SCTP_BUF_LEN((m))); \
SCTP_SAVE_ATOMIC_DECREMENT(&(sb)->sb_mbcnt, MSIZE); \
if (((ctl)->do_not_ref_stcb == 0) && stcb) {\
val = atomic_fetchadd_int(&(stcb)->asoc.sb_cc,-(SCTP_BUF_LEN((m)))); \
if (val < SCTP_BUF_LEN((m))) {\
panic("stcb->sb_cc goes negative"); \
} \
val = atomic_fetchadd_int(&(stcb)->asoc.my_rwnd_control_len,-(MSIZE)); \
if (val < MSIZE) { \
panic("asoc->mbcnt goes negative"); \
} \
SCTP_SAVE_ATOMIC_DECREMENT(&(stcb)->asoc.sb_cc, SCTP_BUF_LEN((m))); \
SCTP_SAVE_ATOMIC_DECREMENT(&(stcb)->asoc.my_rwnd_control_len, MSIZE); \
} \
if (SCTP_BUF_TYPE(m) != MT_DATA && SCTP_BUF_TYPE(m) != MT_HEADER && \
SCTP_BUF_TYPE(m) != MT_OOBDATA) \
atomic_subtract_int(&(sb)->sb_ctl,SCTP_BUF_LEN((m))); \
}
#else
#define sctp_sbfree(ctl, stcb, sb, m) { \
uint32_t val; \
val = atomic_fetchadd_int(&(sb)->sb_cc,-(SCTP_BUF_LEN((m)))); \
if (val < SCTP_BUF_LEN((m))) { \
(sb)->sb_cc = 0;\
} \
val = atomic_fetchadd_int(&(sb)->sb_mbcnt,-(MSIZE)); \
if (val < MSIZE) { \
(sb)->sb_mbcnt = 0; \
} \
if (((ctl)->do_not_ref_stcb == 0) && stcb) {\
val = atomic_fetchadd_int(&(stcb)->asoc.sb_cc,-(SCTP_BUF_LEN((m)))); \
if (val < SCTP_BUF_LEN((m))) {\
(stcb)->asoc.sb_cc = 0; \
} \
val = atomic_fetchadd_int(&(stcb)->asoc.my_rwnd_control_len,-(MSIZE)); \
if (val < MSIZE) { \
(stcb)->asoc.my_rwnd_control_len = 0; \
} \
} \
if (SCTP_BUF_TYPE(m) != MT_DATA && SCTP_BUF_TYPE(m) != MT_HEADER && \
SCTP_BUF_TYPE(m) != MT_OOBDATA) \
atomic_subtract_int(&(sb)->sb_ctl,SCTP_BUF_LEN((m))); \
}
#endif
#define sctp_sballoc(stcb, sb, m) { \
atomic_add_int(&(sb)->sb_cc,SCTP_BUF_LEN((m))); \
atomic_add_int(&(sb)->sb_mbcnt, MSIZE); \

View File

@ -1,5 +1,5 @@
/*-
* Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -848,7 +848,7 @@ sctp_select_initial_TSN(struct sctp_pcb *inp)
}
uint32_t
sctp_select_a_tag(struct sctp_inpcb *inp, int save_in_twait)
sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int save_in_twait)
{
u_long x, not_done;
struct timeval now;
@ -861,7 +861,7 @@ sctp_select_a_tag(struct sctp_inpcb *inp, int save_in_twait)
/* we never use 0 */
continue;
}
if (sctp_is_vtag_good(inp, x, &now, save_in_twait)) {
if (sctp_is_vtag_good(inp, x, lport, rport, &now, save_in_twait)) {
not_done = 0;
}
}
@ -894,6 +894,8 @@ sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
asoc->cookie_life = m->sctp_ep.def_cookie_life;
asoc->sctp_cmt_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_on_off);
/* EY Init nr_sack variable */
asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
/* JRS 5/21/07 - Init CMT PF variables */
asoc->sctp_cmt_pf = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_pf);
asoc->sctp_frag_point = m->sctp_frag_point;
@ -910,7 +912,8 @@ sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
#endif
asoc->sb_send_resv = 0;
if (override_tag) {
if (sctp_is_in_timewait(override_tag)) {
#ifdef MICHAELS_EXPERIMENT
if (sctp_is_in_timewait(override_tag, stcb->sctp_ep->sctp_lport, stcb->rport)) {
/*
* It must be in the time-wait hash, we put it there
* when we alloc one. If not, the peer is playing
@ -924,13 +927,15 @@ sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
#endif
return (ENOMEM);
}
#else
asoc->my_vtag = override_tag;
#endif
} else {
asoc->my_vtag = sctp_select_a_tag(m, 1);
asoc->my_vtag = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
}
/* Get the nonce tags */
asoc->my_vtag_nonce = sctp_select_a_tag(m, 0);
asoc->peer_vtag_nonce = sctp_select_a_tag(m, 0);
asoc->my_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
asoc->peer_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
asoc->vrf_id = vrf_id;
if (sctp_is_feature_on(m, SCTP_PCB_FLAGS_DONOT_HEARTBEAT))
@ -951,13 +956,12 @@ sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
#endif
asoc->refcnt = 0;
asoc->assoc_up_sent = 0;
asoc->assoc_id = asoc->my_vtag;
asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
sctp_select_initial_TSN(&m->sctp_ep);
asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
/* we are optimistic here */
asoc->peer_supports_pktdrop = 1;
asoc->peer_supports_nat = 0;
asoc->sent_queue_retran_cnt = 0;
/* for CMT */
@ -1146,6 +1150,17 @@ sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
return (ENOMEM);
}
memset(asoc->mapping_array, 0, asoc->mapping_array_size);
/* EY - initialize the nr_mapping_array just like mapping array */
asoc->nr_mapping_array_size = SCTP_INITIAL_NR_MAPPING_ARRAY;
SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->nr_mapping_array_size,
SCTP_M_MAP);
/*
* if (asoc->nr_mapping_array == NULL) { SCTP_FREE(asoc->strmout,
* SCTP_M_STRMO); SCTP_LTRACE_ERR_RET(NULL, stcb, NULL,
* SCTP_FROM_SCTPUTIL, ENOMEM); return (ENOMEM); }
*/
memset(asoc->nr_mapping_array, 0, asoc->nr_mapping_array_size);
/* Now the init of the other outqueues */
TAILQ_INIT(&asoc->free_chunks);
TAILQ_INIT(&asoc->out_wheel);
@ -1159,6 +1174,7 @@ sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
TAILQ_INIT(&asoc->asconf_queue);
/* authentication fields */
asoc->authinfo.random = NULL;
asoc->authinfo.active_keyid = 0;
asoc->authinfo.assoc_key = NULL;
asoc->authinfo.assoc_keyid = 0;
asoc->authinfo.recv_key = NULL;
@ -1204,6 +1220,30 @@ sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
return (0);
}
/* EY - nr_sack version of the above method */
int
sctp_expand_nr_mapping_array(struct sctp_association *asoc, uint32_t needed)
{
/* nr mapping array needs to grow */
uint8_t *new_array;
uint32_t new_size;
new_size = asoc->nr_mapping_array_size + ((needed + 7) / 8 + SCTP_NR_MAPPING_ARRAY_INCR);
SCTP_MALLOC(new_array, uint8_t *, new_size, SCTP_M_MAP);
if (new_array == NULL) {
/* can't get more, forget it */
SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n",
new_size);
return (-1);
}
memset(new_array, 0, new_size);
memcpy(new_array, asoc->nr_mapping_array, asoc->nr_mapping_array_size);
SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
asoc->nr_mapping_array = new_array;
asoc->nr_mapping_array_size = new_size;
return (0);
}
#if defined(SCTP_USE_THREAD_BASED_ITERATOR)
static void
sctp_iterator_work(struct sctp_iterator *it)
@ -1617,7 +1657,15 @@ sctp_timeout_handler(void *t)
stcb->asoc.timosack++;
if (stcb->asoc.cumulative_tsn != stcb->asoc.highest_tsn_inside_map)
sctp_sack_check(stcb, 0, 0, &abort_flag);
sctp_send_sack(stcb);
/*
* EY - if NR-SACKs are in use, send an NR-SACK;
* otherwise send a plain SACK
*/
if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
sctp_send_nr_sack(stcb);
else
sctp_send_sack(stcb);
}
#ifdef SCTP_AUDITING_ENABLED
sctp_auditing(4, inp, stcb, net);
@ -2904,19 +2952,6 @@ sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
#endif
/*
* First, if we are going down, dump everything we can to the
* socket rcv queue.
*/
if ((stcb == NULL) ||
(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
(stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)
) {
/* If the socket is gone we are out of here */
return;
}
/*
* For TCP model AND UDP connected sockets we will send an error up
* when an ABORT comes in.
@ -3025,10 +3060,10 @@ sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
struct sctp_paddr_change *spc;
struct sctp_queued_to_read *control;
if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT)))
if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
/* event not enabled */
return;
}
m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
if (m_notify == NULL)
return;
@ -3099,15 +3134,15 @@ sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
#endif
)
{
struct mbuf *m_notify, *tt;
struct mbuf *m_notify;
struct sctp_send_failed *ssf;
struct sctp_queued_to_read *control;
int length;
if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)))
if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
/* event not enabled */
return;
}
m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
if (m_notify == NULL)
/* no space left */
@ -3133,20 +3168,18 @@ sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
ssf->ssf_assoc_id = sctp_get_associd(stcb);
/* Take off the chunk header */
m_adj(chk->data, sizeof(struct sctp_data_chunk));
/* trim out any 0 len mbufs */
while (SCTP_BUF_LEN(chk->data) == 0) {
tt = chk->data;
chk->data = SCTP_BUF_NEXT(tt);
SCTP_BUF_NEXT(tt) = NULL;
sctp_m_freem(tt);
}
SCTP_BUF_NEXT(m_notify) = chk->data;
SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
if (chk->data) {
/*
* trim off the sctp chunk header (it should be there)
*/
if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
m_adj(chk->data, sizeof(struct sctp_data_chunk));
sctp_mbuf_crush(chk->data);
chk->send_size -= sizeof(struct sctp_data_chunk);
}
}
/* Steal off the mbuf */
chk->data = NULL;
/*
@ -3187,10 +3220,10 @@ sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
struct sctp_queued_to_read *control;
int length;
if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)))
if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
/* event not enabled */
return;
}
length = sizeof(struct sctp_send_failed) + sp->length;
m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
if (m_notify == NULL)
@ -3257,10 +3290,10 @@ sctp_notify_adaptation_layer(struct sctp_tcb *stcb,
struct sctp_adaptation_event *sai;
struct sctp_queued_to_read *control;
if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT)))
if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
/* event not enabled */
return;
}
m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
if (m_notify == NULL)
/* no space left */
@ -3304,11 +3337,10 @@ sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
struct sctp_queued_to_read *control;
struct sockbuf *sb;
if ((stcb == NULL) || (stcb->sctp_socket == NULL) ||
sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT))
if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT)) {
/* event not enabled */
return;
}
m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
if (m_notify == NULL)
/* no space left */
@ -3378,9 +3410,6 @@ sctp_notify_shutdown_event(struct sctp_tcb *stcb)
* For TCP model AND UDP connected sockets we will send an error up
* when an SHUTDOWN completes
*/
if (stcb == NULL) {
return;
}
if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
/* mark socket closed for read/write and wakeup! */
@ -3404,10 +3433,10 @@ sctp_notify_shutdown_event(struct sctp_tcb *stcb)
SCTP_SOCKET_UNLOCK(so, 1);
#endif
}
if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT))
if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
/* event not enabled */
return;
}
m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
if (m_notify == NULL)
/* no space left */
@ -3439,6 +3468,53 @@ sctp_notify_shutdown_event(struct sctp_tcb *stcb)
&stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
}
static void
sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
SCTP_UNUSED
#endif
)
{
struct mbuf *m_notify;
struct sctp_sender_dry_event *event;
struct sctp_queued_to_read *control;
if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_DRYEVNT)) {
/* event not enabled */
return;
}
m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_DONTWAIT, 1, MT_DATA);
if (m_notify == NULL) {
/* no space left */
return;
}
SCTP_BUF_LEN(m_notify) = 0;
event = mtod(m_notify, struct sctp_sender_dry_event *);
event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
event->sender_dry_flags = 0;
event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
event->sender_dry_assoc_id = sctp_get_associd(stcb);
SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
SCTP_BUF_NEXT(m_notify) = NULL;
/* append to socket */
control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
0, 0, 0, 0, 0, 0, m_notify);
if (control == NULL) {
/* no memory */
sctp_m_freem(m_notify);
return;
}
control->length = SCTP_BUF_LEN(m_notify);
control->spec_flags = M_NOTIFICATION;
/* not that we need this */
control->tail_mbuf = m_notify;
sctp_add_to_readq(stcb->sctp_ep, stcb, control,
&stcb->sctp_socket->so_rcv, 1, so_locked);
}
static void
sctp_notify_stream_reset(struct sctp_tcb *stcb,
int number_entries, uint16_t * list, int flag)
@ -3448,13 +3524,10 @@ sctp_notify_stream_reset(struct sctp_tcb *stcb,
struct sctp_stream_reset_event *strreset;
int len;
if (stcb == NULL) {
return;
}
if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT))
if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
/* event not enabled */
return;
}
m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
if (m_notify == NULL)
/* no space left */
@ -3516,19 +3589,11 @@ sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
#endif
)
{
if (stcb == NULL) {
/* unlikely but */
return;
}
if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
if ((stcb == NULL) ||
(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
(stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)
) {
/* No notifications up when we are in a no socket state */
return;
}
if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
/* Can't send up to a closed socket any notifications */
(stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
/* If the socket is gone we are out of here */
return;
}
if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
@ -3549,6 +3614,10 @@ sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
sctp_notify_adaptation_layer(stcb, error);
}
if (stcb->asoc.peer_supports_auth == 0) {
sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
NULL, so_locked);
}
break;
case SCTP_NOTIFY_ASSOC_DOWN:
sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, so_locked);
@ -3613,6 +3682,10 @@ sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
break;
case SCTP_NOTIFY_ASSOC_RESTART:
sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data, so_locked);
if (stcb->asoc.peer_supports_auth == 0) {
sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
NULL, so_locked);
}
break;
case SCTP_NOTIFY_HB_RESP:
break;
@ -3651,16 +3724,22 @@ sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
break;
case SCTP_NOTIFY_AUTH_NEW_KEY:
sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error,
(uint16_t) (uintptr_t) data);
(uint16_t) (uintptr_t) data,
so_locked);
break;
#if 0
case SCTP_NOTIFY_AUTH_KEY_CONFLICT:
sctp_notify_authentication(stcb, SCTP_AUTH_KEY_CONFLICT,
error, (uint16_t) (uintptr_t) data);
case SCTP_NOTIFY_AUTH_FREE_KEY:
sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
(uint16_t) (uintptr_t) data,
so_locked);
break;
case SCTP_NOTIFY_NO_PEER_AUTH:
sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
(uint16_t) (uintptr_t) data,
so_locked);
break;
case SCTP_NOTIFY_SENDER_DRY:
sctp_notify_sender_dry_event(stcb, so_locked);
break;
#endif /* not yet? remove? */
default:
SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
__FUNCTION__, notification, notification);
@ -3701,17 +3780,6 @@ sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock, int so_locked
while (chk) {
TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
asoc->sent_queue_cnt--;
if (chk->data) {
/*
* trim off the sctp chunk header(it should
* be there)
*/
if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
m_adj(chk->data, sizeof(struct sctp_data_chunk));
sctp_mbuf_crush(chk->data);
chk->send_size -= sizeof(struct sctp_data_chunk);
}
}
sctp_free_bufspace(stcb, asoc, chk, 1);
sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
SCTP_NOTIFY_DATAGRAM_SENT, chk, so_locked);
@ -3730,17 +3798,6 @@ sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock, int so_locked
while (chk) {
TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
asoc->send_queue_cnt--;
if (chk->data) {
/*
* trim off the sctp chunk header(it should
* be there)
*/
if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
m_adj(chk->data, sizeof(struct sctp_data_chunk));
sctp_mbuf_crush(chk->data);
chk->send_size -= sizeof(struct sctp_data_chunk);
}
}
sctp_free_bufspace(stcb, asoc, chk, 1);
sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, SCTP_NOTIFY_DATAGRAM_UNSENT, chk, so_locked);
if (chk->data) {
@ -4355,7 +4412,6 @@ sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
SCTP_INP_READ_UNLOCK(new_inp);
}
void
sctp_add_to_readq(struct sctp_inpcb *inp,
struct sctp_tcb *stcb,
@ -4687,7 +4743,9 @@ sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
#endif
sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, SCTP_SO_NOT_LOCKED);
sctp_flight_size_decrease(tp1);
sctp_total_flight_decrease(stcb, tp1);
sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
sctp_m_freem(tp1->data);
tp1->data = NULL;
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
@ -4970,7 +5028,15 @@ sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
goto out;
}
SCTP_STAT_INCR(sctps_wu_sacks_sent);
sctp_send_sack(stcb);
/*
* EY - if NR-SACKs are in use, send an NR-SACK;
* otherwise send a plain SACK
*/
if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
sctp_send_nr_sack(stcb);
else
sctp_send_sack(stcb);
sctp_chunk_output(stcb->sctp_ep, stcb,
SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
/* make sure no timer is running */
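/*
 * Editorial sketch, not part of this commit: the SACK-versus-NR-SACK
 * choice repeated in the timeout handler and in sctp_user_rcvd() above
 * could be captured in one predicate; the helper below is hypothetical
 * and shown for clarity only.
 */
static void
sctp_send_sack_or_nr_sack(struct sctp_tcb *stcb)
{
	if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) &&
	    stcb->asoc.peer_supports_nr_sack) {
		/* the peer negotiated NR-SACK and the sysctl allows it */
		sctp_send_nr_sack(stcb);
	} else {
		sctp_send_sack(stcb);
	}
}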

View File

@ -77,7 +77,7 @@ struct sctp_ifa *
uint32_t sctp_select_initial_TSN(struct sctp_pcb *);
uint32_t sctp_select_a_tag(struct sctp_inpcb *, int);
uint32_t sctp_select_a_tag(struct sctp_inpcb *, uint16_t lport, uint16_t rport, int);
int sctp_init_asoc(struct sctp_inpcb *, struct sctp_tcb *, int, uint32_t, uint32_t);
@ -168,6 +168,8 @@ sctp_report_all_outbound(struct sctp_tcb *, int, int
int sctp_expand_mapping_array(struct sctp_association *, uint32_t);
/* EY nr_sack version of the above method, expands nr_mapping_array */
int sctp_expand_nr_mapping_array(struct sctp_association *, uint32_t);
void
sctp_abort_notification(struct sctp_tcb *, int, int
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)

View File

@ -286,6 +286,9 @@ sctp6_notify_mbuf(struct sctp_inpcb *inp, struct icmp6_hdr *icmp6,
/* Adjust destination size limit */
if (net->mtu > nxtsz) {
net->mtu = nxtsz;
if (net->port) {
net->mtu -= sizeof(struct udphdr);
}
}
/* now what about the ep? */
if (stcb->asoc.smallest_mtu > nxtsz) {