Ensure that the padding of the last parameter of an INIT chunk
is not included in the chunk length, as required by RFC 4960.
While there, clean up sctp_send_initiate().

MFC after: 2 weeks
Michael Tuexen 2012-12-08 08:22:33 +00:00
parent fd53babd77
commit 3fb7827628
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=244021
2 changed files with 183 additions and 143 deletions
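A note before the diff: RFC 4960 requires every variable-length parameter to be padded to a 4-byte boundary, but the padding that follows the last parameter of a chunk must not be counted in the chunk length (the pad bytes still go on the wire so the chunk ends on a 32-bit boundary). The user-space sketch below mirrors the chunk_len/padding_len bookkeeping this commit introduces: padding owed by the previous parameter is written out only when the next parameter is appended, so the trailing padding of the final parameter never enters chunk_len. It is only an illustration; append_param(), PAD4(), buf, and the parameter types 1 and 2 are made-up names, not the kernel code, and byte-order conversion (htons) is omitted.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAD4(x) (((x) + 3u) & ~3u)	/* round up to a 4-byte boundary */

struct paramhdr {			/* shaped like struct sctp_paramhdr */
	uint16_t type;
	uint16_t length;		/* unpadded length, per RFC 4960 */
};

static uint8_t buf[256];		/* stands in for the mbuf data */
static uint16_t chunk_len;		/* bytes counted in the chunk length */
static uint16_t padding_len;		/* padding still owed by the previous parameter */

/* Append one parameter, flushing the pending padding of the previous one first. */
static void
append_param(uint16_t type, const uint8_t *val, uint16_t val_len)
{
	struct paramhdr ph;
	uint16_t plen = (uint16_t)(sizeof(ph) + val_len);

	if (padding_len > 0) {		/* same pattern as the memset() calls in the diff */
		memset(buf + chunk_len, 0, padding_len);
		chunk_len += padding_len;
		padding_len = 0;
	}
	ph.type = type;			/* htons() omitted; this sketch stays in host order */
	ph.length = plen;
	memcpy(buf + chunk_len, &ph, sizeof(ph));
	memcpy(buf + chunk_len + sizeof(ph), val, val_len);
	chunk_len += plen;
	padding_len = (uint16_t)(PAD4(plen) - plen);
}

int
main(void)
{
	uint8_t v1[6] = { 0 }, v2[3] = { 0 };

	append_param(1, v1, sizeof(v1));	/* 10 bytes, leaves 2 bytes of padding owed */
	append_param(2, v2, sizeof(v2));	/* flushes those 2 bytes, then owes 1 */

	/* The chunk length excludes the final byte of padding ... */
	printf("chunk length: %u\n", (unsigned)chunk_len);			/* 10 + 2 + 7 = 19 */
	/* ... but the bytes actually placed in the packet are padded out. */
	printf("bytes on the wire: %u\n", (unsigned)(chunk_len + padding_len));	/* 20 */
	return (0);
}

In the diff itself the same flush pattern appears before each parameter is appended, and the leftover padding_len of the last parameter is added to the mbuf chain via sctp_add_pad_tombuf() only after init->ch.chunk_length has been set from chunk_len.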

@@ -1940,27 +1940,27 @@ sctp_is_address_in_scope(struct sctp_ifa *ifa,
}
static struct mbuf *
sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa)
sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa, uint16_t * len)
{
struct sctp_paramhdr *parmh;
struct mbuf *mret;
int len;
uint16_t plen;
switch (ifa->address.sa.sa_family) {
#ifdef INET
case AF_INET:
len = sizeof(struct sctp_ipv4addr_param);
plen = (uint16_t) sizeof(struct sctp_ipv4addr_param);
break;
#endif
#ifdef INET6
case AF_INET6:
len = sizeof(struct sctp_ipv6addr_param);
plen = (uint16_t) sizeof(struct sctp_ipv6addr_param);
break;
#endif
default:
return (m);
}
if (M_TRAILINGSPACE(m) >= len) {
if (M_TRAILINGSPACE(m) >= plen) {
/* easy side we just drop it on the end */
parmh = (struct sctp_paramhdr *)(SCTP_BUF_AT(m, SCTP_BUF_LEN(m)));
mret = m;
@@ -1970,7 +1970,7 @@ sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa)
while (SCTP_BUF_NEXT(mret) != NULL) {
mret = SCTP_BUF_NEXT(mret);
}
SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(plen, 0, M_NOWAIT, 1, MT_DATA);
if (SCTP_BUF_NEXT(mret) == NULL) {
/* We are hosed, can't add more addresses */
return (m);
@@ -1989,9 +1989,9 @@ sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa)
sin = (struct sockaddr_in *)&ifa->address.sin;
ipv4p = (struct sctp_ipv4addr_param *)parmh;
parmh->param_type = htons(SCTP_IPV4_ADDRESS);
parmh->param_length = htons(len);
parmh->param_length = htons(plen);
ipv4p->addr = sin->sin_addr.s_addr;
SCTP_BUF_LEN(mret) += len;
SCTP_BUF_LEN(mret) += plen;
break;
}
#endif
@@ -2004,18 +2004,21 @@ sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa)
sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
ipv6p = (struct sctp_ipv6addr_param *)parmh;
parmh->param_type = htons(SCTP_IPV6_ADDRESS);
parmh->param_length = htons(len);
parmh->param_length = htons(plen);
memcpy(ipv6p->addr, &sin6->sin6_addr,
sizeof(ipv6p->addr));
/* clear embedded scope in the address */
in6_clearscope((struct in6_addr *)ipv6p->addr);
SCTP_BUF_LEN(mret) += len;
SCTP_BUF_LEN(mret) += plen;
break;
}
#endif
default:
return (m);
}
if (len != NULL) {
*len += plen;
}
return (mret);
}
@@ -2023,7 +2026,8 @@ sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa)
struct mbuf *
sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
struct sctp_scoping *scope,
struct mbuf *m_at, int cnt_inits_to)
struct mbuf *m_at, int cnt_inits_to,
uint16_t * padding_len, uint16_t * chunk_len)
{
struct sctp_vrf *vrf = NULL;
int cnt, limit_out = 0, total_count;
@@ -2103,7 +2107,15 @@ sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
scope->site_scope, 0) == 0) {
continue;
}
m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap);
if ((chunk_len != NULL) &&
(padding_len != NULL) &&
(*padding_len > 0)) {
memset(mtod(m_at, caddr_t)+*chunk_len, 0, *padding_len);
SCTP_BUF_LEN(m_at) += *padding_len;
*chunk_len += *padding_len;
*padding_len = 0;
}
m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap, chunk_len);
if (limit_out) {
cnt++;
total_count++;
@@ -2178,7 +2190,15 @@ sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
scope->site_scope, 0) == 0) {
continue;
}
m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa);
if ((chunk_len != NULL) &&
(padding_len != NULL) &&
(*padding_len > 0)) {
memset(mtod(m_at, caddr_t)+*chunk_len, 0, *padding_len);
SCTP_BUF_LEN(m_at) += *padding_len;
*chunk_len += *padding_len;
*padding_len = 0;
}
m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa, chunk_len);
cnt++;
if (cnt >= SCTP_ADDRESS_LIMIT) {
break;
@@ -4575,21 +4595,19 @@ sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
#endif
)
{
struct mbuf *m, *m_at, *mp_last;
struct sctp_scoping scp;
struct mbuf *m;
struct sctp_nets *net;
struct sctp_init_chunk *init;
struct sctp_supported_addr_param *sup_addr;
struct sctp_adaptation_layer_indication *ali;
struct sctp_ecn_supported_param *ecn;
struct sctp_prsctp_supported_param *prsctp;
struct sctp_supported_chunk_types_param *pr_supported;
struct sctp_paramhdr *ph;
int cnt_inits_to = 0;
int padval, ret;
int num_ext;
int p_len;
int ret;
uint16_t num_ext, chunk_len, padding_len, parameter_len;
/* INIT's always go to the primary (and usually ONLY address) */
mp_last = NULL;
net = stcb->asoc.primary_destination;
if (net == NULL) {
net = TAILQ_FIRST(&stcb->asoc.nets);
@@ -4606,15 +4624,12 @@ sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
}
SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT\n");
#ifdef INET6
if (((struct sockaddr *)&(net->ro._l_addr))->sa_family == AF_INET6) {
if (net->ro._l_addr.sa.sa_family == AF_INET6) {
/*
* special hook, if we are sending to link local it will not
* show up in our private address count.
*/
struct sockaddr_in6 *sin6l;
sin6l = &net->ro._l_addr.sin6;
if (IN6_IS_ADDR_LINKLOCAL(&sin6l->sin6_addr))
if (IN6_IS_ADDR_LINKLOCAL(&net->ro._l_addr.sin6.sin6_addr))
cnt_inits_to = 1;
}
#endif
@@ -4632,14 +4647,15 @@ sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - mbuf?\n");
return;
}
SCTP_BUF_LEN(m) = sizeof(struct sctp_init_chunk);
chunk_len = (uint16_t) sizeof(struct sctp_init_chunk);
padding_len = 0;
/*
* assume peer supports asconf in order to be able to queue local
* address changes while an INIT is in flight and before the assoc
* is established.
*/
stcb->asoc.peer_supports_asconf = 1;
/* Now lets put the SCTP header in place */
/* Now lets put the chunk header in place */
init = mtod(m, struct sctp_init_chunk *);
/* now the chunk header */
init->ch.chunk_type = SCTP_INITIATION;
@@ -4651,84 +4667,112 @@ sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
/* set up some of the credits. */
init->init.a_rwnd = htonl(max(inp->sctp_socket ? SCTP_SB_LIMIT_RCV(inp->sctp_socket) : 0,
SCTP_MINIMAL_RWND));
init->init.num_outbound_streams = htons(stcb->asoc.pre_open_streams);
init->init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams);
init->init.initial_tsn = htonl(stcb->asoc.init_seq_number);
#if defined(INET) || defined(INET6)
/* now the address restriction */
/* XXX Should we take the address family of the socket into account? */
sup_addr = (struct sctp_supported_addr_param *)((caddr_t)init +
sizeof(*init));
sup_addr = (struct sctp_supported_addr_param *)(mtod(m, caddr_t)+chunk_len);
sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE);
#ifdef INET6
#ifdef INET
/* we support 2 types: IPv4/IPv6 */
sup_addr->ph.param_length = htons(sizeof(struct sctp_paramhdr) + 2 * sizeof(uint16_t));
parameter_len = (uint16_t) (sizeof(struct sctp_paramhdr) + 2 * sizeof(uint16_t));
sup_addr->ph.param_length = htons(parameter_len);
sup_addr->addr_type[0] = htons(SCTP_IPV4_ADDRESS);
sup_addr->addr_type[1] = htons(SCTP_IPV6_ADDRESS);
padding_len = 0;
#else
/* we support 1 type: IPv6 */
sup_addr->ph.param_length = htons(sizeof(struct sctp_paramhdr) + sizeof(uint16_t));
parameter_len = (uint16_t) (sizeof(struct sctp_paramhdr) + sizeof(uint16_t));
sup_addr->ph.param_length = htons(parameter_len);
sup_addr->addr_type[0] = htons(SCTP_IPV6_ADDRESS);
sup_addr->addr_type[1] = htons(0); /* this is the padding */
padding_len = (uint16_t) sizeof(uint16_t);
#endif
#else
/* we support 1 type: IPv4 */
sup_addr->ph.param_length = htons(sizeof(struct sctp_paramhdr) + sizeof(uint16_t));
parameter_len = (uint16_t) (sizeof(struct sctp_paramhdr) + sizeof(uint16_t));
sup_addr->ph.param_length = htons(parameter_len);
sup_addr->addr_type[0] = htons(SCTP_IPV4_ADDRESS);
sup_addr->addr_type[1] = htons(0); /* this is the padding */
padding_len = (uint16_t) sizeof(uint16_t);
#endif
SCTP_BUF_LEN(m) += sizeof(struct sctp_supported_addr_param);
/* adaptation layer indication parameter */
ali = (struct sctp_adaptation_layer_indication *)((caddr_t)sup_addr + sizeof(struct sctp_supported_addr_param));
chunk_len += parameter_len;
#endif
/* Adaptation layer indication parameter */
/* XXX: Should we include this always? */
if (padding_len > 0) {
memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
chunk_len += padding_len;
padding_len = 0;
}
parameter_len = (uint16_t) sizeof(struct sctp_adaptation_layer_indication);
ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t)+chunk_len);
ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
ali->ph.param_length = htons(sizeof(*ali));
ali->ph.param_length = htons(parameter_len);
ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
SCTP_BUF_LEN(m) += sizeof(*ali);
ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali + sizeof(*ali));
chunk_len += parameter_len;
if (SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly)) {
/* Add NAT friendly parameter */
struct sctp_paramhdr *ph;
ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
/* Add NAT friendly parameter. */
if (padding_len > 0) {
memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
chunk_len += padding_len;
padding_len = 0;
}
parameter_len = (uint16_t) sizeof(struct sctp_paramhdr);
ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
ph->param_length = htons(sizeof(struct sctp_paramhdr));
SCTP_BUF_LEN(m) += sizeof(struct sctp_paramhdr);
ecn = (struct sctp_ecn_supported_param *)((caddr_t)ph + sizeof(*ph));
ph->param_length = htons(parameter_len);
chunk_len += parameter_len;
}
/* now any cookie time extensions */
if (stcb->asoc.cookie_preserve_req) {
struct sctp_cookie_perserve_param *cookie_preserve;
cookie_preserve = (struct sctp_cookie_perserve_param *)(ecn);
if (padding_len > 0) {
memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
chunk_len += padding_len;
padding_len = 0;
}
parameter_len = (uint16_t) sizeof(struct sctp_cookie_perserve_param);
cookie_preserve = (struct sctp_cookie_perserve_param *)(mtod(m, caddr_t)+chunk_len);
cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE);
cookie_preserve->ph.param_length = htons(
sizeof(*cookie_preserve));
cookie_preserve->ph.param_length = htons(parameter_len);
cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req);
SCTP_BUF_LEN(m) += sizeof(*cookie_preserve);
ecn = (struct sctp_ecn_supported_param *)(
(caddr_t)cookie_preserve + sizeof(*cookie_preserve));
stcb->asoc.cookie_preserve_req = 0;
chunk_len += parameter_len;
}
/* ECN parameter */
if (stcb->asoc.ecn_allowed == 1) {
ecn->ph.param_type = htons(SCTP_ECN_CAPABLE);
ecn->ph.param_length = htons(sizeof(*ecn));
SCTP_BUF_LEN(m) += sizeof(*ecn);
prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn +
sizeof(*ecn));
} else {
prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn);
if (padding_len > 0) {
memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
chunk_len += padding_len;
padding_len = 0;
}
parameter_len = (uint16_t) sizeof(struct sctp_paramhdr);
ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
ph->param_type = htons(SCTP_ECN_CAPABLE);
ph->param_length = htons(parameter_len);
chunk_len += parameter_len;
}
/* And now tell the peer we do pr-sctp */
prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED);
prsctp->ph.param_length = htons(sizeof(*prsctp));
SCTP_BUF_LEN(m) += sizeof(*prsctp);
/* And now tell the peer we do support PR-SCTP. */
if (padding_len > 0) {
memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
chunk_len += padding_len;
padding_len = 0;
}
parameter_len = (uint16_t) sizeof(struct sctp_paramhdr);
ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
ph->param_type = htons(SCTP_PRSCTP_SUPPORTED);
ph->param_length = htons(parameter_len);
chunk_len += parameter_len;
/* And now tell the peer we do all the extensions */
pr_supported = (struct sctp_supported_chunk_types_param *)
((caddr_t)prsctp + sizeof(*prsctp));
pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t)+chunk_len);
pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
num_ext = 0;
pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
@@ -4742,99 +4786,94 @@ sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
if (stcb->asoc.sctp_nr_sack_on_off == 1) {
pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
}
p_len = sizeof(*pr_supported) + num_ext;
pr_supported->ph.param_length = htons(p_len);
bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len);
SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
parameter_len = (uint16_t) sizeof(struct sctp_supported_chunk_types_param) + num_ext;
pr_supported->ph.param_length = htons(parameter_len);
padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
chunk_len += parameter_len;
/* add authentication parameters */
if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
struct sctp_auth_random *randp;
struct sctp_auth_hmac_algo *hmacs;
struct sctp_auth_chunk_list *chunks;
/* attach RANDOM parameter, if available */
if (stcb->asoc.authinfo.random != NULL) {
randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
p_len = sizeof(*randp) + stcb->asoc.authinfo.random_len;
struct sctp_auth_random *randp;
if (padding_len > 0) {
memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
chunk_len += padding_len;
padding_len = 0;
}
randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+chunk_len);
parameter_len = (uint16_t) sizeof(struct sctp_auth_random) + stcb->asoc.authinfo.random_len;
/* random key already contains the header */
bcopy(stcb->asoc.authinfo.random->key, randp, p_len);
/* zero out any padding required */
bzero((caddr_t)randp + p_len, SCTP_SIZE32(p_len) - p_len);
SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
memcpy(randp, stcb->asoc.authinfo.random->key, parameter_len);
padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
chunk_len += parameter_len;
}
/* add HMAC_ALGO parameter */
hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
p_len = sctp_serialize_hmaclist(stcb->asoc.local_hmacs,
(uint8_t *) hmacs->hmac_ids);
if (p_len > 0) {
p_len += sizeof(*hmacs);
if ((stcb->asoc.local_hmacs != NULL) &&
(stcb->asoc.local_hmacs->num_algo > 0)) {
struct sctp_auth_hmac_algo *hmacs;
if (padding_len > 0) {
memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
chunk_len += padding_len;
padding_len = 0;
}
hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+chunk_len);
parameter_len = (uint16_t) (sizeof(struct sctp_auth_hmac_algo) +
stcb->asoc.local_hmacs->num_algo * sizeof(uint16_t));
hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
hmacs->ph.param_length = htons(p_len);
/* zero out any padding required */
bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len);
SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
hmacs->ph.param_length = htons(parameter_len);
sctp_serialize_hmaclist(stcb->asoc.local_hmacs, (uint8_t *) hmacs->hmac_ids);
padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
chunk_len += parameter_len;
}
/* add CHUNKS parameter */
chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
p_len = sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks,
chunks->chunk_types);
if (p_len > 0) {
p_len += sizeof(*chunks);
if (sctp_auth_get_chklist_size(stcb->asoc.local_auth_chunks) > 0) {
struct sctp_auth_chunk_list *chunks;
if (padding_len > 0) {
memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
chunk_len += padding_len;
padding_len = 0;
}
chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+chunk_len);
parameter_len = (uint16_t) (sizeof(struct sctp_auth_chunk_list) +
sctp_auth_get_chklist_size(stcb->asoc.local_auth_chunks));
chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
chunks->ph.param_length = htons(p_len);
/* zero out any padding required */
bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len);
SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
chunks->ph.param_length = htons(parameter_len);
sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks, chunks->chunk_types);
padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
chunk_len += parameter_len;
}
}
SCTP_BUF_LEN(m) = chunk_len;
/* now the addresses */
{
struct sctp_scoping scp;
/*
* To optimize this we could put the scoping stuff into a
* structure and remove the individual uint8's from the
* assoc structure. Then we could just sifa in the address
* within the stcb. But for now this is a quick hack to get
* the address stuff teased apart.
*/
scp.ipv4_addr_legal = stcb->asoc.ipv4_addr_legal;
scp.ipv6_addr_legal = stcb->asoc.ipv6_addr_legal;
scp.loopback_scope = stcb->asoc.loopback_scope;
scp.ipv4_local_scope = stcb->asoc.ipv4_local_scope;
scp.local_scope = stcb->asoc.local_scope;
scp.site_scope = stcb->asoc.site_scope;
sctp_add_addresses_to_i_ia(inp, stcb, &scp, m, cnt_inits_to);
}
/* calulate the size and update pkt header and chunk header */
p_len = 0;
for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
if (SCTP_BUF_NEXT(m_at) == NULL)
mp_last = m_at;
p_len += SCTP_BUF_LEN(m_at);
}
init->ch.chunk_length = htons(p_len);
/*
* We sifa 0 here to NOT set IP_DF if its IPv4, we ignore the return
* here since the timer will drive a retranmission.
* To optimize this we could put the scoping stuff into a structure
* and remove the individual uint8's from the assoc structure. Then
* we could just sifa in the address within the stcb. But for now
* this is a quick hack to get the address stuff teased apart.
*/
scp.ipv4_addr_legal = stcb->asoc.ipv4_addr_legal;
scp.ipv6_addr_legal = stcb->asoc.ipv6_addr_legal;
scp.loopback_scope = stcb->asoc.loopback_scope;
scp.ipv4_local_scope = stcb->asoc.ipv4_local_scope;
scp.local_scope = stcb->asoc.local_scope;
scp.site_scope = stcb->asoc.site_scope;
sctp_add_addresses_to_i_ia(inp, stcb, &scp, m, cnt_inits_to, &padding_len, &chunk_len);
/* I don't expect this to execute but we will be safe here */
padval = p_len % 4;
if ((padval) && (mp_last)) {
/*
* The compiler worries that mp_last may not be set even
* though I think it is impossible :-> however we add
* mp_last here just in case.
*/
ret = sctp_add_pad_tombuf(mp_last, (4 - padval));
if (ret) {
/* Houston we have a problem, no space */
init->ch.chunk_length = htons(chunk_len);
if (padding_len > 0) {
struct mbuf *m_at, *mp_last;
mp_last = NULL;
for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
if (SCTP_BUF_NEXT(m_at) == NULL)
mp_last = m_at;
}
if ((mp_last == NULL) || sctp_add_pad_tombuf(mp_last, padding_len)) {
sctp_m_freem(m);
return;
}
@@ -5753,7 +5792,7 @@ sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
/* I can have what I want :> */
initack->init.num_outbound_streams = htons(i_want);
}
/* tell him his limt. */
/* tell him his limit. */
initack->init.num_inbound_streams =
htons(inp->sctp_ep.max_open_streams_intome);
@@ -5869,7 +5908,7 @@ sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
scp.ipv4_local_scope = stc.ipv4_scope;
scp.local_scope = stc.local_scope;
scp.site_scope = stc.site_scope;
m_at = sctp_add_addresses_to_i_ia(inp, stcb, &scp, m_at, cnt_inits_to);
m_at = sctp_add_addresses_to_i_ia(inp, stcb, &scp, m_at, cnt_inits_to, NULL, NULL);
}
/* tack on the operational error if present */

@@ -46,7 +46,8 @@ sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp,
struct sctp_tcb *stcb,
struct sctp_scoping *scope,
struct mbuf *m_at,
int cnt_inits_to);
int cnt_inits_to,
uint16_t * padding_len, uint16_t * chunk_len);
int sctp_is_addr_restricted(struct sctp_tcb *, struct sctp_ifa *);