- Macro-izes the packed declaration in all headers.

- Vimage prep - a major restructuring that moves all global
  variables into a single structure, accessed through a macro
  or two (see the sketch after the commit metadata below).
- ASCONF address addition tweaks (sctp_add_or_del_interfaces).
- Fix rwnd calculation to be more conservative.
- Support the SACK-IMMEDIATELY flag (SCTP_DATA_SACK_IMMEDIATELY)
  to skip delayed SACK on demand of the peer.
- Comment updates in the SACK mapping calculations.
- INVARIANTS panic added.
- Pre-support for UDP tunneling (we can do this on Mac OS,
  but it will need added support from UDP to get a "pipe"
  of UDP packets in).
- Clear-trace-buffer sysctl added for use when local tracing is on.

Note that the majority of this huge patch is the vimage prep work :-)
rrs 2008-06-14 07:58:05 +00:00
parent f07c38e84a
commit 7782c49376
23 changed files with 1345 additions and 1190 deletions
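The diffs below repeatedly replace direct references to globals such as sctp_logging_level, sctp_cmt_on_off and sctppcbinfo with the accessor macros SCTP_BASE_SYSCTL(), SCTP_BASE_INFO() and SCTP_BASE_VAR(). A minimal sketch of that pattern is given here; only the three macro names come from this commit, while the structure layout, field grouping, buffer size and variable names are illustrative assumptions.

#include <stdint.h>

#define SCTP_PACKET_LOG_SIZE 65536              /* size is an assumption */

/* Placeholder aggregates: the commit groups the existing sysctl and
 * PCB-info globals, whose full layouts are not shown in this diff. */
struct sctp_sysctl { uint32_t sctp_logging_level; uint32_t sctp_cmt_on_off; /* ... */ };
struct sctp_epinfo { void *ipi_zone_asconf_ack; void *ipi_zone_laddr; /* ... */ };

struct sctp_base_info {
        struct sctp_sysctl sctpsysctl;          /* former sysctl globals       */
        struct sctp_epinfo sctppcbinfo;         /* former 'sctppcbinfo' global */
        int first_time;                         /* former file-scope statics   */
        int packet_log_writers;
        int packet_log_end;
        uint8_t packet_log_buffer[SCTP_PACKET_LOG_SIZE];
};

/* One instance for now; a per-vimage lookup can replace this later
 * without touching any of the call sites converted below. */
static struct sctp_base_info system_base_info;

#define SCTP_BASE_SYSCTL(m) (system_base_info.sctpsysctl.m)
#define SCTP_BASE_INFO(m)   (system_base_info.sctppcbinfo.m)
#define SCTP_BASE_VAR(m)    (system_base_info.m)

With every access funneled through these macros, the eventual vimage change only has to redirect three macros at a per-instance structure instead of editing each call site again.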

View File

@ -37,6 +37,9 @@ __FBSDID("$FreeBSD$");
#include <sys/types.h>
#define SCTP_PACKED __attribute__((packed))
/*
* SCTP protocol - RFC2960.
*/
@ -46,9 +49,7 @@ struct sctphdr {
uint32_t v_tag; /* verification tag of packet */
uint32_t checksum; /* Adler32 C-Sum */
/* chunks follow... */
}
__attribute__((packed));
} SCTP_PACKED;
/*
* SCTP Chunks
@ -58,7 +59,7 @@ __attribute__((packed));
uint8_t chunk_flags; /* chunk flags */
uint16_t chunk_length; /* chunk length */
/* optional params follow */
} __attribute__((packed));
} SCTP_PACKED;
/*
* SCTP chunk parameters
@ -66,7 +67,7 @@ __attribute__((packed));
struct sctp_paramhdr {
uint16_t param_type; /* parameter type */
uint16_t param_length; /* parameter length */
} __attribute__((packed));
} SCTP_PACKED;
/*
* user socket options: socket API defined
@ -307,45 +308,38 @@ __attribute__((packed));
uint16_t code;
uint16_t length;
/* optional cause-specific info may follow */
} __attribute__((packed));
} SCTP_PACKED;
struct sctp_error_invalid_stream {
struct sctp_error_cause cause; /* code=SCTP_ERROR_INVALID_STRE
* AM */
struct sctp_error_cause cause; /* code=SCTP_ERROR_INVALID_STREAM */
uint16_t stream_id; /* stream id of the DATA in error */
uint16_t reserved;
} __attribute__((packed));
} SCTP_PACKED;
struct sctp_error_missing_param {
struct sctp_error_cause cause; /* code=SCTP_ERROR_MISSING_PARA
* M */
uint32_t num_missing_params; /* number of missing
* parameters */
struct sctp_error_cause cause; /* code=SCTP_ERROR_MISSING_PARAM */
uint32_t num_missing_params; /* number of missing parameters */
/* uint16_t param_type's follow */
} __attribute__((packed));
} SCTP_PACKED;
struct sctp_error_stale_cookie {
struct sctp_error_cause cause; /* code=SCTP_ERROR_STALE_COOKIE
* */
struct sctp_error_cause cause; /* code=SCTP_ERROR_STALE_COOKIE */
uint32_t stale_time; /* time in usec of staleness */
} __attribute__((packed));
} SCTP_PACKED;
struct sctp_error_out_of_resource {
struct sctp_error_cause cause; /* code=SCTP_ERROR_OUT_OF_RESOU
* RCES */
} __attribute__((packed));
struct sctp_error_cause cause; /* code=SCTP_ERROR_OUT_OF_RESOURCES */
} SCTP_PACKED;
struct sctp_error_unresolv_addr {
struct sctp_error_cause cause; /* code=SCTP_ERROR_UNRESOLVABLE
* _ADDR */
struct sctp_error_cause cause; /* code=SCTP_ERROR_UNRESOLVABLE_ADDR */
} __attribute__((packed));
} SCTP_PACKED;
struct sctp_error_unrecognized_chunk {
struct sctp_error_cause cause; /* code=SCTP_ERROR_UNRECOG_CHUN
* K */
struct sctp_error_cause cause; /* code=SCTP_ERROR_UNRECOG_CHUNK */
struct sctp_chunkhdr ch;/* header from chunk in error */
} __attribute__((packed));
} SCTP_PACKED;
/*
* Main SCTP chunk types we place these here so natd and f/w's in user land
@ -407,7 +401,7 @@ __attribute__((packed));
#define SCTP_DATA_FIRST_FRAG 0x02
#define SCTP_DATA_NOT_FRAG 0x03
#define SCTP_DATA_UNORDERED 0x04
#define SCTP_DATA_SACK_IMMEDIATELY 0x08
/* ECN Nonce: SACK Chunk Specific Flags */
#define SCTP_SACK_NONCE_SUM 0x01
@ -541,4 +535,6 @@ __attribute__((packed));
#undef SCTP_PACKED
#endif /* !_NETINET_SCTP_H_ */
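The new SCTP_DATA_SACK_IMMEDIATELY bit shares the DATA chunk flags byte with the fragmentation and ordering bits defined above; the sctp_indata.c hunk later in this commit simply sets asoc->send_sack when it sees the bit. A minimal, self-contained illustration of testing the flag (the example flags value is made up):

#include <stdint.h>
#include <stdio.h>

#define SCTP_DATA_FIRST_FRAG       0x02
#define SCTP_DATA_NOT_FRAG         0x03
#define SCTP_DATA_UNORDERED        0x04
#define SCTP_DATA_SACK_IMMEDIATELY 0x08

int
main(void)
{
        /* Unfragmented, ordered DATA chunk with the I-bit set by the peer. */
        uint8_t chunk_flags = SCTP_DATA_NOT_FRAG | SCTP_DATA_SACK_IMMEDIATELY;

        if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY)
                printf("peer requested an immediate SACK\n");
        return (0);
}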

View File

@ -264,7 +264,7 @@ sctp_process_asconf_add_ip(struct mbuf *m, struct sctp_asconf_paramhdr *aph,
} /* end switch */
/* if 0.0.0.0/::0, add the source address instead */
if (zero_address && sctp_nat_friendly) {
if (zero_address && SCTP_BASE_SYSCTL(sctp_nat_friendly)) {
sa = (struct sockaddr *)&sa_source;
sctp_asconf_get_source_ip(m, sa);
SCTPDBG(SCTP_DEBUG_ASCONF1,
@ -416,7 +416,7 @@ sctp_process_asconf_delete_ip(struct mbuf *m, struct sctp_asconf_paramhdr *aph,
return m_reply;
}
/* if deleting 0.0.0.0/::0, delete all addresses except src addr */
if (zero_address && sctp_nat_friendly) {
if (zero_address && SCTP_BASE_SYSCTL(sctp_nat_friendly)) {
result = sctp_asconf_del_remote_addrs_except(stcb,
(struct sockaddr *)&sa_source);
@ -534,7 +534,7 @@ sctp_process_asconf_set_primary(struct mbuf *m,
}
/* if 0.0.0.0/::0, use the source address instead */
if (zero_address && sctp_nat_friendly) {
if (zero_address && SCTP_BASE_SYSCTL(sctp_nat_friendly)) {
sa = (struct sockaddr *)&sa_source;
sctp_asconf_get_source_ip(m, sa);
SCTPDBG(SCTP_DEBUG_ASCONF1,
@ -667,7 +667,7 @@ sctp_handle_asconf(struct mbuf *m, unsigned int offset,
if (ack->data != NULL) {
sctp_m_freem(ack->data);
}
SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_asconf_ack, ack);
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asconf_ack), ack);
ack = ack_next;
}
}
@ -814,7 +814,7 @@ sctp_handle_asconf(struct mbuf *m, unsigned int offset,
send_reply:
ack_cp->ch.chunk_length = htons(ack_cp->ch.chunk_length);
/* save the ASCONF-ACK reply */
ack = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_asconf_ack,
ack = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_asconf_ack),
struct sctp_asconf_ack);
if (ack == NULL) {
sctp_m_freem(m_ack);
@ -1392,7 +1392,7 @@ sctp_asconf_queue_mgmt(struct sctp_tcb *stcb, struct sctp_ifa *ifa,
TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next);
#ifdef SCTP_DEBUG
if (sctp_debug_on && SCTP_DEBUG_ASCONF2) {
if (SCTP_BASE_SYSCTL(sctp_debug_on) && SCTP_DEBUG_ASCONF2) {
if (type == SCTP_ADD_IP_ADDRESS) {
SCTP_PRINTF("asconf_queue_mgmt: inserted asconf ADD_IP_ADDRESS: ");
SCTPDBG_ADDR(SCTP_DEBUG_ASCONF2, sa);
@ -1479,7 +1479,7 @@ sctp_asconf_queue_add(struct sctp_tcb *stcb, struct sctp_ifa *ifa,
net->error_count = 0;
}
stcb->asoc.overall_error_count = 0;
if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
stcb->asoc.overall_error_count,
0,
@ -2336,7 +2336,7 @@ sctp_asconf_iterator_end(void *ptr, uint32_t val)
ifa->localifa_flags &= ~SCTP_ADDR_DEFER_USE;
}
sctp_free_ifa(ifa);
SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_laddr, l);
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_laddr), l);
SCTP_DECR_LADDR_COUNT();
l = l_next;
}
@ -2392,7 +2392,7 @@ sctp_set_primary_ip_address(struct sctp_ifa *ifa)
struct sctp_inpcb *inp;
/* go through all our PCB's */
LIST_FOREACH(inp, &sctppcbinfo.listhead, sctp_list) {
LIST_FOREACH(inp, &SCTP_BASE_INFO(listhead), sctp_list) {
struct sctp_tcb *stcb;
/* process for all associations for this endpoint */
@ -3188,7 +3188,7 @@ sctp_addr_mgmt_ep_sa(struct sctp_inpcb *inp, struct sockaddr *sa,
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_ASCONF, ENOMEM);
return (ENOMEM);
}
wi = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_laddr,
wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr),
struct sctp_laddr);
if (wi == NULL) {
SCTP_FREE(asc, SCTP_M_ASC_IT);
@ -3203,7 +3203,7 @@ sctp_addr_mgmt_ep_sa(struct sctp_inpcb *inp, struct sockaddr *sa,
if (inp->laddr_count < 2) {
/* can't delete the last local address */
SCTP_FREE(asc, SCTP_M_ASC_IT);
SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_laddr, wi);
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_laddr), wi);
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_ASCONF, EINVAL);
return (EINVAL);
}

View File

@ -43,8 +43,8 @@ __FBSDID("$FreeBSD$");
#include <netinet/sctp_auth.h>
#ifdef SCTP_DEBUG
#define SCTP_AUTH_DEBUG (sctp_debug_on & SCTP_DEBUG_AUTH1)
#define SCTP_AUTH_DEBUG2 (sctp_debug_on & SCTP_DEBUG_AUTH2)
#define SCTP_AUTH_DEBUG (SCTP_BASE_SYSCTL(sctp_debug_on) & SCTP_DEBUG_AUTH1)
#define SCTP_AUTH_DEBUG2 (SCTP_BASE_SYSCTL(sctp_debug_on) & SCTP_DEBUG_AUTH2)
#endif /* SCTP_DEBUG */
@ -1988,7 +1988,7 @@ sctp_validate_init_auth_params(struct mbuf *m, int offset, int limit)
"SCTP: peer sent chunk list w/o AUTH\n");
return (-1);
}
if (!sctp_asconf_auth_nochk && peer_supports_asconf &&
if (!SCTP_BASE_SYSCTL(sctp_asconf_auth_nochk) && peer_supports_asconf &&
!peer_supports_auth) {
SCTPDBG(SCTP_DEBUG_AUTH1,
"SCTP: peer supports ASCONF but not AUTH\n");

View File

@ -48,7 +48,6 @@ __FBSDID("$FreeBSD$");
#include <netinet/sctp_indata.h>
#include <sys/unistd.h>
/* Declare all of our malloc named types */
/* Note to Michael/Peter for mac-os,
@ -85,17 +84,17 @@ MALLOC_DEFINE(SCTP_M_SOCKOPT, "sctp_socko", "sctp socket option");
void
sctp_wakeup_iterator(void)
{
wakeup(&sctppcbinfo.iterator_running);
wakeup(&SCTP_BASE_INFO(iterator_running));
}
static void
sctp_iterator_thread(void *v)
{
SCTP_IPI_ITERATOR_WQ_LOCK();
sctppcbinfo.iterator_running = 0;
SCTP_BASE_INFO(iterator_running) = 0;
while (1) {
msleep(&sctppcbinfo.iterator_running,
&sctppcbinfo.ipi_iterator_wq_mtx,
msleep(&SCTP_BASE_INFO(iterator_running),
&SCTP_BASE_INFO(ipi_iterator_wq_mtx),
0, "waiting_for_work", 0);
sctp_iterator_worker();
}
@ -108,7 +107,7 @@ sctp_startup_iterator(void)
ret = kproc_create(sctp_iterator_thread,
(void *)NULL,
&sctppcbinfo.thread_proc,
&SCTP_BASE_INFO(thread_proc),
RFPROC,
SCTP_KTHREAD_PAGES,
SCTP_KTRHEAD_NAME);
@ -172,6 +171,7 @@ sctp_is_desired_interface_type(struct ifaddr *ifa)
case IFT_PPP:
case IFT_LOOP:
case IFT_SLIP:
case IFT_GIF:
case IFT_IP:
case IFT_IPOVERCDLC:
case IFT_IPOVERCLAW:
@ -185,6 +185,7 @@ sctp_is_desired_interface_type(struct ifaddr *ifa)
return (result);
}
static void
sctp_init_ifns_for_vrf(int vrfid)
{
@ -204,20 +205,16 @@ sctp_init_ifns_for_vrf(int vrfid)
if (ifa->ifa_addr == NULL) {
continue;
}
if ((ifa->ifa_addr->sa_family != AF_INET) &&
(ifa->ifa_addr->sa_family != AF_INET6)
) {
if ((ifa->ifa_addr->sa_family != AF_INET) && (ifa->ifa_addr->sa_family != AF_INET6)) {
/* non inet/inet6 skip */
continue;
}
if (ifa->ifa_addr->sa_family == AF_INET6) {
ifa6 = (struct in6_ifaddr *)ifa;
ifa_flags = ifa6->ia6_flags;
if (IN6_IS_ADDR_UNSPECIFIED(&((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr)) {
/* skip unspecifed addresses */
continue;
}
} else if (ifa->ifa_addr->sa_family == AF_INET) {
} else {
if (((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr == 0) {
continue;
}
@ -226,8 +223,6 @@ sctp_init_ifns_for_vrf(int vrfid)
/* non desired type */
continue;
}
if ((ifa->ifa_addr->sa_family == AF_INET6) ||
(ifa->ifa_addr->sa_family == AF_INET)) {
if (ifa->ifa_addr->sa_family == AF_INET6) {
ifa6 = (struct in6_ifaddr *)ifa;
ifa_flags = ifa6->ia6_flags;
@ -241,15 +236,14 @@ sctp_init_ifns_for_vrf(int vrfid)
ifn->if_xname,
(void *)ifa,
ifa->ifa_addr,
ifa_flags, 0
);
ifa_flags,
0);
if (sctp_ifa) {
sctp_ifa->localifa_flags &= ~SCTP_ADDR_DEFER_USE;
}
}
}
}
}
void
@ -269,24 +263,20 @@ sctp_init_vrf_list(int vrfid)
sctp_init_ifns_for_vrf(vrfid);
}
static uint8_t first_time = 0;
void
sctp_addr_change(struct ifaddr *ifa, int cmd)
{
struct sctp_ifa *ifap = NULL;
uint32_t ifa_flags = 0;
struct in6_ifaddr *ifa6;
/*
* BSD only has one VRF, if this changes we will need to hook in the
* right things here to get the id to pass to the address managment
* routine.
*/
if (first_time == 0) {
if (SCTP_BASE_VAR(first_time) == 0) {
/* Special test to see if my ::1 will showup with this */
first_time = 1;
SCTP_BASE_VAR(first_time) = 1;
sctp_init_ifns_for_vrf(SCTP_DEFAULT_VRFID);
}
if ((cmd != RTM_ADD) && (cmd != RTM_DELETE)) {
@ -296,24 +286,22 @@ sctp_addr_change(struct ifaddr *ifa, int cmd)
if (ifa->ifa_addr == NULL) {
return;
}
if ((ifa->ifa_addr->sa_family != AF_INET) &&
(ifa->ifa_addr->sa_family != AF_INET6)
) {
if ((ifa->ifa_addr->sa_family != AF_INET) && (ifa->ifa_addr->sa_family != AF_INET6)) {
/* non inet/inet6 skip */
return;
}
if (ifa->ifa_addr->sa_family == AF_INET6) {
ifa6 = (struct in6_ifaddr *)ifa;
ifa_flags = ifa6->ia6_flags;
ifa_flags = ((struct in6_ifaddr *)ifa)->ia6_flags;
if (IN6_IS_ADDR_UNSPECIFIED(&((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr)) {
/* skip unspecifed addresses */
return;
}
} else if (ifa->ifa_addr->sa_family == AF_INET) {
} else {
if (((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr == 0) {
return;
}
}
if (sctp_is_desired_interface_type(ifa) == 0) {
/* non desired type */
return;
@ -323,7 +311,7 @@ sctp_addr_change(struct ifaddr *ifa, int cmd)
ifa->ifa_ifp->if_index, ifa->ifa_ifp->if_type,
ifa->ifa_ifp->if_xname,
(void *)ifa, ifa->ifa_addr, ifa_flags, 1);
} else if (cmd == RTM_DELETE) {
} else {
sctp_del_addr_from_vrf(SCTP_DEFAULT_VRFID, ifa->ifa_addr,
ifa->ifa_ifp->if_index,
@ -336,6 +324,21 @@ sctp_addr_change(struct ifaddr *ifa, int cmd)
}
}
void
sctp_add_or_del_interfaces(int (*pred) (struct ifnet *), int add){
struct ifnet *ifn;
struct ifaddr *ifa;
TAILQ_FOREACH(ifn, &ifnet, if_list) {
if (!(*pred) (ifn)) {
continue;
}
TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
sctp_addr_change(ifa, add ? RTM_ADD : RTM_DELETE);
}
}
}
struct mbuf *
sctp_get_mbuf_for_msg(unsigned int space_needed, int want_header,
int how, int allonebuf, int type)
@ -368,7 +371,7 @@ sctp_get_mbuf_for_msg(unsigned int space_needed, int want_header,
SCTP_BUF_NEXT(m) = NULL;
}
#ifdef SCTP_MBUF_LOGGING
if (sctp_logging_level & SCTP_MBUF_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
if (SCTP_BUF_IS_EXTENDED(m)) {
sctp_log_mb(m, SCTP_MBUF_IALLOC);
}
@ -379,12 +382,6 @@ sctp_get_mbuf_for_msg(unsigned int space_needed, int want_header,
#ifdef SCTP_PACKET_LOGGING
int packet_log_writers = 0;
int packet_log_end = 0;
uint8_t packet_log_buffer[SCTP_PACKET_LOG_SIZE];
void
sctp_packet_log(struct mbuf *m, int length)
{
@ -406,40 +403,40 @@ sctp_packet_log(struct mbuf *m, int length)
/* Can't log this packet I have not a buffer big enough */
return;
}
if (length < (SCTP_MIN_V4_OVERHEAD + sizeof(struct sctp_cookie_ack_chunk))) {
if (length < (int)(SCTP_MIN_V4_OVERHEAD + sizeof(struct sctp_cookie_ack_chunk))) {
return;
}
atomic_add_int(&packet_log_writers, 1);
atomic_add_int(&SCTP_BASE_VAR(packet_log_writers), 1);
try_again:
if (packet_log_writers > SCTP_PKTLOG_WRITERS_NEED_LOCK) {
if (SCTP_BASE_VAR(packet_log_writers) > SCTP_PKTLOG_WRITERS_NEED_LOCK) {
SCTP_IP_PKTLOG_LOCK();
grabbed_lock = 1;
again_locked:
value = packet_log_end;
newval = packet_log_end + total_len;
value = SCTP_BASE_VAR(packet_log_end);
newval = SCTP_BASE_VAR(packet_log_end) + total_len;
if (newval >= SCTP_PACKET_LOG_SIZE) {
/* we wrapped */
thisbegin = 0;
thisend = total_len;
} else {
thisbegin = packet_log_end;
thisbegin = SCTP_BASE_VAR(packet_log_end);
thisend = newval;
}
if (!(atomic_cmpset_int(&packet_log_end, value, thisend))) {
if (!(atomic_cmpset_int(&SCTP_BASE_VAR(packet_log_end), value, thisend))) {
goto again_locked;
}
} else {
value = packet_log_end;
newval = packet_log_end + total_len;
value = SCTP_BASE_VAR(packet_log_end);
newval = SCTP_BASE_VAR(packet_log_end) + total_len;
if (newval >= SCTP_PACKET_LOG_SIZE) {
/* we wrapped */
thisbegin = 0;
thisend = total_len;
} else {
thisbegin = packet_log_end;
thisbegin = SCTP_BASE_VAR(packet_log_end);
thisend = newval;
}
if (!(atomic_cmpset_int(&packet_log_end, value, thisend))) {
if (!(atomic_cmpset_int(&SCTP_BASE_VAR(packet_log_end), value, thisend))) {
goto try_again;
}
}
@ -448,14 +445,14 @@ sctp_packet_log(struct mbuf *m, int length)
printf("Insanity stops a log thisbegin:%d thisend:%d writers:%d lock:%d end:%d\n",
thisbegin,
thisend,
packet_log_writers,
SCTP_BASE_VAR(packet_log_writers),
grabbed_lock,
packet_log_end);
packet_log_end = 0;
SCTP_BASE_VAR(packet_log_end));
SCTP_BASE_VAR(packet_log_end) = 0;
goto no_log;
}
lenat = (int *)&packet_log_buffer[thisbegin];
lenat = (int *)&SCTP_BASE_VAR(packet_log_buffer)[thisbegin];
*lenat = total_len;
lenat++;
*lenat = value;
@ -465,7 +462,7 @@ sctp_packet_log(struct mbuf *m, int length)
*tick_tock = sctp_get_tick_count();
copyto = (void *)lenat;
thisone = thisend - sizeof(int);
lenat = (int *)&packet_log_buffer[thisone];
lenat = (int *)&SCTP_BASE_VAR(packet_log_buffer)[thisone];
*lenat = thisbegin;
if (grabbed_lock) {
SCTP_IP_PKTLOG_UNLOCK();
@ -476,7 +473,7 @@ sctp_packet_log(struct mbuf *m, int length)
if (grabbed_lock) {
SCTP_IP_PKTLOG_UNLOCK();
}
atomic_subtract_int(&packet_log_writers, 1);
atomic_subtract_int(&SCTP_BASE_VAR(packet_log_writers), 1);
}
@ -492,14 +489,14 @@ sctp_copy_out_packet_log(uint8_t * target, int length)
int did_delay = 0;
tocopy = length;
if (length < (2 * sizeof(int))) {
if (length < (int)(2 * sizeof(int))) {
/* not enough room */
return (0);
}
if (SCTP_PKTLOG_WRITERS_NEED_LOCK) {
atomic_add_int(&packet_log_writers, SCTP_PKTLOG_WRITERS_NEED_LOCK);
atomic_add_int(&SCTP_BASE_VAR(packet_log_writers), SCTP_PKTLOG_WRITERS_NEED_LOCK);
again:
if ((did_delay == 0) && (packet_log_writers != SCTP_PKTLOG_WRITERS_NEED_LOCK)) {
if ((did_delay == 0) && (SCTP_BASE_VAR(packet_log_writers) != SCTP_PKTLOG_WRITERS_NEED_LOCK)) {
/*
* we delay here for just a moment hoping the
* writer(s) that were present when we entered will
@ -515,12 +512,12 @@ sctp_copy_out_packet_log(uint8_t * target, int length)
}
SCTP_IP_PKTLOG_LOCK();
lenat = (int *)target;
*lenat = packet_log_end;
*lenat = SCTP_BASE_VAR(packet_log_end);
lenat++;
this_copy = min((length - sizeof(int)), SCTP_PACKET_LOG_SIZE);
memcpy((void *)lenat, (void *)packet_log_buffer, this_copy);
memcpy((void *)lenat, (void *)SCTP_BASE_VAR(packet_log_buffer), this_copy);
if (SCTP_PKTLOG_WRITERS_NEED_LOCK) {
atomic_subtract_int(&packet_log_writers,
atomic_subtract_int(&SCTP_BASE_VAR(packet_log_writers),
SCTP_PKTLOG_WRITERS_NEED_LOCK);
}
SCTP_IP_PKTLOG_UNLOCK();

View File

@ -58,5 +58,7 @@ int sctp_copy_out_packet_log(uint8_t * target, int length);
void sctp_addr_change(struct ifaddr *ifa, int cmd);
void sctp_add_or_del_interfaces(int (*pred) (struct ifnet *), int add);
#endif
#endif
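The sctp_add_or_del_interfaces() helper declared above (and defined in sctp_bsd_addr.c earlier in this commit) walks every interface, applies a caller-supplied predicate, and pushes each matching address through sctp_addr_change() as an RTM_ADD or RTM_DELETE event. A hypothetical kernel-side caller could look like the sketch below, assuming the usual <net/if.h> and <net/if_types.h> headers are already in scope as they are in sctp_bsd_addr.c; the predicate name and the loopback test are illustrative, not part of the commit.

/* Hypothetical predicate: act only on non-loopback interfaces. */
static int
sctp_not_loopback(struct ifnet *ifn)
{
        return (ifn->if_type != IFT_LOOP);
}

static void
sctp_example_refresh_addresses(void)
{
        /* Add the addresses of every interface the predicate accepts... */
        sctp_add_or_del_interfaces(sctp_not_loopback, 1);

        /* ...or remove them again by passing add == 0. */
        sctp_add_or_del_interfaces(sctp_not_loopback, 0);
}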

View File

@ -55,7 +55,7 @@ sctp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net)
net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
net->ssthresh = stcb->asoc.peers_rwnd;
if (sctp_logging_level & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION);
}
}
@ -71,7 +71,7 @@ sctp_cwnd_update_after_fr(struct sctp_tcb *stcb,
* (net->fast_retran_loss_recovery == 0)))
*/
TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
if ((asoc->fast_retran_loss_recovery == 0) || (sctp_cmt_on_off == 1)) {
if ((asoc->fast_retran_loss_recovery == 0) || (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 1)) {
/* out of a RFC2582 Fast recovery window? */
if (net->net_ack > 0) {
/*
@ -88,7 +88,7 @@ sctp_cwnd_update_after_fr(struct sctp_tcb *stcb,
net->ssthresh = 2 * net->mtu;
}
net->cwnd = net->ssthresh;
if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
SCTP_CWND_LOG_FROM_FR);
}
@ -165,7 +165,7 @@ sctp_cwnd_update_after_sack(struct sctp_tcb *stcb,
}
}
#endif
if (sctp_early_fr) {
if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
/*
* So, first of all do we need to have a Early FR
* timer running?
@ -201,7 +201,7 @@ sctp_cwnd_update_after_sack(struct sctp_tcb *stcb,
}
/* if nothing was acked on this destination skip it */
if (net->net_ack == 0) {
if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
}
continue;
@ -232,10 +232,11 @@ sctp_cwnd_update_after_sack(struct sctp_tcb *stcb,
*
* Should we stop any running T3 timer here?
*/
if (sctp_cmt_on_off && sctp_cmt_pf && ((net->dest_state & SCTP_ADDR_PF) ==
SCTP_ADDR_PF)) {
if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) &&
SCTP_BASE_SYSCTL(sctp_cmt_pf) &&
((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)) {
net->dest_state &= ~SCTP_ADDR_PF;
net->cwnd = net->mtu * sctp_cmt_pf;
net->cwnd = net->mtu * SCTP_BASE_SYSCTL(sctp_cmt_pf);
SCTPDBG(SCTP_DEBUG_INDATA1, "Destination %p moved from PF to reachable with cwnd %d.\n",
net, net->cwnd);
/*
@ -259,7 +260,7 @@ sctp_cwnd_update_after_sack(struct sctp_tcb *stcb,
*/
#endif
if (asoc->fast_retran_loss_recovery && will_exit == 0 && sctp_cmt_on_off == 0) {
if (asoc->fast_retran_loss_recovery && will_exit == 0 && SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) {
/*
* If we are in loss recovery we skip any cwnd
* update
@ -270,26 +271,26 @@ sctp_cwnd_update_after_sack(struct sctp_tcb *stcb,
* CMT: CUC algorithm. Update cwnd if pseudo-cumack has
* moved.
*/
if (accum_moved || (sctp_cmt_on_off && net->new_pseudo_cumack)) {
if (accum_moved || (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && net->new_pseudo_cumack)) {
/* If the cumulative ack moved we can proceed */
if (net->cwnd <= net->ssthresh) {
/* We are in slow start */
if (net->flight_size + net->net_ack >= net->cwnd) {
if (net->net_ack > (net->mtu * sctp_L2_abc_variable)) {
net->cwnd += (net->mtu * sctp_L2_abc_variable);
if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
if (net->net_ack > (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable))) {
net->cwnd += (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable));
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
sctp_log_cwnd(stcb, net, net->mtu,
SCTP_CWND_LOG_FROM_SS);
}
} else {
net->cwnd += net->net_ack;
if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
sctp_log_cwnd(stcb, net, net->net_ack,
SCTP_CWND_LOG_FROM_SS);
}
}
} else {
if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
sctp_log_cwnd(stcb, net, net->net_ack,
SCTP_CWND_LOG_NOADV_SS);
}
@ -305,19 +306,19 @@ sctp_cwnd_update_after_sack(struct sctp_tcb *stcb,
(net->partial_bytes_acked >= net->cwnd)) {
net->partial_bytes_acked -= net->cwnd;
net->cwnd += net->mtu;
if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
sctp_log_cwnd(stcb, net, net->mtu,
SCTP_CWND_LOG_FROM_CA);
}
} else {
if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
sctp_log_cwnd(stcb, net, net->net_ack,
SCTP_CWND_LOG_NOADV_CA);
}
}
}
} else {
if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
sctp_log_cwnd(stcb, net, net->mtu,
SCTP_CWND_LOG_NO_CUMACK);
}
@ -351,7 +352,7 @@ sctp_cwnd_update_after_timeout(struct sctp_tcb *stcb, struct sctp_nets *net)
net->cwnd = net->mtu;
net->partial_bytes_acked = 0;
if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
}
}
@ -369,7 +370,7 @@ sctp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net)
net->RTO <<= 1;
}
net->cwnd = net->ssthresh;
if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
}
}
@ -478,7 +479,7 @@ sctp_cwnd_update_after_packet_dropped(struct sctp_tcb *stcb,
}
if (net->cwnd - old_cwnd != 0) {
/* log only changes */
if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
SCTP_CWND_LOG_FROM_SAT);
}
@ -495,7 +496,7 @@ sctp_cwnd_update_after_output(struct sctp_tcb *stcb,
net->ssthresh = net->cwnd;
net->cwnd = (net->flight_size + (burst_limit * net->mtu));
if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_BRST);
}
}
@ -516,7 +517,7 @@ sctp_cwnd_update_after_fr_timer(struct sctp_inpcb *inp,
if (net->cwnd < net->ssthresh)
/* still in SS move to CA */
net->ssthresh = net->cwnd - 1;
if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
sctp_log_cwnd(stcb, net, (old_cwnd - net->cwnd), SCTP_CWND_LOG_FROM_FR);
}
}
@ -619,12 +620,12 @@ sctp_hs_cwnd_increase(struct sctp_tcb *stcb, struct sctp_nets *net)
/* normal mode */
if (net->net_ack > net->mtu) {
net->cwnd += net->mtu;
if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
sctp_log_cwnd(stcb, net, net->mtu, SCTP_CWND_LOG_FROM_SS);
}
} else {
net->cwnd += net->net_ack;
if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
sctp_log_cwnd(stcb, net, net->net_ack, SCTP_CWND_LOG_FROM_SS);
}
}
@ -638,7 +639,7 @@ sctp_hs_cwnd_increase(struct sctp_tcb *stcb, struct sctp_nets *net)
net->last_hs_used = indx;
incr = ((sctp_cwnd_adjust[indx].increase) << 10);
net->cwnd += incr;
if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
sctp_log_cwnd(stcb, net, incr, SCTP_CWND_LOG_FROM_SS);
}
}
@ -680,7 +681,7 @@ sctp_hs_cwnd_decrease(struct sctp_tcb *stcb, struct sctp_nets *net)
net->last_hs_used = indx;
}
}
if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_FR);
}
}
@ -696,7 +697,7 @@ sctp_hs_cwnd_update_after_fr(struct sctp_tcb *stcb,
* (net->fast_retran_loss_recovery == 0)))
*/
TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
if ((asoc->fast_retran_loss_recovery == 0) || (sctp_cmt_on_off == 1)) {
if ((asoc->fast_retran_loss_recovery == 0) || (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 1)) {
/* out of a RFC2582 Fast recovery window? */
if (net->net_ack > 0) {
/*
@ -782,7 +783,7 @@ sctp_hs_cwnd_update_after_sack(struct sctp_tcb *stcb,
}
}
#endif
if (sctp_early_fr) {
if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
/*
* So, first of all do we need to have a Early FR
* timer running?
@ -818,7 +819,7 @@ sctp_hs_cwnd_update_after_sack(struct sctp_tcb *stcb,
}
/* if nothing was acked on this destination skip it */
if (net->net_ack == 0) {
if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
}
continue;
@ -849,10 +850,11 @@ sctp_hs_cwnd_update_after_sack(struct sctp_tcb *stcb,
*
* Should we stop any running T3 timer here?
*/
if (sctp_cmt_on_off && sctp_cmt_pf && ((net->dest_state & SCTP_ADDR_PF) ==
SCTP_ADDR_PF)) {
if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) &&
SCTP_BASE_SYSCTL(sctp_cmt_pf) &&
((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)) {
net->dest_state &= ~SCTP_ADDR_PF;
net->cwnd = net->mtu * sctp_cmt_pf;
net->cwnd = net->mtu * SCTP_BASE_SYSCTL(sctp_cmt_pf);
SCTPDBG(SCTP_DEBUG_INDATA1, "Destination %p moved from PF to reachable with cwnd %d.\n",
net, net->cwnd);
/*
@ -876,7 +878,7 @@ sctp_hs_cwnd_update_after_sack(struct sctp_tcb *stcb,
*/
#endif
if (asoc->fast_retran_loss_recovery && will_exit == 0 && sctp_cmt_on_off == 0) {
if (asoc->fast_retran_loss_recovery && will_exit == 0 && SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) {
/*
* If we are in loss recovery we skip any cwnd
* update
@ -887,7 +889,7 @@ sctp_hs_cwnd_update_after_sack(struct sctp_tcb *stcb,
* CMT: CUC algorithm. Update cwnd if pseudo-cumack has
* moved.
*/
if (accum_moved || (sctp_cmt_on_off && net->new_pseudo_cumack)) {
if (accum_moved || (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && net->new_pseudo_cumack)) {
/* If the cumulative ack moved we can proceed */
if (net->cwnd <= net->ssthresh) {
/* We are in slow start */
@ -896,7 +898,7 @@ sctp_hs_cwnd_update_after_sack(struct sctp_tcb *stcb,
sctp_hs_cwnd_increase(stcb, net);
} else {
if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
sctp_log_cwnd(stcb, net, net->net_ack,
SCTP_CWND_LOG_NOADV_SS);
}
@ -908,19 +910,19 @@ sctp_hs_cwnd_update_after_sack(struct sctp_tcb *stcb,
(net->partial_bytes_acked >= net->cwnd)) {
net->partial_bytes_acked -= net->cwnd;
net->cwnd += net->mtu;
if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
sctp_log_cwnd(stcb, net, net->mtu,
SCTP_CWND_LOG_FROM_CA);
}
} else {
if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
sctp_log_cwnd(stcb, net, net->net_ack,
SCTP_CWND_LOG_NOADV_CA);
}
}
}
} else {
if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
sctp_log_cwnd(stcb, net, net->mtu,
SCTP_CWND_LOG_NO_CUMACK);
}
@ -1151,21 +1153,21 @@ htcp_cong_avoid(struct sctp_tcb *stcb, struct sctp_nets *net)
if (net->cwnd <= net->ssthresh) {
/* We are in slow start */
if (net->flight_size + net->net_ack >= net->cwnd) {
if (net->net_ack > (net->mtu * sctp_L2_abc_variable)) {
net->cwnd += (net->mtu * sctp_L2_abc_variable);
if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
if (net->net_ack > (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable))) {
net->cwnd += (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable));
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
sctp_log_cwnd(stcb, net, net->mtu,
SCTP_CWND_LOG_FROM_SS);
}
} else {
net->cwnd += net->net_ack;
if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
sctp_log_cwnd(stcb, net, net->net_ack,
SCTP_CWND_LOG_FROM_SS);
}
}
} else {
if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
sctp_log_cwnd(stcb, net, net->net_ack,
SCTP_CWND_LOG_NOADV_SS);
}
@ -1186,13 +1188,13 @@ htcp_cong_avoid(struct sctp_tcb *stcb, struct sctp_nets *net)
net->cwnd += net->mtu;
net->partial_bytes_acked = 0;
htcp_alpha_update(&net->htcp_ca);
if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
sctp_log_cwnd(stcb, net, net->mtu,
SCTP_CWND_LOG_FROM_CA);
}
} else {
net->partial_bytes_acked += net->net_ack;
if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
sctp_log_cwnd(stcb, net, net->net_ack,
SCTP_CWND_LOG_NOADV_CA);
}
@ -1233,7 +1235,7 @@ sctp_htcp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net)
net->ssthresh = stcb->asoc.peers_rwnd;
htcp_init(stcb, net);
if (sctp_logging_level & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION);
}
}
@ -1264,7 +1266,7 @@ sctp_htcp_cwnd_update_after_sack(struct sctp_tcb *stcb,
}
}
#endif
if (sctp_early_fr) {
if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
/*
* So, first of all do we need to have a Early FR
* timer running?
@ -1300,7 +1302,7 @@ sctp_htcp_cwnd_update_after_sack(struct sctp_tcb *stcb,
}
/* if nothing was acked on this destination skip it */
if (net->net_ack == 0) {
if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
}
continue;
@ -1331,10 +1333,11 @@ sctp_htcp_cwnd_update_after_sack(struct sctp_tcb *stcb,
*
* Should we stop any running T3 timer here?
*/
if (sctp_cmt_on_off && sctp_cmt_pf && ((net->dest_state & SCTP_ADDR_PF) ==
SCTP_ADDR_PF)) {
if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) &&
SCTP_BASE_SYSCTL(sctp_cmt_pf) &&
((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)) {
net->dest_state &= ~SCTP_ADDR_PF;
net->cwnd = net->mtu * sctp_cmt_pf;
net->cwnd = net->mtu * SCTP_BASE_SYSCTL(sctp_cmt_pf);
SCTPDBG(SCTP_DEBUG_INDATA1, "Destination %p moved from PF to reachable with cwnd %d.\n",
net, net->cwnd);
/*
@ -1358,7 +1361,7 @@ sctp_htcp_cwnd_update_after_sack(struct sctp_tcb *stcb,
*/
#endif
if (asoc->fast_retran_loss_recovery && will_exit == 0 && sctp_cmt_on_off == 0) {
if (asoc->fast_retran_loss_recovery && will_exit == 0 && SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) {
/*
* If we are in loss recovery we skip any cwnd
* update
@ -1369,11 +1372,11 @@ sctp_htcp_cwnd_update_after_sack(struct sctp_tcb *stcb,
* CMT: CUC algorithm. Update cwnd if pseudo-cumack has
* moved.
*/
if (accum_moved || (sctp_cmt_on_off && net->new_pseudo_cumack)) {
if (accum_moved || (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && net->new_pseudo_cumack)) {
htcp_cong_avoid(stcb, net);
measure_achieved_throughput(stcb, net);
} else {
if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
sctp_log_cwnd(stcb, net, net->mtu,
SCTP_CWND_LOG_NO_CUMACK);
}
@ -1409,7 +1412,7 @@ sctp_htcp_cwnd_update_after_fr(struct sctp_tcb *stcb,
* (net->fast_retran_loss_recovery == 0)))
*/
TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
if ((asoc->fast_retran_loss_recovery == 0) || (sctp_cmt_on_off == 1)) {
if ((asoc->fast_retran_loss_recovery == 0) || (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 1)) {
/* out of a RFC2582 Fast recovery window? */
if (net->net_ack > 0) {
/*
@ -1425,7 +1428,7 @@ sctp_htcp_cwnd_update_after_fr(struct sctp_tcb *stcb,
htcp_reset(&net->htcp_ca);
net->ssthresh = htcp_recalc_ssthresh(stcb, net);
net->cwnd = net->ssthresh;
if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
SCTP_CWND_LOG_FROM_FR);
}
@ -1487,7 +1490,7 @@ sctp_htcp_cwnd_update_after_timeout(struct sctp_tcb *stcb,
net->ssthresh = htcp_recalc_ssthresh(stcb, net);
net->cwnd = net->mtu;
net->partial_bytes_acked = 0;
if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
}
}
@ -1511,7 +1514,7 @@ sctp_htcp_cwnd_update_after_fr_timer(struct sctp_inpcb *inp,
if (net->cwnd < net->ssthresh)
/* still in SS move to CA */
net->ssthresh = net->cwnd - 1;
if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
sctp_log_cwnd(stcb, net, (old_cwnd - net->cwnd), SCTP_CWND_LOG_FROM_FR);
}
}
@ -1534,7 +1537,7 @@ sctp_htcp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb,
net->RTO <<= 1;
}
net->cwnd = net->ssthresh;
if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
}
}

View File

@ -40,6 +40,7 @@ __FBSDID("$FreeBSD$");
#include <netinet/sctp.h>
#include <netinet/sctp_constants.h>
#define SCTP_PACKED __attribute__((packed))
/*
* Parameter structures
@ -581,4 +582,5 @@ struct sctp_auth_invalid_hmac {
#define SCTP_MIN_V4_OVERHEAD (sizeof(struct ip) + \
sizeof(struct sctphdr))
#undef SCTP_PACKED
#endif /* !__sctp_header_h__ */

View File

@ -59,69 +59,14 @@ __FBSDID("$FreeBSD$");
void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
uint32_t calc, calc_save;
/*
* This is really set wrong with respect to a 1-2-m socket. Since
* the sb_cc is the count that everyone as put up. When we re-write
* sctp_soreceive then we will fix this so that ONLY this
* associations data is taken into account.
*/
if (stcb->sctp_socket == NULL)
return;
if (stcb->asoc.sb_cc == 0 &&
asoc->size_on_reasm_queue == 0 &&
asoc->size_on_all_streams == 0) {
/* Full rwnd granted */
asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
SCTP_MINIMAL_RWND);
return;
}
/* get actual space */
calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
/*
* take out what has NOT been put on socket queue and we yet hold
* for putting up.
*/
calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue);
calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams);
if (calc == 0) {
/* out of space */
asoc->my_rwnd = 0;
return;
}
/* what is the overhead of all these rwnd's */
calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
calc_save = calc;
asoc->my_rwnd = calc;
if ((asoc->my_rwnd == 0) &&
(calc < stcb->asoc.my_rwnd_control_len)) {
/*-
* If our rwnd == 0 && the overhead is greater than the
* data onqueue, we clamp the rwnd to 1. This lets us
* still accept inbound segments, but hopefully will shut
* the sender down when he finally gets the message. This
* hopefully will gracefully avoid discarding packets.
*/
asoc->my_rwnd = 1;
}
if (asoc->my_rwnd &&
(asoc->my_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_receiver)) {
/* SWS engaged, tell peer none left */
asoc->my_rwnd = 1;
}
asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}
/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
uint32_t calc = 0, calc_save = 0, result = 0;
uint32_t calc = 0;
/*
* This is really set wrong with respect to a 1-2-m socket. Since
@ -136,8 +81,7 @@ sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
asoc->size_on_reasm_queue == 0 &&
asoc->size_on_all_streams == 0) {
/* Full rwnd granted */
calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
SCTP_MINIMAL_RWND);
calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
return (calc);
}
/* get actual space */
@ -156,26 +100,14 @@ sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
}
/* what is the overhead of all these rwnd's */
calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
calc_save = calc;
result = calc;
if ((result == 0) &&
(calc < stcb->asoc.my_rwnd_control_len)) {
/*-
* If our rwnd == 0 && the overhead is greater than the
* data onqueue, we clamp the rwnd to 1. This lets us
* still accept inbound segments, but hopefully will shut
* the sender down when he finally gets the message. This
* hopefully will gracefully avoid discarding packets.
/*
* If the window gets too small due to ctrl-stuff, reduce it to 1,
* even it is 0. SWS engaged
*/
result = 1;
if (calc < stcb->asoc.my_rwnd_control_len) {
calc = 1;
}
if (result &&
(result < stcb->sctp_ep->sctp_ep.sctp_sws_receiver)) {
/* SWS engaged, tell peer none left */
result = 1;
}
return (result);
return (calc);
}
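The rewritten sctp_calc_rwnd() above now computes the advertised window in one pass: grant the full buffer when nothing is queued, otherwise take the socket-buffer space, subtract what is still held on the reassembly and stream queues, subtract the rwnd control overhead, and clamp to 1 when the overhead alone would swallow the window. A standalone distillation of that order, with the association and socket fields replaced by plain parameters (all names here are illustrative):

#include <stdint.h>

/* Saturating subtraction, standing in for sctp_sbspace_sub(). */
static uint32_t
rwnd_sub(uint32_t val, uint32_t dec)
{
        return ((val > dec) ? (val - dec) : 0);
}

static uint32_t
calc_rwnd_sketch(uint32_t sb_limit, uint32_t sb_space, uint32_t sb_cc,
    uint32_t on_reasm_queue, uint32_t on_all_streams,
    uint32_t ctrl_overhead, uint32_t minimal_rwnd)
{
        uint32_t calc;

        if (sb_cc == 0 && on_reasm_queue == 0 && on_all_streams == 0) {
                /* Nothing held anywhere: full rwnd granted. */
                return ((sb_limit > minimal_rwnd) ? sb_limit : minimal_rwnd);
        }
        calc = rwnd_sub(sb_space, on_reasm_queue);      /* held for reassembly     */
        calc = rwnd_sub(calc, on_all_streams);          /* held on stream queues   */
        if (calc == 0) {
                return (0);                             /* out of space            */
        }
        calc = rwnd_sub(calc, ctrl_overhead);           /* rwnd control overhead   */
        if (calc < ctrl_overhead) {
                /* Window shrank into the control overhead: advertise 1, not 0. */
                calc = 1;
        }
        return (calc);
}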
@ -568,7 +500,7 @@ sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
sctp_ucount_incr(asoc->cnt_on_all_streams);
strm = &asoc->strmin[control->sinfo_stream];
nxt_todel = strm->last_sequence_delivered + 1;
if (sctp_logging_level & SCTP_STR_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
}
SCTPDBG(SCTP_DEBUG_INDATA1,
@ -616,7 +548,7 @@ sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
}
if (nxt_todel == control->sinfo_ssn) {
/* can be delivered right away? */
if (sctp_logging_level & SCTP_STR_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
}
queue_needed = 0;
@ -642,7 +574,7 @@ sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
* d-queue. And we have a finite number that
* can be delivered from the strq.
*/
if (sctp_logging_level & SCTP_STR_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
sctp_log_strm_del(control, NULL,
SCTP_STR_LOG_FROM_IMMED_DEL);
}
@ -667,7 +599,7 @@ sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
}
if (TAILQ_EMPTY(&strm->inqueue)) {
/* Empty queue */
if (sctp_logging_level & SCTP_STR_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
}
TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
@ -679,7 +611,7 @@ sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
* one in queue is bigger than the
* new one, insert before this one
*/
if (sctp_logging_level & SCTP_STR_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
sctp_log_strm_del(control, at,
SCTP_STR_LOG_FROM_INSERT_MD);
}
@ -716,7 +648,7 @@ sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
* We are at the end, insert
* it after this one
*/
if (sctp_logging_level & SCTP_STR_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
sctp_log_strm_del(control, at,
SCTP_STR_LOG_FROM_INSERT_TL);
}
@ -1465,9 +1397,12 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
chk = NULL;
tsn = ntohl(ch->dp.tsn);
chunk_flags = ch->ch.chunk_flags;
if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
asoc->send_sack = 1;
}
protocol_id = ch->dp.protocol_id;
ordered = ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0);
if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
}
if (stcb == NULL) {
@ -1545,7 +1480,7 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
/* now do the tests */
if (((asoc->cnt_on_all_streams +
asoc->cnt_on_reasm_queue +
asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) ||
asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
(((int)asoc->my_rwnd) <= 0)) {
/*
* When we have NO room in the rwnd we check to make sure
@ -1574,14 +1509,12 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
#endif
}
/* now is it in the mapping array of what we have accepted? */
if (compare_with_wrap(tsn,
asoc->highest_tsn_inside_map, MAX_TSN)) {
if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
/* Nope not in the valid range dump it */
sctp_set_rwnd(stcb, asoc);
if ((asoc->cnt_on_all_streams +
asoc->cnt_on_reasm_queue +
asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) {
asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
SCTP_STAT_INCR(sctps_datadropchklmt);
} else {
SCTP_STAT_INCR(sctps_datadroprwnd);
@ -1624,7 +1557,7 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
/* we have a new high score */
asoc->highest_tsn_inside_map = tsn;
if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
}
}
@ -1703,7 +1636,7 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
(offset + sizeof(struct sctp_data_chunk)),
the_len, M_DONTWAIT);
#ifdef SCTP_MBUF_LOGGING
if (sctp_logging_level & SCTP_MBUF_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
struct mbuf *mat;
mat = dmbuf;
@ -1779,7 +1712,7 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
asoc->strmin[strmno].last_sequence_delivered++;
}
SCTP_STAT_INCR(sctps_recvexpress);
if (sctp_logging_level & SCTP_STR_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
SCTP_STR_LOG_FROM_EXPRS_DEL);
}
@ -2110,7 +2043,7 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
/* we have a new high score */
asoc->highest_tsn_inside_map = tsn;
if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
}
}
@ -2128,10 +2061,10 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
}
SCTP_STAT_INCR(sctps_recvdata);
/* Set it present please */
if (sctp_logging_level & SCTP_STR_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
}
if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
}
@ -2240,7 +2173,7 @@ sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort
* 1) Did we move the cum-ack point?
*/
struct sctp_association *asoc;
int i, at;
int at;
int last_all_ones = 0;
int slide_from, slide_end, lgap, distance;
uint32_t old_cumack, old_base, old_highest;
@ -2264,14 +2197,14 @@ sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort
* offset of the current cum-ack as the starting point.
*/
at = 0;
for (i = 0; i < stcb->asoc.mapping_array_size; i++) {
for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
if (asoc->mapping_array[i] == 0xff) {
if (asoc->mapping_array[slide_from] == 0xff) {
at += 8;
last_all_ones = 1;
} else {
/* there is a 0 bit */
at += sctp_map_lookup_tab[asoc->mapping_array[i]];
at += sctp_map_lookup_tab[asoc->mapping_array[slide_from]];
last_all_ones = 0;
break;
}
@ -2289,6 +2222,9 @@ sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort
#else
SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
}
asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
#endif
}
@ -2306,7 +2242,7 @@ sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort
memset(asoc->mapping_array, 0, clr);
/* base becomes one ahead of the cum-ack */
asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
sctp_log_map(old_base, old_cumack, old_highest,
SCTP_MAP_PREPARE_SLIDE);
sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
@ -2314,8 +2250,8 @@ sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort
}
} else if (at >= 8) {
/* we can slide the mapping array down */
/* Calculate the new byte postion we can move down */
slide_from = at >> 3;
/* slide_from holds where we hit the first NON 0xff byte */
/*
* now calculate the ceiling of the move using our highest
* TSN value
@ -2334,10 +2270,19 @@ sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort
#else
printf("impossible slide?\n");
return;
#endif
}
if (slide_end > asoc->mapping_array_size) {
#ifdef INVARIANTS
panic("would overrun buffer");
#else
printf("Gak, would have overrun map end:%d slide_end:%d\n",
asoc->mapping_array_size, slide_end);
slide_end = asoc->mapping_array_size;
#endif
}
distance = (slide_end - slide_from) + 1;
if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
sctp_log_map(old_base, old_cumack, old_highest,
SCTP_MAP_PREPARE_SLIDE);
sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
@ -2352,7 +2297,7 @@ sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort
* don't think this should happen :-0
*/
if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
(uint32_t) asoc->mapping_array_size,
SCTP_MAP_SLIDE_NONE);
@ -2368,7 +2313,7 @@ sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort
asoc->mapping_array[ii] = 0;
}
asoc->mapping_array_base_tsn += (slide_from << 3);
if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
sctp_log_map(asoc->mapping_array_base_tsn,
asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
SCTP_MAP_SLIDE_RESULT);
@ -2415,7 +2360,8 @@ sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort
(stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
) {
if ((sctp_cmt_on_off) && (sctp_cmt_use_dac) &&
if ((SCTP_BASE_SYSCTL(sctp_cmt_on_off)) &&
(SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
(stcb->asoc.send_sack == 0) &&
(stcb->asoc.numduptsns == 0) &&
(stcb->asoc.delayed_ack) &&
@ -2679,7 +2625,7 @@ sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
* switch out and do either an ABORT() or
* possibly process them.
*/
if (sctp_strict_data_order) {
if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
struct mbuf *op_err;
op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
@ -2755,7 +2701,7 @@ sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
* give peer credit for being alive.
*/
SCTP_STAT_INCR(sctps_recvpktwithdata);
if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
stcb->asoc.overall_error_count,
0,
@ -2953,7 +2899,7 @@ sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct
* ewack
*/
if (*this_sack_lowest_newack == 0) {
if (sctp_logging_level & SCTP_SACK_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
sctp_log_sack(*this_sack_lowest_newack,
last_tsn,
tp1->rec.data.TSN_seq,
@ -2995,7 +2941,7 @@ sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct
}
tp1->whoTo->find_pseudo_cumack = 1;
}
if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
}
if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
@ -3004,7 +2950,7 @@ sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct
}
tp1->whoTo->find_rtx_pseudo_cumack = 1;
}
if (sctp_logging_level & SCTP_SACK_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
sctp_log_sack(*biggest_newly_acked_tsn,
last_tsn,
tp1->rec.data.TSN_seq,
@ -3012,7 +2958,7 @@ sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct
frag_end,
SCTP_LOG_TSN_ACKED);
}
if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
tp1->whoTo->flight_size,
tp1->book_size,
@ -3090,7 +3036,7 @@ sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct
break;
}
}
if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
if (num_frs)
sctp_log_fr(*biggest_tsn_acked,
*biggest_newly_acked_tsn,
@ -3129,7 +3075,7 @@ sctp_check_for_revoked(struct sctp_tcb *stcb,
* We must add this stuff back in to assure
* timers and such get started.
*/
if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
tp1->whoTo->flight_size,
tp1->book_size,
@ -3144,7 +3090,7 @@ sctp_check_for_revoked(struct sctp_tcb *stcb,
*/
tp1->whoTo->cwnd += tp1->book_size;
tot_revoked++;
if (sctp_logging_level & SCTP_SACK_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
sctp_log_sack(asoc->last_acked_seq,
cumack,
tp1->rec.data.TSN_seq,
@ -3204,7 +3150,7 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
}
/* CMT DAC algo: finding out if SACK is a mixed SACK */
if (sctp_cmt_on_off && sctp_cmt_use_dac) {
if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
if (net->saw_newack)
num_dests_sacked++;
@ -3221,7 +3167,7 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
tp1 = TAILQ_NEXT(tp1, sctp_next);
continue;
}
if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
if (tp1->sent < SCTP_DATAGRAM_RESEND)
sctp_log_fr(biggest_tsn_newly_acked,
tp1->rec.data.TSN_seq,
@ -3316,7 +3262,7 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
* Strike the TSN if in fast-recovery and cum-ack
* moved.
*/
if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
sctp_log_fr(biggest_tsn_newly_acked,
tp1->rec.data.TSN_seq,
tp1->sent,
@ -3325,7 +3271,7 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
if (tp1->sent < SCTP_DATAGRAM_RESEND) {
tp1->sent++;
}
if (sctp_cmt_on_off && sctp_cmt_use_dac) {
if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
/*
* CMT DAC algorithm: If SACK flag is set to
* 0, then lowest_newack test will not pass
@ -3340,7 +3286,7 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
*/
if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
sctp_log_fr(16 + num_dests_sacked,
tp1->rec.data.TSN_seq,
tp1->sent,
@ -3349,7 +3295,7 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
tp1->sent++;
}
}
} else if ((tp1->rec.data.doing_fast_retransmit) && (sctp_cmt_on_off == 0)) {
} else if ((tp1->rec.data.doing_fast_retransmit) && (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0)) {
/*
* For those that have done a FR we must take
* special consideration if we strike. I.e the
@ -3379,7 +3325,7 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
* beyond where things were when we
* did a FR.
*/
if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
sctp_log_fr(biggest_tsn_newly_acked,
tp1->rec.data.TSN_seq,
tp1->sent,
@ -3389,7 +3335,7 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
tp1->sent++;
}
strike_flag = 1;
if (sctp_cmt_on_off && sctp_cmt_use_dac) {
if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
/*
* CMT DAC algorithm: If
* SACK flag is set to 0,
@ -3413,7 +3359,7 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
(num_dests_sacked == 1) &&
compare_with_wrap(this_sack_lowest_newack,
tp1->rec.data.TSN_seq, MAX_TSN)) {
if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
sctp_log_fr(32 + num_dests_sacked,
tp1->rec.data.TSN_seq,
tp1->sent,
@ -3440,7 +3386,7 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
;
} else {
/* Strike the TSN */
if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
sctp_log_fr(biggest_tsn_newly_acked,
tp1->rec.data.TSN_seq,
tp1->sent,
@ -3449,7 +3395,7 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
if (tp1->sent < SCTP_DATAGRAM_RESEND) {
tp1->sent++;
}
if (sctp_cmt_on_off && sctp_cmt_use_dac) {
if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
/*
* CMT DAC algorithm: If SACK flag is set to
* 0, then lowest_newack test will not pass
@ -3464,7 +3410,7 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
*/
if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
sctp_log_fr(48 + num_dests_sacked,
tp1->rec.data.TSN_seq,
tp1->sent,
@ -3479,7 +3425,7 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
struct sctp_nets *alt;
/* printf("OK, we are now ready to FR this guy\n"); */
if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
0, SCTP_FR_MARKED);
}
@ -3488,7 +3434,7 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
SCTP_STAT_INCR(sctps_sendmultfastretrans);
}
sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
if (sctp_cmt_on_off) {
if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
/*
* CMT: Using RTX_SSTHRESH policy for CMT.
* If CMT is being used, then pick dest with
@ -3497,7 +3443,7 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
tp1->no_fr_allowed = 1;
alt = tp1->whoTo;
/* sa_ignore NO_NULL_CHK */
if (sctp_cmt_on_off && sctp_cmt_pf) {
if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) {
/*
* JRS 5/18/07 - If CMT PF is on,
* use the PF version of
@ -3581,7 +3527,7 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
tp1->do_rtt = 0;
}
/* fix counts and things */
if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
(tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
tp1->book_size,
@ -3592,12 +3538,12 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
tp1->whoTo->net_ack++;
sctp_flight_size_decrease(tp1);
}
if (sctp_logging_level & SCTP_LOG_RWND_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
asoc->peers_rwnd, tp1->send_size, sctp_peer_chunk_oh);
asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
}
/* add back to the rwnd */
asoc->peers_rwnd += (tp1->send_size + sctp_peer_chunk_oh);
asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
/* remove from the total flight */
sctp_total_flight_decrease(stcb, tp1);
@ -3730,7 +3676,7 @@ sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
SCTP_SOCKET_UNLOCK(so, 1);
#endif
if (sctp_logging_level & SCTP_WAKE_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
sctp_wakeup_log(stcb, tp1->rec.data.TSN_seq, 1, SCTP_WAKESND_FROM_FWDTSN);
}
}
@ -3792,7 +3738,7 @@ sctp_window_probe_recovery(struct sctp_tcb *stcb,
/* First setup this one and get it moved back */
tp1->sent = SCTP_DATAGRAM_UNSENT;
if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
tp1->whoTo->flight_size,
tp1->book_size,
@ -3835,7 +3781,7 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
int win_probe_recovered = 0;
int j, done_once = 0;
if (sctp_logging_level & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
}
@ -3855,7 +3801,7 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
} else if (asoc->last_acked_seq == cumack) {
/* Window update sack */
asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
(uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * sctp_peer_chunk_oh)));
(uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
/* SWS sender side engages */
asoc->peers_rwnd = 0;
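The window-update path above recomputes the usable peer rwnd by charging the advertised window for everything still in flight plus a per-queued-chunk overhead allowance, then clamping the result to zero once it falls under the sender-side silly-window threshold. A stand-alone sketch of that arithmetic; the helper names and the 256-byte allowance are illustrative, not the sysctl defaults:

#include <stdint.h>
#include <stdio.h>

/* Subtract with a floor of zero, in the spirit of sctp_sbspace_sub(). */
static uint32_t sbspace_sub(uint32_t rwnd, uint32_t used)
{
    return (rwnd > used ? rwnd - used : 0);
}

static uint32_t usable_rwnd(uint32_t peer_rwnd, uint32_t total_flight,
    uint32_t sent_queue_cnt, uint32_t per_chunk_oh, uint32_t sws_sender)
{
    uint32_t rwnd;

    rwnd = sbspace_sub(peer_rwnd, total_flight + sent_queue_cnt * per_chunk_oh);
    if (rwnd < sws_sender)      /* sender-side SWS avoidance engages */
        rwnd = 0;
    return (rwnd);
}

int main(void)
{
    /* 64 KB advertised, 40 KB in flight, 30 chunks queued, 256 bytes overhead each. */
    printf("usable rwnd = %u\n", usable_rwnd(65536, 40960, 30, 256, 1420));
    return (0);
}

With 40 KB in flight and 30 chunks still queued against a 64 KB advertisement, the sketch leaves 16896 usable bytes.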
@ -3878,7 +3824,7 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
net->new_pseudo_cumack = 0;
net->will_exit_fast_recovery = 0;
}
if (sctp_strict_sacks) {
if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
uint32_t send_s;
if (!TAILQ_EMPTY(&asoc->sent_queue)) {
@ -3920,7 +3866,7 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
}
}
asoc->this_sack_highest_gap = cumack;
if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
stcb->asoc.overall_error_count,
0,
@ -3951,7 +3897,7 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
* values may occur during marking
*/
if (tp1->sent < SCTP_DATAGRAM_RESEND) {
if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
tp1->whoTo->flight_size,
tp1->book_size,
@ -4001,7 +3947,7 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
tp1->whoTo->find_pseudo_cumack = 1;
tp1->whoTo->find_rtx_pseudo_cumack = 1;
if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
/* sa_ignore NO_NULL_CHK */
sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
}
@ -4021,7 +3967,7 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
sctp_free_bufspace(stcb, asoc, tp1, 1);
sctp_m_freem(tp1->data);
}
if (sctp_logging_level & SCTP_SACK_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
sctp_log_sack(asoc->last_acked_seq,
cumack,
tp1->rec.data.TSN_seq,
@ -4047,7 +3993,7 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
#endif
SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
if (sctp_logging_level & SCTP_WAKE_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
/* sa_ignore NO_NULL_CHK */
sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
}
@ -4069,7 +4015,7 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
SCTP_SOCKET_UNLOCK(so, 1);
#endif
} else {
if (sctp_logging_level & SCTP_WAKE_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
}
}
@ -4143,7 +4089,7 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
}
/* RWND update */
asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
(uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * sctp_peer_chunk_oh)));
(uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
/* SWS sender side engages */
asoc->peers_rwnd = 0;
@ -4188,7 +4134,7 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
stcb, net,
SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
}
if (sctp_early_fr) {
if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
@ -4308,7 +4254,7 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
stcb->sctp_ep, stcb, asoc->primary_destination);
}
}
if (sctp_logging_level & SCTP_SACK_RWND_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
rwnd,
stcb->asoc.peers_rwnd,
@ -4385,7 +4331,7 @@ sctp_handle_sack(struct mbuf *m, int offset,
num_seg = ntohs(sack->num_gap_ack_blks);
a_rwnd = rwnd;
if (sctp_logging_level & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
}
@ -4394,7 +4340,7 @@ sctp_handle_sack(struct mbuf *m, int offset,
num_dup = ntohs(sack->num_dup_tsns);
old_rwnd = stcb->asoc.peers_rwnd;
if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
stcb->asoc.overall_error_count,
0,
@ -4403,7 +4349,7 @@ sctp_handle_sack(struct mbuf *m, int offset,
}
stcb->asoc.overall_error_count = 0;
asoc = &stcb->asoc;
if (sctp_logging_level & SCTP_SACK_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
sctp_log_sack(asoc->last_acked_seq,
cum_ack,
0,
@ -4411,7 +4357,7 @@ sctp_handle_sack(struct mbuf *m, int offset,
num_dup,
SCTP_LOG_NEW_SACK);
}
if ((num_dup) && (sctp_logging_level & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) {
if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) {
int off_to_dup, iii;
uint32_t *dupdata, dblock;
@ -4435,7 +4381,7 @@ sctp_handle_sack(struct mbuf *m, int offset,
off_to_dup, num_dup, sack_length, num_seg);
}
}
if (sctp_strict_sacks) {
if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
/* reality check */
if (!TAILQ_EMPTY(&asoc->sent_queue)) {
tp1 = TAILQ_LAST(&asoc->sent_queue,
@ -4498,7 +4444,7 @@ sctp_handle_sack(struct mbuf *m, int offset,
(asoc->stream_queue_cnt == 0)
) {
/* nothing left on send/sent and strmq */
if (sctp_logging_level & SCTP_LOG_RWND_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
asoc->peers_rwnd, 0, 0, a_rwnd);
}
@ -4514,7 +4460,7 @@ sctp_handle_sack(struct mbuf *m, int offset,
TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
if (sctp_early_fr) {
if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
SCTP_STAT_INCR(sctps_earlyfrstpidsck1);
sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
@ -4581,7 +4527,7 @@ sctp_handle_sack(struct mbuf *m, int offset,
~SCTP_ADDR_UNCONFIRMED;
}
if (tp1->sent < SCTP_DATAGRAM_RESEND) {
if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
tp1->whoTo->flight_size,
tp1->book_size,
@ -4632,7 +4578,7 @@ sctp_handle_sack(struct mbuf *m, int offset,
tp1->whoTo->find_rtx_pseudo_cumack = 1;
if (sctp_logging_level & SCTP_SACK_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
sctp_log_sack(asoc->last_acked_seq,
cum_ack,
tp1->rec.data.TSN_seq,
@ -4640,7 +4586,7 @@ sctp_handle_sack(struct mbuf *m, int offset,
0,
SCTP_LOG_TSN_ACKED);
}
if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
}
}
@ -4696,7 +4642,7 @@ sctp_handle_sack(struct mbuf *m, int offset,
&biggest_tsn_acked, &biggest_tsn_newly_acked, &this_sack_lowest_newack,
num_seg, &ecn_seg_sums);
if (sctp_strict_sacks) {
if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
/*
* validate the biggest_tsn_acked in the gap acks if
* strict adherence is wanted.
@ -4715,7 +4661,7 @@ sctp_handle_sack(struct mbuf *m, int offset,
/*******************************************/
/* cancel ALL T3-send timer if accum moved */
/*******************************************/
if (sctp_cmt_on_off) {
if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
if (net->new_pseudo_cumack)
sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
@ -4773,7 +4719,7 @@ sctp_handle_sack(struct mbuf *m, int offset,
asoc->sent_queue_cnt_removeable--;
}
}
if (sctp_logging_level & SCTP_SACK_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
sctp_log_sack(asoc->last_acked_seq,
cum_ack,
tp1->rec.data.TSN_seq,
@ -4796,7 +4742,7 @@ sctp_handle_sack(struct mbuf *m, int offset,
#endif
SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
if (sctp_logging_level & SCTP_WAKE_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
}
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
@ -4817,7 +4763,7 @@ sctp_handle_sack(struct mbuf *m, int offset,
SCTP_SOCKET_UNLOCK(so, 1);
#endif
} else {
if (sctp_logging_level & SCTP_WAKE_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
}
}
@ -4852,7 +4798,7 @@ sctp_handle_sack(struct mbuf *m, int offset,
if ((tp1->sent > SCTP_DATAGRAM_RESEND) &&
(tp1->sent < SCTP_FORWARD_TSN_SKIP)) {
tp1->sent = SCTP_DATAGRAM_SENT;
if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
tp1->whoTo->flight_size,
tp1->book_size,
@ -4891,7 +4837,7 @@ sctp_handle_sack(struct mbuf *m, int offset,
/* nothing left in-flight */
TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
/* stop all timers */
if (sctp_early_fr) {
if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
@ -4911,7 +4857,7 @@ sctp_handle_sack(struct mbuf *m, int offset,
/**********************************/
if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
/* nothing left on sendqueue.. consider done */
if (sctp_logging_level & SCTP_LOG_RWND_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
asoc->peers_rwnd, 0, 0, a_rwnd);
}
@ -5016,7 +4962,7 @@ sctp_handle_sack(struct mbuf *m, int offset,
* to be done. Setting this_sack_lowest_newack to the cum_ack will
* automatically ensure that.
*/
if (sctp_cmt_on_off && sctp_cmt_use_dac && (cmt_dac_flag == 0)) {
if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && (cmt_dac_flag == 0)) {
this_sack_lowest_newack = cum_ack;
}
if (num_seg > 0) {
@ -5140,12 +5086,12 @@ sctp_handle_sack(struct mbuf *m, int offset,
}
/* Adjust and set the new rwnd value */
if (sctp_logging_level & SCTP_LOG_RWND_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * sctp_peer_chunk_oh), a_rwnd);
asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
}
asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
(uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * sctp_peer_chunk_oh)));
(uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
/* SWS sender side engages */
asoc->peers_rwnd = 0;
@ -5187,7 +5133,7 @@ sctp_handle_sack(struct mbuf *m, int offset,
stcb, net,
SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
}
if (sctp_early_fr) {
if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
@ -5220,7 +5166,7 @@ sctp_handle_sack(struct mbuf *m, int offset,
done_once = 1;
goto again;
}
if (sctp_logging_level & SCTP_SACK_RWND_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
a_rwnd,
stcb->asoc.peers_rwnd,
@ -5364,7 +5310,7 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb,
if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map,
MAX_TSN)) {
asoc->highest_tsn_inside_map = new_cum_tsn;
if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
}
}
@ -5382,7 +5328,7 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb,
}
if (gap >= m_size) {
if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
}
if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
@ -5423,7 +5369,7 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb,
asoc->mapping_array_base_tsn = new_cum_tsn + 1;
asoc->cumulative_tsn = asoc->highest_tsn_inside_map = new_cum_tsn;
if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
}
asoc->last_echo_tsn = asoc->highest_tsn_inside_map;

View File

@ -267,7 +267,7 @@ sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb,
TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
lnet->ssthresh = asoc->peers_rwnd;
if (sctp_logging_level & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION);
}
}
@ -316,7 +316,7 @@ sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb,
asoc->streamoutcnt = asoc->pre_open_streams;
/* init tsn's */
asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1;
if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
}
/* This is the next one we expect */
@ -446,7 +446,7 @@ sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
op_err = NULL;
}
/* extract the cookie and queue it to "echo" it back... */
if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
stcb->asoc.overall_error_count,
0,
@ -596,15 +596,16 @@ sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp,
* timer is running, for the destination, stop the timer because a
* PF-heartbeat was received.
*/
if (sctp_cmt_on_off && sctp_cmt_pf && (net->dest_state & SCTP_ADDR_PF) ==
SCTP_ADDR_PF) {
if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) &&
SCTP_BASE_SYSCTL(sctp_cmt_pf) &&
(net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF) {
if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
stcb, net,
SCTP_FROM_SCTP_INPUT + SCTP_LOC_5);
}
net->dest_state &= ~SCTP_ADDR_PF;
net->cwnd = net->mtu * sctp_cmt_pf;
net->cwnd = net->mtu * SCTP_BASE_SYSCTL(sctp_cmt_pf);
SCTPDBG(SCTP_DEBUG_INPUT1, "Destination %p moved from PF to reachable with cwnd %d.\n",
net, net->cwnd);
}
@ -1172,7 +1173,7 @@ sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset,
SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_ECHOED);
/* reset the RTO calc */
if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
stcb->asoc.overall_error_count,
0,
@ -1680,8 +1681,8 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
/* pull from vtag hash */
LIST_REMOVE(stcb, sctp_asocs);
/* re-insert to new vtag position */
head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
sctppcbinfo.hashasocmark)];
head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
SCTP_BASE_INFO(hashasocmark))];
/*
* put it in the bucket in the vtag hash of assoc's for the
* system
@ -1691,8 +1692,8 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
/* Is this the first restart? */
if (stcb->asoc.in_restart_hash == 0) {
/* Ok add it to assoc_id vtag hash */
head = &sctppcbinfo.sctp_restarthash[SCTP_PCBHASH_ASOC(stcb->asoc.assoc_id,
sctppcbinfo.hashrestartmark)];
head = &SCTP_BASE_INFO(sctp_restarthash)[SCTP_PCBHASH_ASOC(stcb->asoc.assoc_id,
SCTP_BASE_INFO(hashrestartmark))];
LIST_INSERT_HEAD(head, stcb, sctp_tcbrestarhash);
stcb->asoc.in_restart_hash = 1;
}
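The restart handling above is plain hash-table bookkeeping: the tag (or assoc id) is reduced to a bucket index with the table's mask and the tcb is pushed onto that bucket's list, after being pulled from wherever it hashed before. A user-space miniature of the same move, with invented names and table size, and with SCTP_PCBHASH_ASOC treated as a simple AND against the mask:

#include <sys/queue.h>
#include <stdint.h>
#include <stdio.h>

struct demo_tcb {
    LIST_ENTRY(demo_tcb) hash_next;
    uint32_t vtag;
};
LIST_HEAD(demo_head, demo_tcb);

#define DEMO_HASH_SIZE  64                      /* power of two */
#define DEMO_HASH(tag, mask)  ((tag) & (mask))  /* bucket index */

static struct demo_head demo_asochash[DEMO_HASH_SIZE];

static void demo_rehash(struct demo_tcb *tcb, uint32_t new_vtag)
{
    struct demo_head *head;

    LIST_REMOVE(tcb, hash_next);                /* pull from the old bucket */
    tcb->vtag = new_vtag;
    head = &demo_asochash[DEMO_HASH(new_vtag, DEMO_HASH_SIZE - 1)];
    LIST_INSERT_HEAD(head, tcb, hash_next);     /* re-insert at the new slot */
}

int main(void)
{
    struct demo_tcb tcb = { .vtag = 7 };

    LIST_INSERT_HEAD(&demo_asochash[DEMO_HASH(7, DEMO_HASH_SIZE - 1)], &tcb, hash_next);
    demo_rehash(&tcb, 0x12345678);
    printf("now in bucket %u\n", (unsigned)DEMO_HASH(tcb.vtag, DEMO_HASH_SIZE - 1));
    return (0);
}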
@ -2207,7 +2208,7 @@ sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
return (NULL);
}
#ifdef SCTP_MBUF_LOGGING
if (sctp_logging_level & SCTP_MBUF_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
struct mbuf *mat;
mat = m_sig;
@ -2967,7 +2968,7 @@ process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
stcb, tp1->whoTo);
/* fix counts and things */
if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PDRP,
tp1->whoTo->flight_size,
tp1->book_size,
@ -3285,6 +3286,9 @@ sctp_handle_stream_reset_response(struct sctp_tcb *stcb,
return (1);
}
stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1);
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
sctp_log_map(0, 7, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
}
stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn);
memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
@ -3388,6 +3392,9 @@ sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb,
return (1);
}
stcb->asoc.highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA;
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
sctp_log_map(0, 10, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
}
stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
stcb->asoc.mapping_array_base_tsn = stcb->asoc.highest_tsn_inside_map + 1;
memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
@ -3881,7 +3888,8 @@ __attribute__((noinline))
* valid.
*/
if ((ch->chunk_type == SCTP_AUTHENTICATION) &&
(stcb == NULL) && !sctp_auth_disable) {
(stcb == NULL) &&
!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
/* save this chunk for later processing */
auth_skipped = 1;
auth_offset = *offset;
@ -4034,7 +4042,7 @@ __attribute__((noinline))
(ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) &&
(SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
/* implied cookie-ack.. we must have lost the ack */
if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
stcb->asoc.overall_error_count,
0,
@ -4133,9 +4141,9 @@ __attribute__((noinline))
#endif
/* check to see if this chunk required auth, but isn't */
if ((stcb != NULL) && !sctp_auth_disable &&
sctp_auth_is_required_chunk(ch->chunk_type,
stcb->asoc.local_auth_chunks) &&
if ((stcb != NULL) &&
!SCTP_BASE_SYSCTL(sctp_auth_disable) &&
sctp_auth_is_required_chunk(ch->chunk_type, stcb->asoc.local_auth_chunks) &&
!stcb->asoc.authenticated) {
/* "silently" ignore */
SCTP_STAT_INCR(sctps_recvauthmissing);
@ -4163,7 +4171,7 @@ __attribute__((noinline))
}
if ((chk_length > SCTP_LARGEST_INIT_ACCEPTED) ||
(num_chunks > 1) ||
(sctp_strict_init && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
(SCTP_BASE_SYSCTL(sctp_strict_init) && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
*offset = length;
if (locked_tcb) {
SCTP_TCB_UNLOCK(locked_tcb);
@ -4228,7 +4236,7 @@ __attribute__((noinline))
}
}
if ((num_chunks > 1) ||
(sctp_strict_init && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
(SCTP_BASE_SYSCTL(sctp_strict_init) && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
*offset = length;
if (locked_tcb) {
SCTP_TCB_UNLOCK(locked_tcb);
@ -4332,7 +4340,7 @@ __attribute__((noinline))
chk_length, *netp);
/* He's alive so give him credit */
if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
stcb->asoc.overall_error_count,
0,
@ -4353,7 +4361,7 @@ __attribute__((noinline))
return (NULL);
}
/* He's alive so give him credit */
if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
stcb->asoc.overall_error_count,
0,
@ -4434,7 +4442,7 @@ __attribute__((noinline))
if ((stcb == NULL) && (inp->sctp_socket->so_qlen >= inp->sctp_socket->so_qlimit)) {
if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
(sctp_abort_if_one_2_one_hits_limit)) {
(SCTP_BASE_SYSCTL(sctp_abort_if_one_2_one_hits_limit))) {
struct mbuf *oper;
struct sctp_paramhdr *phdr;
@ -4548,7 +4556,7 @@ __attribute__((noinline))
}
/* He's alive so give him credit */
if ((stcb) && netp && *netp) {
if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
stcb->asoc.overall_error_count,
0,
@ -4571,7 +4579,7 @@ __attribute__((noinline))
return (NULL);
}
if (stcb) {
if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
stcb->asoc.overall_error_count,
0,
@ -4595,7 +4603,7 @@ __attribute__((noinline))
return (NULL);
}
if (stcb) {
if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
stcb->asoc.overall_error_count,
0,
@ -4628,7 +4636,7 @@ __attribute__((noinline))
SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n");
/* He's alive so give him credit */
if (stcb) {
if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
stcb->asoc.overall_error_count,
0,
@ -4653,7 +4661,7 @@ __attribute__((noinline))
}
if ((stcb) && netp && *netp) {
/* He's alive so give him credit */
if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
stcb->asoc.overall_error_count,
0,
@ -4682,7 +4690,7 @@ __attribute__((noinline))
int abort_flag = 0;
stcb->asoc.overall_error_count = 0;
if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
stcb->asoc.overall_error_count,
0,
@ -4713,7 +4721,7 @@ __attribute__((noinline))
*offset = length;
return (NULL);
} else {
if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
stcb->asoc.overall_error_count,
0,
@ -4787,7 +4795,7 @@ __attribute__((noinline))
case SCTP_AUTHENTICATION:
SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_AUTHENTICATION\n");
if (sctp_auth_disable)
if (SCTP_BASE_SYSCTL(sctp_auth_disable))
goto unknown_chunk;
if (stcb == NULL) {
@ -4851,7 +4859,7 @@ __attribute__((noinline))
M_DONTWAIT);
if (SCTP_BUF_NEXT(mm)) {
#ifdef SCTP_MBUF_LOGGING
if (sctp_logging_level & SCTP_MBUF_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
struct mbuf *mat;
mat = SCTP_BUF_NEXT(mm);
@ -5005,8 +5013,8 @@ sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset,
sctp_auditing(0, inp, stcb, net);
#endif
SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d stcb:%p\n",
m, iphlen, offset, stcb);
SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d length:%d stcb:%p\n",
m, iphlen, offset, length, stcb);
if (stcb) {
/* always clear this before beginning a packet */
stcb->asoc.authenticated = 0;
@ -5039,6 +5047,12 @@ sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset,
* it changes our INP.
*/
inp = stcb->sctp_ep;
if ((net) && (port)) {
if (net->port == 0) {
sctp_pathmtu_adjustment(inp, stcb, net, net->mtu - sizeof(struct udphdr));
}
net->port = port;
}
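The new block above records the peer's UDP encapsulation port and, the first time tunneling is noticed on the path (net->port still zero), shrinks the path MTU by the size of the outer UDP header so that encapsulated packets still fit. The adjustment itself is an eight-byte subtraction; a minimal illustration with a hypothetical helper name:

#include <netinet/udp.h>
#include <stdint.h>
#include <stdio.h>

/* Once the peer is known to tunnel over UDP, make room for the outer header. */
static uint32_t tunnel_adjusted_mtu(uint32_t path_mtu)
{
    return (path_mtu - sizeof(struct udphdr));   /* e.g. 1500 -> 1492 */
}

int main(void)
{
    printf("%u\n", tunnel_adjusted_mtu(1500));   /* prints 1492 */
    return (0);
}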
}
} else {
/*
@ -5051,9 +5065,9 @@ sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset,
* can't have authenticated without any AUTH (control)
* chunks
*/
if ((stcb != NULL) && !sctp_auth_disable &&
sctp_auth_is_required_chunk(SCTP_DATA,
stcb->asoc.local_auth_chunks)) {
if ((stcb != NULL) &&
!SCTP_BASE_SYSCTL(sctp_auth_disable) &&
sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks)) {
/* "silently" ignore */
SCTP_STAT_INCR(sctps_recvauthmissing);
SCTP_TCB_UNLOCK(stcb);
@ -5090,9 +5104,10 @@ sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset,
* Rest should be DATA only. Check authentication state if AUTH for
* DATA is required.
*/
if ((length > offset) && (stcb != NULL) && !sctp_auth_disable &&
sctp_auth_is_required_chunk(SCTP_DATA,
stcb->asoc.local_auth_chunks) &&
if ((length > offset) &&
(stcb != NULL) &&
!SCTP_BASE_SYSCTL(sctp_auth_disable) &&
sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks) &&
!stcb->asoc.authenticated) {
/* "silently" ignore */
SCTP_STAT_INCR(sctps_recvauthmissing);
@ -5115,7 +5130,7 @@ sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset,
* shows us the cookie-ack was lost. Imply it was
* there.
*/
if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
stcb->asoc.overall_error_count,
0,
@ -5266,7 +5281,7 @@ sctp_input_with_port(i_pak, off, port)
#ifdef SCTP_MBUF_LOGGING
/* Log in any input mbufs */
if (sctp_logging_level & SCTP_MBUF_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
mat = m;
while (mat) {
if (SCTP_BUF_IS_EXTENDED(mat)) {
@ -5277,7 +5292,7 @@ sctp_input_with_port(i_pak, off, port)
}
#endif
#ifdef SCTP_PACKET_LOGGING
if (sctp_logging_level & SCTP_LAST_PACKET_TRACING)
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
sctp_packet_log(m, mlen);
#endif
/*
@ -5316,7 +5331,7 @@ sctp_input_with_port(i_pak, off, port)
}
/* validate SCTP checksum */
check = sh->checksum; /* save incoming checksum */
if ((check == 0) && (sctp_no_csum_on_loopback) &&
if ((check == 0) && (SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback)) &&
((ip->ip_src.s_addr == ip->ip_dst.s_addr) ||
(SCTP_IS_IT_LOOPBACK(m)))
) {

View File

@ -66,7 +66,7 @@
* When working with the global SCTP lists we lock and unlock the INP_INFO
* lock. So when we go to lookup an association we will want to do a
* SCTP_INP_INFO_RLOCK() and then when we want to add a new association to
* the sctppcbinfo list's we will do a SCTP_INP_INFO_WLOCK().
* the SCTP_BASE_INFO() list's we will do a SCTP_INP_INFO_WLOCK().
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
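The comment above spells out the reader/writer discipline for the global tables: lookups take the INP_INFO lock shared, additions take it exclusive, via the rw_rlock()/rw_wlock() wrappers defined further down in this header. A user-space model of the same discipline, built on POSIX rwlocks with invented names rather than the kernel primitives:

#include <pthread.h>
#include <sys/queue.h>

struct demo_assoc {
    LIST_ENTRY(demo_assoc) next;
    int id;
};
static LIST_HEAD(, demo_assoc) demo_list = LIST_HEAD_INITIALIZER(demo_list);
static pthread_rwlock_t demo_info_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Lookup: a shared lock is enough, many readers may search at once. */
static struct demo_assoc *demo_lookup(int id)
{
    struct demo_assoc *a, *found = NULL;

    pthread_rwlock_rdlock(&demo_info_lock);
    LIST_FOREACH(a, &demo_list, next) {
        if (a->id == id) {
            found = a;
            break;
        }
    }
    pthread_rwlock_unlock(&demo_info_lock);
    return (found);
}

/* Insert: exclusive lock, since the list head itself is modified. */
static void demo_insert(struct demo_assoc *a)
{
    pthread_rwlock_wrlock(&demo_info_lock);
    LIST_INSERT_HEAD(&demo_list, a, next);
    pthread_rwlock_unlock(&demo_info_lock);
}

int main(void)
{
    struct demo_assoc a = { .id = 42 };

    demo_insert(&a);
    return (demo_lookup(42) == &a ? 0 : 1);
}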
@ -83,80 +83,80 @@ extern int sctp_logoff_stuff;
#define SCTP_STATLOG_DESTROY()
#define SCTP_INP_INFO_LOCK_DESTROY() do { \
if(rw_wowned(&sctppcbinfo.ipi_ep_mtx)) { \
rw_wunlock(&sctppcbinfo.ipi_ep_mtx); \
if(rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx))) { \
rw_wunlock(&SCTP_BASE_INFO(ipi_ep_mtx)); \
} \
rw_destroy(&sctppcbinfo.ipi_ep_mtx); \
rw_destroy(&SCTP_BASE_INFO(ipi_ep_mtx)); \
} while (0)
#define SCTP_INP_INFO_LOCK_INIT() \
rw_init(&sctppcbinfo.ipi_ep_mtx, "sctp-info");
rw_init(&SCTP_BASE_INFO(ipi_ep_mtx), "sctp-info");
#define SCTP_INP_INFO_RLOCK() do { \
rw_rlock(&sctppcbinfo.ipi_ep_mtx); \
rw_rlock(&SCTP_BASE_INFO(ipi_ep_mtx)); \
} while (0)
#define SCTP_INP_INFO_WLOCK() do { \
rw_wlock(&sctppcbinfo.ipi_ep_mtx); \
rw_wlock(&SCTP_BASE_INFO(ipi_ep_mtx)); \
} while (0)
#define SCTP_INP_INFO_RUNLOCK() rw_runlock(&sctppcbinfo.ipi_ep_mtx)
#define SCTP_INP_INFO_WUNLOCK() rw_wunlock(&sctppcbinfo.ipi_ep_mtx)
#define SCTP_INP_INFO_RUNLOCK() rw_runlock(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_WUNLOCK() rw_wunlock(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_IPI_ADDR_INIT() \
rw_init(&sctppcbinfo.ipi_addr_mtx, "sctp-addr")
rw_init(&SCTP_BASE_INFO(ipi_addr_mtx), "sctp-addr")
#define SCTP_IPI_ADDR_DESTROY() do { \
if(rw_wowned(&sctppcbinfo.ipi_addr_mtx)) { \
rw_wunlock(&sctppcbinfo.ipi_addr_mtx); \
if(rw_wowned(&SCTP_BASE_INFO(ipi_addr_mtx))) { \
rw_wunlock(&SCTP_BASE_INFO(ipi_addr_mtx)); \
} \
rw_destroy(&sctppcbinfo.ipi_addr_mtx); \
rw_destroy(&SCTP_BASE_INFO(ipi_addr_mtx)); \
} while (0)
#define SCTP_IPI_ADDR_RLOCK() do { \
rw_rlock(&sctppcbinfo.ipi_addr_mtx); \
rw_rlock(&SCTP_BASE_INFO(ipi_addr_mtx)); \
} while (0)
#define SCTP_IPI_ADDR_WLOCK() do { \
rw_wlock(&sctppcbinfo.ipi_addr_mtx); \
rw_wlock(&SCTP_BASE_INFO(ipi_addr_mtx)); \
} while (0)
#define SCTP_IPI_ADDR_RUNLOCK() rw_runlock(&sctppcbinfo.ipi_addr_mtx)
#define SCTP_IPI_ADDR_WUNLOCK() rw_wunlock(&sctppcbinfo.ipi_addr_mtx)
#define SCTP_IPI_ADDR_RUNLOCK() rw_runlock(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_WUNLOCK() rw_wunlock(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ITERATOR_WQ_INIT() \
mtx_init(&sctppcbinfo.ipi_iterator_wq_mtx, "sctp-it-wq", "sctp_it_wq", MTX_DEF)
mtx_init(&SCTP_BASE_INFO(ipi_iterator_wq_mtx), "sctp-it-wq", "sctp_it_wq", MTX_DEF)
#define SCTP_IPI_ITERATOR_WQ_DESTROY() \
mtx_destroy(&sctppcbinfo.ipi_iterator_wq_mtx)
mtx_destroy(&SCTP_BASE_INFO(ipi_iterator_wq_mtx))
#define SCTP_IPI_ITERATOR_WQ_LOCK() do { \
mtx_lock(&sctppcbinfo.ipi_iterator_wq_mtx); \
mtx_lock(&SCTP_BASE_INFO(ipi_iterator_wq_mtx)); \
} while (0)
#define SCTP_IPI_ITERATOR_WQ_UNLOCK() mtx_unlock(&sctppcbinfo.ipi_iterator_wq_mtx)
#define SCTP_IPI_ITERATOR_WQ_UNLOCK() mtx_unlock(&SCTP_BASE_INFO(ipi_iterator_wq_mtx))
#define SCTP_IP_PKTLOG_INIT() \
mtx_init(&sctppcbinfo.ipi_pktlog_mtx, "sctp-pktlog", "packetlog", MTX_DEF)
mtx_init(&SCTP_BASE_INFO(ipi_pktlog_mtx), "sctp-pktlog", "packetlog", MTX_DEF)
#define SCTP_IP_PKTLOG_LOCK() do { \
mtx_lock(&sctppcbinfo.ipi_pktlog_mtx); \
mtx_lock(&SCTP_BASE_INFO(ipi_pktlog_mtx)); \
} while (0)
#define SCTP_IP_PKTLOG_UNLOCK() mtx_unlock(&sctppcbinfo.ipi_pktlog_mtx)
#define SCTP_IP_PKTLOG_UNLOCK() mtx_unlock(&SCTP_BASE_INFO(ipi_pktlog_mtx))
#define SCTP_IP_PKTLOG_DESTROY() \
mtx_destroy(&sctppcbinfo.ipi_pktlog_mtx)
mtx_destroy(&SCTP_BASE_INFO(ipi_pktlog_mtx))
@ -300,97 +300,97 @@ extern int sctp_logoff_stuff;
#endif
#define SCTP_ITERATOR_LOCK_INIT() \
mtx_init(&sctppcbinfo.it_mtx, "sctp-it", "iterator", MTX_DEF)
mtx_init(&SCTP_BASE_INFO(it_mtx), "sctp-it", "iterator", MTX_DEF)
#ifdef INVARIANTS
#define SCTP_ITERATOR_LOCK() \
do { \
if (mtx_owned(&sctppcbinfo.it_mtx)) \
if (mtx_owned(&SCTP_BASE_INFO(it_mtx))) \
panic("Iterator Lock"); \
mtx_lock(&sctppcbinfo.it_mtx); \
mtx_lock(&SCTP_BASE_INFO(it_mtx)); \
} while (0)
#else
#define SCTP_ITERATOR_LOCK() \
do { \
mtx_lock(&sctppcbinfo.it_mtx); \
mtx_lock(&SCTP_BASE_INFO(it_mtx)); \
} while (0)
#endif
#define SCTP_ITERATOR_UNLOCK() mtx_unlock(&sctppcbinfo.it_mtx)
#define SCTP_ITERATOR_LOCK_DESTROY() mtx_destroy(&sctppcbinfo.it_mtx)
#define SCTP_ITERATOR_UNLOCK() mtx_unlock(&SCTP_BASE_INFO(it_mtx))
#define SCTP_ITERATOR_LOCK_DESTROY() mtx_destroy(&SCTP_BASE_INFO(it_mtx))
#define SCTP_INCR_EP_COUNT() \
do { \
atomic_add_int(&sctppcbinfo.ipi_count_ep, 1); \
atomic_add_int(&SCTP_BASE_INFO(ipi_count_ep), 1); \
} while (0)
#define SCTP_DECR_EP_COUNT() \
do { \
atomic_subtract_int(&sctppcbinfo.ipi_count_ep, 1); \
atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_ep), 1); \
} while (0)
#define SCTP_INCR_ASOC_COUNT() \
do { \
atomic_add_int(&sctppcbinfo.ipi_count_asoc, 1); \
atomic_add_int(&SCTP_BASE_INFO(ipi_count_asoc), 1); \
} while (0)
#define SCTP_DECR_ASOC_COUNT() \
do { \
atomic_subtract_int(&sctppcbinfo.ipi_count_asoc, 1); \
atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_asoc), 1); \
} while (0)
#define SCTP_INCR_LADDR_COUNT() \
do { \
atomic_add_int(&sctppcbinfo.ipi_count_laddr, 1); \
atomic_add_int(&SCTP_BASE_INFO(ipi_count_laddr), 1); \
} while (0)
#define SCTP_DECR_LADDR_COUNT() \
do { \
atomic_subtract_int(&sctppcbinfo.ipi_count_laddr, 1); \
atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_laddr), 1); \
} while (0)
#define SCTP_INCR_RADDR_COUNT() \
do { \
atomic_add_int(&sctppcbinfo.ipi_count_raddr, 1); \
atomic_add_int(&SCTP_BASE_INFO(ipi_count_raddr), 1); \
} while (0)
#define SCTP_DECR_RADDR_COUNT() \
do { \
atomic_subtract_int(&sctppcbinfo.ipi_count_raddr,1); \
atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_raddr),1); \
} while (0)
#define SCTP_INCR_CHK_COUNT() \
do { \
atomic_add_int(&sctppcbinfo.ipi_count_chunk, 1); \
atomic_add_int(&SCTP_BASE_INFO(ipi_count_chunk), 1); \
} while (0)
#define SCTP_DECR_CHK_COUNT() \
do { \
if(sctppcbinfo.ipi_count_chunk == 0) \
if(SCTP_BASE_INFO(ipi_count_chunk) == 0) \
panic("chunk count to 0?"); \
atomic_subtract_int(&sctppcbinfo.ipi_count_chunk, 1); \
atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_chunk), 1); \
} while (0)
#define SCTP_INCR_READQ_COUNT() \
do { \
atomic_add_int(&sctppcbinfo.ipi_count_readq,1); \
atomic_add_int(&SCTP_BASE_INFO(ipi_count_readq),1); \
} while (0)
#define SCTP_DECR_READQ_COUNT() \
do { \
atomic_subtract_int(&sctppcbinfo.ipi_count_readq, 1); \
atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_readq), 1); \
} while (0)
#define SCTP_INCR_STRMOQ_COUNT() \
do { \
atomic_add_int(&sctppcbinfo.ipi_count_strmoq, 1); \
atomic_add_int(&SCTP_BASE_INFO(ipi_count_strmoq), 1); \
} while (0)
#define SCTP_DECR_STRMOQ_COUNT() \
do { \
atomic_subtract_int(&sctppcbinfo.ipi_count_strmoq, 1); \
atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_strmoq), 1); \
} while (0)

View File

@ -132,6 +132,11 @@ MALLOC_DECLARE(SCTP_M_SOCKOPT);
#define SCTP_CTR6 CTR6
#endif
#define SCTP_BASE_INFO(__m) system_base_info.sctppcbinfo.__m
#define SCTP_BASE_STATS system_base_info.sctpstat
#define SCTP_BASE_STAT(__m) system_base_info.sctpstat.__m
#define SCTP_BASE_SYSCTL(__m) system_base_info.sctpsysctl.__m
#define SCTP_BASE_VAR(__m) system_base_info.__m
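All four accessors above resolve to fields of the single system_base_info object, so the rest of the stack never names the old globals directly and the choice of backing storage stays in one place: swapping in a different instance later only means changing what the macros expand to. A minimal user-space sketch of the pattern, with names invented for the example:

#include <stdint.h>
#include <stdio.h>

/* Former globals collected into one structure. */
struct demo_sysctl {
    uint32_t logging_level;
    uint32_t cmt_on_off;
};
struct demo_base_info {
    struct demo_sysctl sysctl;
};

static struct demo_base_info demo_base;          /* one instance today... */

/* ...and only this expansion changes if that ever becomes a lookup. */
#define DEMO_BASE_SYSCTL(__m) (demo_base.sysctl.__m)

int main(void)
{
    DEMO_BASE_SYSCTL(logging_level) = 0x4;       /* writes go through the macro */
    if (DEMO_BASE_SYSCTL(logging_level) & 0x4)   /* ...and so do reads */
        printf("logging bit is set\n");
    return (0);
}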
/*
*
@ -143,7 +148,7 @@ MALLOC_DECLARE(SCTP_M_SOCKOPT);
#define SCTPDBG(level, params...) \
{ \
do { \
if (sctp_debug_on & level ) { \
if (SCTP_BASE_SYSCTL(sctp_debug_on) & level ) { \
printf(params); \
} \
} while (0); \
@ -151,7 +156,7 @@ MALLOC_DECLARE(SCTP_M_SOCKOPT);
#define SCTPDBG_ADDR(level, addr) \
{ \
do { \
if (sctp_debug_on & level ) { \
if (SCTP_BASE_SYSCTL(sctp_debug_on) & level ) { \
sctp_print_address(addr); \
} \
} while (0); \
@ -159,7 +164,7 @@ MALLOC_DECLARE(SCTP_M_SOCKOPT);
#define SCTPDBG_PKT(level, iph, sh) \
{ \
do { \
if (sctp_debug_on & level) { \
if (SCTP_BASE_SYSCTL(sctp_debug_on) & level) { \
sctp_print_address_pkt(iph, sh); \
} \
} while (0); \
@ -278,8 +283,6 @@ typedef struct callout sctp_os_timer_t;
#define sctp_get_tick_count() (ticks)
/* The packed define for 64 bit platforms */
#define SCTP_PACKED __attribute__((packed))
#define SCTP_UNUSED __attribute__((unused))
/*
@ -350,6 +353,10 @@ typedef struct callout sctp_os_timer_t;
#define SCTP_GET_HEADER_FOR_OUTPUT(o_pak) 0
#define SCTP_RELEASE_HEADER(m)
#define SCTP_RELEASE_PKT(m) sctp_m_freem(m)
#define SCTP_ENABLE_UDP_CSUM(m) do { \
m->m_pkthdr.csum_flags = CSUM_UDP; \
m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); \
} while (0)
#define SCTP_GET_PKT_VRFID(m, vrf_id) ((vrf_id = SCTP_DEFAULT_VRFID) != SCTP_DEFAULT_VRFID)
@ -405,7 +412,7 @@ typedef struct callout sctp_os_timer_t;
typedef struct route sctp_route_t;
typedef struct rtentry sctp_rtentry_t;
#define SCTP_RTALLOC(ro, vrf_id) in_rtalloc_ign((struct route *)ro, 0UL, vrf_id)
#define SCTP_RTALLOC(ro, vrf_id) rtalloc_ign((struct route *)ro, 0UL)
/* Future zero copy wakeup/send function */
#define SCTP_ZERO_COPY_EVENT(inp, so)

View File

@ -50,6 +50,7 @@ __FBSDID("$FreeBSD$");
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_input.h>
#include <netinet/udp.h>
#include <machine/in_cksum.h>
@ -3251,7 +3252,7 @@ sctp_add_cookie(struct sctp_inpcb *inp, struct mbuf *init, int init_offset,
return (NULL);
}
#ifdef SCTP_MBUF_LOGGING
if (sctp_logging_level & SCTP_MBUF_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
struct mbuf *mat;
mat = copy_init;
@ -3271,7 +3272,7 @@ sctp_add_cookie(struct sctp_inpcb *inp, struct mbuf *init, int init_offset,
return (NULL);
}
#ifdef SCTP_MBUF_LOGGING
if (sctp_logging_level & SCTP_MBUF_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
struct mbuf *mat;
mat = copy_initack;
@ -3345,10 +3346,10 @@ sctp_get_ect(struct sctp_tcb *stcb,
uint8_t this_random;
/* Huh? */
if (sctp_ecn_enable == 0)
if (SCTP_BASE_SYSCTL(sctp_ecn_enable) == 0)
return (0);
if (sctp_ecn_nonce == 0)
if (SCTP_BASE_SYSCTL(sctp_ecn_nonce) == 0)
/* no nonce, always return ECT0 */
return (SCTP_ECT0_BIT);
@ -3433,7 +3434,12 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
int ret;
uint32_t vrf_id;
sctp_route_t *ro = NULL;
struct udphdr *udp;
struct udphdr *udp = NULL;
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
struct socket *so = NULL;
#endif
if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
@ -3452,7 +3458,7 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
}
/* Calculate the csum and fill in the length of the packet */
sctphdr = mtod(m, struct sctphdr *);
if (sctp_no_csum_on_loopback &&
if (SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
(stcb) &&
(to->sa_family == AF_INET) &&
(stcb->asoc.loopback_scope)) {
@ -3589,10 +3595,10 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
}
if (port) {
udp = (struct udphdr *)(ip + 1);
udp->uh_sport = htons(sctp_udp_tunneling_port);
udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
udp->uh_dport = port;
udp->uh_ulen = htons(packet_length - sizeof(struct ip));
udp->uh_sum = 0;
udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
}
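For the encapsulation above, the outer UDP header sits between the IP header and the SCTP packet: the source port is the local tunneling port, the destination port is the one the packet is being tunneled to, the length covers the UDP header plus the whole SCTP packet, and the IPv4 checksum is seeded from the pseudo-header so the stack or hardware can finish it. A stand-alone sketch of the header fill, with illustrative port and length values:

#include <netinet/in.h>
#include <netinet/udp.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

/* Fill the encapsulating UDP header for an SCTP packet of sctp_len bytes. */
static void fill_tunnel_udphdr(struct udphdr *udp, uint16_t local_port,
    uint16_t peer_port, uint16_t sctp_len)
{
    udp->uh_sport = htons(local_port);              /* our tunneling port */
    udp->uh_dport = htons(peer_port);               /* port the peer listens on */
    udp->uh_ulen = htons(sizeof(*udp) + sctp_len);  /* UDP header + payload */
    udp->uh_sum = 0;      /* pseudo-header seed or checksum offload comes later */
}

int main(void)
{
    struct udphdr udp;

    memset(&udp, 0, sizeof(udp));
    fill_tunnel_udphdr(&udp, 9899, 9899, 116);
    printf("uh_ulen = %u\n", (unsigned)ntohs(udp.uh_ulen));
    return (0);
}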
/*
* If source address selection fails and we find no route
@ -3650,7 +3656,7 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
* Stop any running T3
* timers here?
*/
if (sctp_cmt_on_off && sctp_cmt_pf) {
if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) {
net->dest_state &= ~SCTP_ADDR_PF;
SCTPDBG(SCTP_DEBUG_OUTPUT1, "Destination %p moved from PF to unreachable.\n",
net);
@ -3699,14 +3705,30 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
return (ENOMEM);
}
#ifdef SCTP_PACKET_LOGGING
if (sctp_logging_level & SCTP_LAST_PACKET_TRACING)
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
sctp_packet_log(m, packet_length);
#endif
SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
if (port) {
SCTP_ENABLE_UDP_CSUM(o_pak);
}
/* send it out. table id is taken from stcb */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
so = SCTP_INP_SO(inp);
SCTP_SOCKET_UNLOCK(so, 0);
}
#endif
SCTP_IP_OUTPUT(ret, o_pak, ro, stcb, vrf_id);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
atomic_add_int(&stcb->asoc.refcnt, 1);
SCTP_TCB_UNLOCK(stcb);
SCTP_SOCKET_LOCK(so, 0);
SCTP_TCB_LOCK(stcb);
atomic_subtract_int(&stcb->asoc.refcnt, 1);
}
#endif
SCTP_STAT_INCR(sctps_sendpackets);
SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
if (ret)
@ -3934,7 +3956,7 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
if (port) {
udp = (struct udphdr *)(ip6h + 1);
udp->uh_sport = htons(sctp_udp_tunneling_port);
udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
udp->uh_dport = port;
udp->uh_ulen = htons(packet_length - sizeof(struct ip6_hdr));
udp->uh_sum = 0;
@ -3970,15 +3992,32 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
return (ENOMEM);
}
#ifdef SCTP_PACKET_LOGGING
if (sctp_logging_level & SCTP_LAST_PACKET_TRACING)
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
sctp_packet_log(m, packet_length);
#endif
SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
if (port) {
if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), packet_length - sizeof(struct ip6_hdr))) == 0) {
udp->uh_sum = 0xffff;
}
}
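The 0 -> 0xffff substitution above is the RFC 768 rule for UDP: a transmitted checksum of zero means "no checksum was computed", so a checksum that genuinely works out to zero goes on the wire as the equivalent one's-complement value 0xffff instead (and over IPv6 the checksum is mandatory, so it cannot simply be left at zero). A one-function restatement of the rule:

#include <stdint.h>
#include <stdio.h>

/* RFC 768: 0 on the wire means "no checksum", so a computed 0 becomes 0xffff. */
static uint16_t udp_wire_csum(uint16_t computed)
{
    return (computed == 0 ? 0xffff : computed);
}

int main(void)
{
    printf("0x%04x 0x%04x\n", (unsigned)udp_wire_csum(0), (unsigned)udp_wire_csum(0x1234));
    return (0);
}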
/* send it out. table id is taken from stcb */
SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, &ifp,
stcb, vrf_id);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
so = SCTP_INP_SO(inp);
SCTP_SOCKET_UNLOCK(so, 0);
}
#endif
SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, &ifp, stcb, vrf_id);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
atomic_add_int(&stcb->asoc.refcnt, 1);
SCTP_TCB_UNLOCK(stcb);
SCTP_SOCKET_LOCK(so, 0);
SCTP_TCB_LOCK(stcb);
atomic_subtract_int(&stcb->asoc.refcnt, 1);
}
#endif
if (net) {
/* for link local this must be done */
sin6->sin6_scope_id = prev_scope;
@ -4185,7 +4224,7 @@ sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
stcb->asoc.cookie_preserve_req = 0;
}
/* ECN parameter */
if (sctp_ecn_enable == 1) {
if (SCTP_BASE_SYSCTL(sctp_ecn_enable) == 1) {
ecn->ph.param_type = htons(SCTP_ECN_CAPABLE);
ecn->ph.param_length = htons(sizeof(*ecn));
SCTP_BUF_LEN(m) += sizeof(*ecn);
@ -4209,7 +4248,7 @@ sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
if (!sctp_auth_disable)
if (!SCTP_BASE_SYSCTL(sctp_auth_disable))
pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
p_len = sizeof(*pr_supported) + num_ext;
pr_supported->ph.param_length = htons(p_len);
@ -4217,7 +4256,7 @@ sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
/* ECN nonce: And now tell the peer we support ECN nonce */
if (sctp_ecn_nonce) {
if (SCTP_BASE_SYSCTL(sctp_ecn_nonce)) {
ecn_nonce = (struct sctp_ecn_nonce_supported_param *)
((caddr_t)pr_supported + SCTP_SIZE32(p_len));
ecn_nonce->ph.param_type = htons(SCTP_ECN_NONCE_SUPPORTED);
@ -4225,7 +4264,7 @@ sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
SCTP_BUF_LEN(m) += sizeof(*ecn_nonce);
}
/* add authentication parameters */
if (!sctp_auth_disable) {
if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
struct sctp_auth_random *randp;
struct sctp_auth_hmac_algo *hmacs;
struct sctp_auth_chunk_list *chunks;
@ -5299,7 +5338,7 @@ sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
}
/* ECN parameter */
if (sctp_ecn_enable == 1) {
if (SCTP_BASE_SYSCTL(sctp_ecn_enable) == 1) {
ecn->ph.param_type = htons(SCTP_ECN_CAPABLE);
ecn->ph.param_length = htons(sizeof(*ecn));
SCTP_BUF_LEN(m) += sizeof(*ecn);
@ -5325,7 +5364,7 @@ sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
if (!sctp_auth_disable)
if (!SCTP_BASE_SYSCTL(sctp_auth_disable))
pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
p_len = sizeof(*pr_supported) + num_ext;
pr_supported->ph.param_length = htons(p_len);
@ -5333,7 +5372,7 @@ sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
/* ECN nonce: And now tell the peer we support ECN nonce */
if (sctp_ecn_nonce) {
if (SCTP_BASE_SYSCTL(sctp_ecn_nonce)) {
ecn_nonce = (struct sctp_ecn_nonce_supported_param *)
((caddr_t)pr_supported + SCTP_SIZE32(p_len));
ecn_nonce->ph.param_type = htons(SCTP_ECN_NONCE_SUPPORTED);
@ -5341,7 +5380,7 @@ sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
SCTP_BUF_LEN(m) += sizeof(*ecn_nonce);
}
/* add authentication parameters */
if (!sctp_auth_disable) {
if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
struct sctp_auth_random *randp;
struct sctp_auth_hmac_algo *hmacs;
struct sctp_auth_chunk_list *chunks;
@ -5868,7 +5907,7 @@ sctp_copy_mbufchain(struct mbuf *clonechain,
appendchain = clonechain;
} else {
if (!copy_by_ref &&
(sizeofcpy <= (int)((((sctp_mbuf_threshold_count - 1) * MLEN) + MHLEN)))
(sizeofcpy <= (int)((((SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) - 1) * MLEN) + MHLEN)))
) {
/* Its not in a cluster */
if (*endofchain == NULL) {
@ -5946,7 +5985,7 @@ sctp_copy_mbufchain(struct mbuf *clonechain,
/* copy the old fashion way */
appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_DONTWAIT);
#ifdef SCTP_MBUF_LOGGING
if (sctp_logging_level & SCTP_MBUF_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
struct mbuf *mat;
mat = appendchain;
@ -6046,7 +6085,7 @@ sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
return;
}
#ifdef SCTP_MBUF_LOGGING
if (sctp_logging_level & SCTP_MBUF_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
struct mbuf *mat;
mat = m;
@ -6187,7 +6226,7 @@ sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
}
}
un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) * sizeof(struct sctp_data_chunk)));
(stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
(stcb->asoc.total_flight > 0) &&
@ -6463,7 +6502,7 @@ sctp_clean_up_datalist(struct sctp_tcb *stcb,
data_list[i]->sent = SCTP_DATAGRAM_SENT;
data_list[i]->snd_count = 1;
data_list[i]->rec.data.chunk_was_revoked = 0;
if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
data_list[i]->whoTo->flight_size,
data_list[i]->book_size,
@ -6472,12 +6511,12 @@ sctp_clean_up_datalist(struct sctp_tcb *stcb,
}
sctp_flight_size_increase(data_list[i]);
sctp_total_flight_increase(stcb, data_list[i]);
if (sctp_logging_level & SCTP_LOG_RWND_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
asoc->peers_rwnd, data_list[i]->send_size, sctp_peer_chunk_oh);
asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
}
asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
(uint32_t) (data_list[i]->send_size + sctp_peer_chunk_oh));
(uint32_t) (data_list[i]->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
/* SWS sender side engages */
asoc->peers_rwnd = 0;
@ -6566,7 +6605,7 @@ sctp_can_we_split_this(struct sctp_tcb *stcb,
return (length);
}
if ((length <= goal_mtu) ||
((length - goal_mtu) < sctp_min_residual)) {
((length - goal_mtu) < SCTP_BASE_SYSCTL(sctp_min_residual))) {
/* Sub-optimial residual don't split in non-eeor mode. */
return (0);
}
@ -6574,7 +6613,7 @@ sctp_can_we_split_this(struct sctp_tcb *stcb,
* If we reach here length is larger than the goal_mtu. Do we wish
* to split it for the sake of packet putting together?
*/
if (goal_mtu >= min(sctp_min_split_point, frag_point)) {
if (goal_mtu >= min(SCTP_BASE_SYSCTL(sctp_min_split_point), frag_point)) {
/* Its ok to split it */
return (min(goal_mtu, frag_point));
}
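The splitting test above comes down to three checks: a message that already fits the goal MTU, or whose leftover piece would be smaller than the minimum residual, is left whole; otherwise it is split only when the goal MTU reaches the minimum split point (capped by the fragmentation point). A compact restatement of the visible checks; the constants in the example are illustrative, not the sysctl defaults:

#include <stdint.h>
#include <stdio.h>

static uint32_t umin(uint32_t a, uint32_t b)
{
    return (a < b ? a : b);
}

/* Returns how many bytes to move now; 0 means "do not split". */
static uint32_t can_we_split(uint32_t length, uint32_t goal_mtu,
    uint32_t frag_point, uint32_t min_residual, uint32_t min_split_point)
{
    if (length <= goal_mtu || (length - goal_mtu) < min_residual)
        return (0);                             /* sub-optimal residual, keep it whole */
    if (goal_mtu >= umin(min_split_point, frag_point))
        return (umin(goal_mtu, frag_point));    /* ok to split */
    return (0);
}

int main(void)
{
    /* 4000-byte message, 1460 bytes of room, 1452-byte fragmentation point. */
    printf("%u\n", can_we_split(4000, 1460, 1452, 1452, 2904));
    return (0);
}

With 1460 bytes of room against a 1452-byte fragmentation point, the 4000-byte message in the example gives up a 1452-byte piece.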
@ -6805,7 +6844,7 @@ sctp_move_to_outqueue(struct sctp_tcb *stcb, struct sctp_nets *net,
goto out_of;
}
#ifdef SCTP_MBUF_LOGGING
if (sctp_logging_level & SCTP_MBUF_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
struct mbuf *mat;
mat = chk->data;
@ -6951,7 +6990,7 @@ sctp_move_to_outqueue(struct sctp_tcb *stcb, struct sctp_nets *net,
atomic_add_int(&chk->whoTo->ref_count, 1);
chk->rec.data.TSN_seq = atomic_fetchadd_int(&asoc->sending_seq, 1);
if (sctp_logging_level & SCTP_LOG_AT_SEND_2_OUTQ) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_OUTQ) {
sctp_misc_ints(SCTP_STRMOUT_LOG_SEND,
(uintptr_t) stcb, sp->length,
(uint32_t) ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq),
@ -7131,7 +7170,7 @@ sctp_fill_outqueue(struct sctp_tcb *stcb,
if (sp == NULL) {
break;
}
if ((sp->net != net) && (sctp_cmt_on_off == 0)) {
if ((sp->net != net) && (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0)) {
/* none for this network */
if (locked) {
break;
@ -7189,11 +7228,11 @@ sctp_fill_outqueue(struct sctp_tcb *stcb,
*quit_now = 1;
if (total_moved == 0) {
if ((sctp_cmt_on_off == 0) &&
if ((SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) &&
(net == stcb->asoc.primary_destination)) {
/* ran dry for primary network net */
SCTP_STAT_INCR(sctps_primary_randry);
} else if (sctp_cmt_on_off) {
} else if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
/* ran dry with CMT on */
SCTP_STAT_INCR(sctps_cmt_randry);
}
@ -7226,7 +7265,7 @@ sctp_move_to_an_alt(struct sctp_tcb *stcb,
* destination using the PF algorithm for finding alternate
* destinations.
*/
if (sctp_cmt_on_off && sctp_cmt_pf) {
if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) {
a_net = sctp_find_alternate_net(stcb, net, 2);
} else {
a_net = sctp_find_alternate_net(stcb, net, 0);
@ -7340,7 +7379,7 @@ sctp_med_chunk_output(struct sctp_inpcb *inp,
}
}
if ((no_data_chunks == 0) && (!TAILQ_EMPTY(&asoc->out_wheel))) {
if (sctp_cmt_on_off) {
if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
/*
* for CMT we start at the next one past the one we
* last added data to.
@ -7392,10 +7431,12 @@ sctp_med_chunk_output(struct sctp_inpcb *inp,
* JRI: if dest is in PF state, do not send data to
* it
*/
if (sctp_cmt_on_off && sctp_cmt_pf && (net->dest_state & SCTP_ADDR_PF)) {
if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) &&
SCTP_BASE_SYSCTL(sctp_cmt_pf) &&
(net->dest_state & SCTP_ADDR_PF)) {
continue;
}
if ((sctp_cmt_on_off == 0) && (net->ref_count < 2)) {
if ((SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) && (net->ref_count < 2)) {
/* nothing can be in queue for this guy */
continue;
}
@ -7415,7 +7456,7 @@ sctp_med_chunk_output(struct sctp_inpcb *inp,
* copied out of the stream buffers. Note mostly
* copy by reference (we hope).
*/
if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FILL_OUTQ_CALLED);
}
sctp_fill_outqueue(stcb, net, frag_point, eeor_mode, &quit_now);
@ -7492,7 +7533,7 @@ sctp_med_chunk_output(struct sctp_inpcb *inp,
ifp = net->ro.ro_rt->rt_ifp;
if ((ifp->if_snd.ifq_len + 2) >= ifp->if_snd.ifq_maxlen) {
SCTP_STAT_INCR(sctps_ifnomemqueued);
if (sctp_logging_level & SCTP_LOG_MAXBURST_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
sctp_log_maxburst(stcb, net, ifp->if_snd.ifq_len, ifp->if_snd.ifq_maxlen, SCTP_MAX_IFP_APPLIED);
}
continue;
@ -8138,8 +8179,11 @@ sctp_med_chunk_output(struct sctp_inpcb *inp,
* restart it.
*/
sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
} else if (sctp_cmt_on_off && sctp_cmt_pf && pf_hbflag && ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)
&& (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
} else if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) &&
SCTP_BASE_SYSCTL(sctp_cmt_pf) &&
pf_hbflag &&
((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF) &&
(!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
/*
* JRS 5/14/07 - If a HB has been sent to a
* PF destination and no T3 timer is
@ -8243,7 +8287,7 @@ sctp_med_chunk_output(struct sctp_inpcb *inp,
data_list[0]->do_rtt = 1;
SCTP_STAT_INCR_BY(sctps_senddata, bundle_at);
sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net);
if (sctp_early_fr) {
if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
if (net->flight_size < net->cwnd) {
/* start or restart it */
if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
@ -8266,7 +8310,7 @@ sctp_med_chunk_output(struct sctp_inpcb *inp,
break;
}
}
if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND);
}
}
@ -8280,7 +8324,7 @@ sctp_med_chunk_output(struct sctp_inpcb *inp,
* At the end there should be no NON timed chunks hanging on this
* queue.
*/
if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND);
}
if ((*num_out == 0) && (*reason_code == 0)) {
@ -8383,7 +8427,7 @@ sctp_send_cookie_echo(struct mbuf *m,
return (-2);
}
#ifdef SCTP_MBUF_LOGGING
if (sctp_logging_level & SCTP_MBUF_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
struct mbuf *mat;
mat = cookie;
@ -8457,7 +8501,7 @@ sctp_send_heartbeat_ack(struct sctp_tcb *stcb,
return;
}
#ifdef SCTP_MBUF_LOGGING
if (sctp_logging_level & SCTP_MBUF_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
struct mbuf *mat;
mat = outchain;
@ -8729,7 +8773,7 @@ sctp_send_asconf_ack(struct sctp_tcb *stcb)
return;
}
#ifdef SCTP_MBUF_LOGGING
if (sctp_logging_level & SCTP_MBUF_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
struct mbuf *mat;
mat = m_ack;
@ -8929,11 +8973,12 @@ sctp_chunk_retransmission(struct sctp_inpcb *inp,
/* No, not sent to this net or not ready for rtx */
continue;
}
if ((sctp_max_retran_chunk) && (chk->snd_count >= sctp_max_retran_chunk)) {
if ((SCTP_BASE_SYSCTL(sctp_max_retran_chunk)) &&
(chk->snd_count >= SCTP_BASE_SYSCTL(sctp_max_retran_chunk))) {
/* Gak, we have exceeded max unlucky retran, abort! */
SCTP_PRINTF("Gak, chk->snd_count:%d >= max:%d - send abort\n",
chk->snd_count,
sctp_max_retran_chunk);
SCTP_BASE_SYSCTL(sctp_max_retran_chunk));
atomic_add_int(&stcb->asoc.refcnt, 1);
sctp_abort_an_association(stcb->sctp_ep, stcb, 0, NULL, so_locked);
SCTP_TCB_LOCK(stcb);
@ -9191,15 +9236,15 @@ sctp_chunk_retransmission(struct sctp_inpcb *inp,
} else {
if (sctp_logging_level & SCTP_LOG_RWND_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
asoc->peers_rwnd, data_list[i]->send_size, sctp_peer_chunk_oh);
asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
}
asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
(uint32_t) (data_list[i]->send_size +
sctp_peer_chunk_oh));
SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
}
if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
sctp_misc_ints(SCTP_FLIGHT_LOG_UP_RSND,
data_list[i]->whoTo->flight_size,
data_list[i]->book_size,
@ -9232,7 +9277,7 @@ sctp_chunk_retransmission(struct sctp_inpcb *inp,
}
}
}
if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND);
}
#ifdef SCTP_AUDITING_ENABLED
@ -9436,8 +9481,9 @@ sctp_chunk_output(struct sctp_inpcb *inp,
*/
if (net->ref_count > 1)
sctp_move_to_an_alt(stcb, asoc, net);
} else if (sctp_cmt_on_off && sctp_cmt_pf && ((net->dest_state & SCTP_ADDR_PF) ==
SCTP_ADDR_PF)) {
} else if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) &&
SCTP_BASE_SYSCTL(sctp_cmt_pf) &&
((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)) {
/*
* JRS 5/14/07 - If CMT PF is on and the current
* destination is in PF state, move all queued data
@ -9451,7 +9497,7 @@ sctp_chunk_output(struct sctp_inpcb *inp,
* { burst_limit = asoc->max_burst *
* SCTP_SAT_NETWORK_BURST_INCR; }
*/
if (sctp_use_cwnd_based_maxburst) {
if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst)) {
if ((net->flight_size + (burst_limit * net->mtu)) < net->cwnd) {
/*
* JRS - Use the congestion control
@ -9459,7 +9505,7 @@ sctp_chunk_output(struct sctp_inpcb *inp,
* module
*/
asoc->cc_functions.sctp_cwnd_update_after_output(stcb, net, burst_limit);
if (sctp_logging_level & SCTP_LOG_MAXBURST_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
sctp_log_maxburst(stcb, net, 0, burst_limit, SCTP_MAX_BURST_APPLIED);
}
SCTP_STAT_INCR(sctps_maxburstqueued);
@ -9482,10 +9528,10 @@ sctp_chunk_output(struct sctp_inpcb *inp,
&now, &now_filled, frag_point, so_locked);
if (error) {
SCTPDBG(SCTP_DEBUG_OUTPUT1, "Error %d was returned from med-c-op\n", error);
if (sctp_logging_level & SCTP_LOG_MAXBURST_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP);
}
if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES);
sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES);
}
@ -9495,7 +9541,7 @@ sctp_chunk_output(struct sctp_inpcb *inp,
tot_out += num_out;
burst_cnt++;
if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES);
if (num_out == 0) {
sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES);
@ -9508,8 +9554,7 @@ sctp_chunk_output(struct sctp_inpcb *inp,
* flight we stop.
*/
un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count)
* sizeof(struct sctp_data_chunk)));
(stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) &&
(stcb->asoc.total_flight > 0)) {
break;
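
Every un_sent computation in this patch changes the same way: the old estimate charged one struct sctp_data_chunk header for each built-but-unsent chunk (chunks_on_out_queue - total_flight_count), the new one charges one per message still sitting on a stream queue. A worked reading of the new expression, sketched here rather than quoted from elsewhere in the patch:

/*
 * Data that could still go on the wire right now:
 *   queued payload not yet in flight
 * + one 16-byte DATA chunk header per queued stream message
 *   (sizeof(struct sctp_data_chunk)), i.e. headers are charged per
 *   message that has not yet been chunked.
 */
un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
    (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk));
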
@ -9525,21 +9570,21 @@ sctp_chunk_output(struct sctp_inpcb *inp,
/* Nothing left to send */
break;
}
} while (num_out && (sctp_use_cwnd_based_maxburst ||
} while (num_out && (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) ||
(burst_cnt < burst_limit)));
if (sctp_use_cwnd_based_maxburst == 0) {
if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) == 0) {
if (burst_cnt >= burst_limit) {
SCTP_STAT_INCR(sctps_maxburstqueued);
asoc->burst_limit_applied = 1;
if (sctp_logging_level & SCTP_LOG_MAXBURST_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED);
}
} else {
asoc->burst_limit_applied = 0;
}
}
if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES);
}
SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, we have put out %d chunks\n",
@ -9885,14 +9930,14 @@ sctp_send_sack(struct sctp_tcb *stcb)
sack = mtod(a_chk->data, struct sctp_sack_chunk *);
sack->ch.chunk_type = SCTP_SELECTIVE_ACK;
/* 0x01 is used by nonce for ecn */
if ((sctp_ecn_enable) &&
(sctp_ecn_nonce) &&
if ((SCTP_BASE_SYSCTL(sctp_ecn_enable)) &&
(SCTP_BASE_SYSCTL(sctp_ecn_nonce)) &&
(asoc->peer_supports_ecn_nonce))
sack->ch.chunk_flags = (asoc->receiver_nonce_sum & SCTP_SACK_NONCE_SUM);
else
sack->ch.chunk_flags = 0;
if (sctp_cmt_on_off && sctp_cmt_use_dac) {
if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
/*-
* CMT DAC algorithm: If 2 (i.e., 0x10) packets have been
* received, then set high bit to 1, else 0. Reset
@ -10146,7 +10191,7 @@ sctp_send_shutdown_complete2(struct mbuf *m, int iphlen, struct sctphdr *sh,
struct mbuf *o_pak;
struct mbuf *mout;
struct ip *iph, *iph_out;
struct udphdr *udp;
struct udphdr *udp = NULL;
#ifdef INET6
struct ip6_hdr *ip6, *ip6_out;
@ -10239,10 +10284,10 @@ sctp_send_shutdown_complete2(struct mbuf *m, int iphlen, struct sctphdr *sh,
}
if (port) {
udp = (struct udphdr *)comp_cp;
udp->uh_sport = htons(sctp_udp_tunneling_port);
udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
udp->uh_dport = port;
udp->uh_ulen = htons(sizeof(struct sctp_shutdown_complete_msg) + sizeof(struct udphdr));
udp->uh_sum = 0;
udp->uh_sum = in_pseudo(iph_out->ip_src.s_addr, iph_out->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
offset_out += sizeof(struct udphdr);
comp_cp = (struct sctp_shutdown_complete_msg *)((caddr_t)comp_cp + sizeof(struct udphdr));
}
@ -10272,11 +10317,13 @@ sctp_send_shutdown_complete2(struct mbuf *m, int iphlen, struct sctphdr *sh,
/* set IPv4 length */
iph_out->ip_len = mlen;
#ifdef SCTP_PACKET_LOGGING
if (sctp_logging_level & SCTP_LAST_PACKET_TRACING)
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
sctp_packet_log(mout, mlen);
#endif
SCTP_ATTACH_CHAIN(o_pak, mout, mlen);
if (port) {
SCTP_ENABLE_UDP_CSUM(o_pak);
}
/* out it goes */
SCTP_IP_OUTPUT(ret, o_pak, &ro, stcb, vrf_id);
@ -10294,10 +10341,16 @@ sctp_send_shutdown_complete2(struct mbuf *m, int iphlen, struct sctphdr *sh,
bzero(&ro, sizeof(ro));
mlen = SCTP_BUF_LEN(mout);
#ifdef SCTP_PACKET_LOGGING
if (sctp_logging_level & SCTP_LAST_PACKET_TRACING)
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
sctp_packet_log(mout, mlen);
#endif
SCTP_ATTACH_CHAIN(o_pak, mout, mlen);
if (port) {
if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr),
sizeof(struct sctp_shutdown_complete_msg) + sizeof(struct udphdr))) == 0) {
udp->uh_sum = 0xffff;
}
}
SCTP_IP6_OUTPUT(ret, o_pak, &ro, &ifp, stcb, vrf_id);
/* Free the route if we got one back */
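
The UDP-encapsulation additions in this function (and the identical pattern in sctp_send_abort() and sctp_send_operr_to() further down) treat the checksum per address family: for IPv4 the sum is seeded with the pseudo-header via in_pseudo() and SCTP_ENABLE_UDP_CSUM() hands completion to the stack, while for IPv6 in6_cksum() produces the full sum and a result of 0 is sent as 0xffff, because a zero UDP checksum means "no checksum" and is not allowed over IPv6. A hedged sketch of what the enable macro might amount to (assumed expansion, not shown in this patch):

/* Assumed: mark the mbuf for delayed UDP checksum completion. */
#define SCTP_ENABLE_UDP_CSUM(m) do {                                  \
	(m)->m_pkthdr.csum_flags |= CSUM_UDP;                         \
	(m)->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);    \
} while (0)
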
@ -10499,7 +10552,7 @@ sctp_send_hb(struct sctp_tcb *stcb, int user_req, struct sctp_nets *u_net)
* heartbeat is being sent to is in PF state, do NOT do threshold
* management.
*/
if ((sctp_cmt_pf == 0) || ((net->dest_state & SCTP_ADDR_PF) != SCTP_ADDR_PF)) {
if ((SCTP_BASE_SYSCTL(sctp_cmt_pf) == 0) || ((net->dest_state & SCTP_ADDR_PF) != SCTP_ADDR_PF)) {
/* ok we have a destination that needs a beat */
/* let's do the threshold management Qiaobing style */
if (sctp_threshold_management(stcb->sctp_ep, stcb, net,
@ -11195,7 +11248,7 @@ sctp_send_abort(struct mbuf *m, int iphlen, struct sctphdr *sh, uint32_t vtag,
udp = (struct udphdr *)abm;
if (port) {
udp->uh_sport = htons(sctp_udp_tunneling_port);
udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
udp->uh_dport = port;
/* set udp->uh_ulen later */
udp->uh_sum = 0;
@ -11255,6 +11308,7 @@ sctp_send_abort(struct mbuf *m, int iphlen, struct sctphdr *sh, uint32_t vtag,
bzero(&ro, sizeof ro);
if (port) {
udp->uh_ulen = htons(len - sizeof(struct ip));
udp->uh_sum = in_pseudo(iph_out->ip_src.s_addr, iph_out->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
}
SCTPDBG(SCTP_DEBUG_OUTPUT2, "sctp_send_abort calling ip_output:\n");
SCTPDBG_PKT(SCTP_DEBUG_OUTPUT2, iph_out, &abm->sh);
@ -11262,10 +11316,13 @@ sctp_send_abort(struct mbuf *m, int iphlen, struct sctphdr *sh, uint32_t vtag,
iph_out->ip_len = len;
/* out it goes */
#ifdef SCTP_PACKET_LOGGING
if (sctp_logging_level & SCTP_LAST_PACKET_TRACING)
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
sctp_packet_log(mout, len);
#endif
SCTP_ATTACH_CHAIN(o_pak, mout, len);
if (port) {
SCTP_ENABLE_UDP_CSUM(o_pak);
}
SCTP_IP_OUTPUT(ret, o_pak, &ro, stcb, vrf_id);
/* Free the route if we got one back */
@ -11288,10 +11345,15 @@ sctp_send_abort(struct mbuf *m, int iphlen, struct sctphdr *sh, uint32_t vtag,
SCTPDBG_PKT(SCTP_DEBUG_OUTPUT2, (struct ip *)ip6_out, &abm->sh);
ip6_out->ip6_plen = len - sizeof(*ip6_out);
#ifdef SCTP_PACKET_LOGGING
if (sctp_logging_level & SCTP_LAST_PACKET_TRACING)
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
sctp_packet_log(mout, len);
#endif
SCTP_ATTACH_CHAIN(o_pak, mout, len);
if (port) {
if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
udp->uh_sum = 0xffff;
}
}
SCTP_IP6_OUTPUT(ret, o_pak, &ro, &ifp, stcb, vrf_id);
/* Free the route if we got one back */
@ -11313,7 +11375,7 @@ sctp_send_operr_to(struct mbuf *m, int iphlen, struct mbuf *scm, uint32_t vtag,
struct sctphdr *ohdr;
struct sctp_chunkhdr *ophdr;
struct ip *iph;
struct udphdr *udp;
struct udphdr *udp = NULL;
struct mbuf *mout;
#ifdef SCTP_DEBUG
@ -11413,17 +11475,19 @@ sctp_send_operr_to(struct mbuf *m, int iphlen, struct mbuf *scm, uint32_t vtag,
out->ip_len = len;
if (port) {
udp = (struct udphdr *)(out + 1);
udp->uh_sport = htons(sctp_udp_tunneling_port);
udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
udp->uh_dport = port;
udp->uh_ulen = htons(len - sizeof(struct ip));
udp->uh_sum = 0;
udp->uh_sum = in_pseudo(out->ip_src.s_addr, out->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
}
#ifdef SCTP_PACKET_LOGGING
if (sctp_logging_level & SCTP_LAST_PACKET_TRACING)
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
sctp_packet_log(mout, len);
#endif
SCTP_ATTACH_CHAIN(o_pak, mout, len);
if (port) {
SCTP_ENABLE_UDP_CSUM(o_pak);
}
SCTP_IP_OUTPUT(retcode, o_pak, &ro, stcb, vrf_id);
SCTP_STAT_INCR(sctps_sendpackets);
@ -11464,7 +11528,7 @@ sctp_send_operr_to(struct mbuf *m, int iphlen, struct mbuf *scm, uint32_t vtag,
out6->ip6_plen = len - sizeof(struct ip6_hdr);
if (port) {
udp = (struct udphdr *)(out6 + 1);
udp->uh_sport = htons(sctp_udp_tunneling_port);
udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
udp->uh_dport = port;
udp->uh_ulen = htons(len - sizeof(struct ip6_hdr));
udp->uh_sum = 0;
@ -11486,10 +11550,15 @@ sctp_send_operr_to(struct mbuf *m, int iphlen, struct mbuf *scm, uint32_t vtag,
SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&fsa6);
#ifdef SCTP_PACKET_LOGGING
if (sctp_logging_level & SCTP_LAST_PACKET_TRACING)
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
sctp_packet_log(mout, len);
#endif
SCTP_ATTACH_CHAIN(o_pak, mout, len);
if (port) {
if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
udp->uh_sum = 0xffff;
}
}
SCTP_IP6_OUTPUT(ret, o_pak, &ro, &ifp, stcb, vrf_id);
SCTP_STAT_INCR(sctps_sendpackets);
@ -12090,10 +12159,8 @@ sctp_lower_sosend(struct socket *so,
hold_tcblock = 1;
}
inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
if ((SCTP_SB_LIMIT_SND(so) <
(sndlen + inqueue_bytes + stcb->asoc.sb_send_resv)) ||
(stcb->asoc.chunks_on_out_queue >
sctp_max_chunks_on_queue)) {
if ((SCTP_SB_LIMIT_SND(so) < (sndlen + inqueue_bytes + stcb->asoc.sb_send_resv)) ||
(stcb->asoc.chunks_on_out_queue >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EWOULDBLOCK);
if (sndlen > SCTP_SB_LIMIT_SND(so))
error = EMSGSIZE;
@ -12176,7 +12243,7 @@ sctp_lower_sosend(struct socket *so,
error = EINVAL;
goto out_unlocked;
}
if ((net->flight_size > net->cwnd) && (sctp_cmt_on_off == 0)) {
if ((net->flight_size > net->cwnd) && (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0)) {
/*-
* CMT: Added check for CMT above. net above is the primary
* dest. If CMT is ON, sender should always attempt to send
@ -12194,7 +12261,7 @@ sctp_lower_sosend(struct socket *so,
asoc->ifp_had_enobuf = 0;
} else {
un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) * sizeof(struct sctp_data_chunk)));
(stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
}
/* Are we aborting? */
if (srcv->sinfo_flags & SCTP_ABORT) {
@ -12344,7 +12411,7 @@ sctp_lower_sosend(struct socket *so,
goto out_unlocked;
}
if (user_marks_eor) {
local_add_more = sctp_add_more_threshold;
local_add_more = SCTP_BASE_SYSCTL(sctp_add_more_threshold);
} else {
/*-
* For non-eeor the whole message must fit in
@ -12358,14 +12425,14 @@ sctp_lower_sosend(struct socket *so,
}
if (((max_len <= local_add_more) &&
(SCTP_SB_LIMIT_SND(so) > local_add_more)) ||
((stcb->asoc.chunks_on_out_queue + stcb->asoc.stream_queue_cnt) > sctp_max_chunks_on_queue)) { /* if */
/* No room right no ! */
((stcb->asoc.chunks_on_out_queue + stcb->asoc.stream_queue_cnt) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) { /* if */
/* No room right now ! */
SOCKBUF_LOCK(&so->so_snd);
inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
while ((SCTP_SB_LIMIT_SND(so) < (inqueue_bytes + sctp_add_more_threshold)) ||
((stcb->asoc.stream_queue_cnt + stcb->asoc.chunks_on_out_queue) > sctp_max_chunks_on_queue /* while */ )) {
while ((SCTP_SB_LIMIT_SND(so) < (inqueue_bytes + SCTP_BASE_SYSCTL(sctp_add_more_threshold))) ||
((stcb->asoc.stream_queue_cnt + stcb->asoc.chunks_on_out_queue) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue) /* while */ )) {
if (sctp_logging_level & SCTP_BLK_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA,
so, asoc, sndlen);
}
@ -12384,7 +12451,7 @@ sctp_lower_sosend(struct socket *so,
SOCKBUF_UNLOCK(&so->so_snd);
goto out_unlocked;
}
if (sctp_logging_level & SCTP_BLK_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
so, asoc, stcb->asoc.total_output_queue_size);
}
@ -12462,7 +12529,7 @@ sctp_lower_sosend(struct socket *so,
atomic_add_int(&asoc->stream_queue_cnt, 1);
if ((srcv->sinfo_flags & SCTP_UNORDERED) == 0) {
sp->strseq = strm->next_sequence_sent;
if (sctp_logging_level & SCTP_LOG_AT_SEND_2_SCTP) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_SCTP) {
sctp_misc_ints(SCTP_STRMOUT_LOG_ASSIGN,
(uintptr_t) stcb, sp->length,
(uint32_t) ((srcv->sinfo_stream << 16) | sp->strseq), 0);
@ -12503,8 +12570,8 @@ sctp_lower_sosend(struct socket *so,
else
max_len = 0;
if ((max_len > sctp_add_more_threshold) ||
(max_len && (SCTP_SB_LIMIT_SND(so) < sctp_add_more_threshold)) ||
if ((max_len > SCTP_BASE_SYSCTL(sctp_add_more_threshold)) ||
(max_len && (SCTP_SB_LIMIT_SND(so) < SCTP_BASE_SYSCTL(sctp_add_more_threshold))) ||
(uio->uio_resid &&
(uio->uio_resid <= (int)max_len))) {
sndout = 0;
@ -12592,7 +12659,7 @@ sctp_lower_sosend(struct socket *so,
goto skip_out_eof;
}
if ((net->flight_size > net->cwnd) &&
(sctp_cmt_on_off == 0)) {
(SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0)) {
queue_only = 1;
} else if (asoc->ifp_had_enobuf) {
SCTP_STAT_INCR(sctps_ifnomemqueued);
@ -12603,12 +12670,10 @@ sctp_lower_sosend(struct socket *so,
}
asoc->ifp_had_enobuf = 0;
un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) *
sizeof(struct sctp_data_chunk)));
(stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
} else {
un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) *
sizeof(struct sctp_data_chunk)));
(stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
if (net->flight_size > (net->mtu * stcb->asoc.max_burst)) {
queue_only = 1;
SCTP_STAT_INCR(sctps_send_burst_avoid);
@ -12630,13 +12695,13 @@ sctp_lower_sosend(struct socket *so,
* Don't send anything and let SACKs drive out the
* data unless we have a "full" segment to send.
*/
if (sctp_logging_level & SCTP_NAGLE_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
}
SCTP_STAT_INCR(sctps_naglequeued);
nagle_applies = 1;
} else {
if (sctp_logging_level & SCTP_NAGLE_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
}
@ -12644,7 +12709,7 @@ sctp_lower_sosend(struct socket *so,
nagle_applies = 0;
}
/* What about the INIT, send it maybe */
if (sctp_logging_level & SCTP_BLK_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
nagle_applies, un_sent);
@ -12710,9 +12775,9 @@ sctp_lower_sosend(struct socket *so,
* wakeup flag in place.
*/
if (SCTP_SB_LIMIT_SND(so) <= (stcb->asoc.total_output_queue_size +
min(sctp_add_more_threshold, SCTP_SB_LIMIT_SND(so)))
min(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTP_SB_LIMIT_SND(so)))
) {
if (sctp_logging_level & SCTP_BLK_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
so, asoc, uio->uio_resid);
}
@ -12732,7 +12797,7 @@ sctp_lower_sosend(struct socket *so,
SOCKBUF_UNLOCK(&so->so_snd);
goto out_unlocked;
}
if (sctp_logging_level & SCTP_BLK_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
so, asoc, stcb->asoc.total_output_queue_size);
}
@ -12875,7 +12940,7 @@ sctp_lower_sosend(struct socket *so,
some_on_control = 1;
}
if ((net->flight_size > net->cwnd) &&
(sctp_cmt_on_off == 0)) {
(SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0)) {
queue_only = 1;
} else if (asoc->ifp_had_enobuf) {
SCTP_STAT_INCR(sctps_ifnomemqueued);
@ -12886,12 +12951,10 @@ sctp_lower_sosend(struct socket *so,
}
asoc->ifp_had_enobuf = 0;
un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) *
sizeof(struct sctp_data_chunk)));
(stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
} else {
un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) *
sizeof(struct sctp_data_chunk)));
(stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
if (net->flight_size > (net->mtu * stcb->asoc.max_burst)) {
queue_only = 1;
SCTP_STAT_INCR(sctps_send_burst_avoid);
@ -12912,13 +12975,13 @@ sctp_lower_sosend(struct socket *so,
* Don't send anything and let SACKs drive out the
* data unless we have a "full" segment to send.
*/
if (sctp_logging_level & SCTP_NAGLE_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
}
SCTP_STAT_INCR(sctps_naglequeued);
nagle_applies = 1;
} else {
if (sctp_logging_level & SCTP_NAGLE_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
}
@ -13035,7 +13098,7 @@ sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
return (m);
/* sysctl disabled auth? */
if (sctp_auth_disable)
if (SCTP_BASE_SYSCTL(sctp_auth_disable))
return (m);
/* peer doesn't do auth... */

View File

@ -48,9 +48,7 @@ __FBSDID("$FreeBSD$");
#include <netinet/udp.h>
void sctp_pcb_finish(void);
struct sctp_epinfo sctppcbinfo;
struct sctp_base_info system_base_info;
/* FIX: we don't handle multiple link local scopes */
/* "scopeless" replacement IN6_ARE_ADDR_EQUAL */
@ -81,14 +79,14 @@ sctp_fill_pcbinfo(struct sctp_pcbinfo *spcb)
* does not hurt.
*/
SCTP_INP_INFO_RLOCK();
spcb->ep_count = sctppcbinfo.ipi_count_ep;
spcb->asoc_count = sctppcbinfo.ipi_count_asoc;
spcb->laddr_count = sctppcbinfo.ipi_count_laddr;
spcb->raddr_count = sctppcbinfo.ipi_count_raddr;
spcb->chk_count = sctppcbinfo.ipi_count_chunk;
spcb->readq_count = sctppcbinfo.ipi_count_readq;
spcb->stream_oque = sctppcbinfo.ipi_count_strmoq;
spcb->free_chunks = sctppcbinfo.ipi_free_chunks;
spcb->ep_count = SCTP_BASE_INFO(ipi_count_ep);
spcb->asoc_count = SCTP_BASE_INFO(ipi_count_asoc);
spcb->laddr_count = SCTP_BASE_INFO(ipi_count_laddr);
spcb->raddr_count = SCTP_BASE_INFO(ipi_count_raddr);
spcb->chk_count = SCTP_BASE_INFO(ipi_count_chunk);
spcb->readq_count = SCTP_BASE_INFO(ipi_count_readq);
spcb->stream_oque = SCTP_BASE_INFO(ipi_count_strmoq);
spcb->free_chunks = SCTP_BASE_INFO(ipi_free_chunks);
SCTP_INP_INFO_RUNLOCK();
}
@ -179,9 +177,9 @@ sctp_allocate_vrf(int vrf_id)
return (NULL);
}
/* Add it to the hash table */
bucket = &sctppcbinfo.sctp_vrfhash[(vrf_id & sctppcbinfo.hashvrfmark)];
bucket = &SCTP_BASE_INFO(sctp_vrfhash)[(vrf_id & SCTP_BASE_INFO(hashvrfmark))];
LIST_INSERT_HEAD(bucket, vrf, next_vrf);
atomic_add_int(&sctppcbinfo.ipi_count_vrfs, 1);
atomic_add_int(&SCTP_BASE_INFO(ipi_count_vrfs), 1);
return (vrf);
}
@ -196,7 +194,7 @@ sctp_find_ifn(void *ifn, uint32_t ifn_index)
* We assume the lock is held for the addresses; if that's wrong
* problems could occur :-)
*/
hash_ifn_head = &sctppcbinfo.vrf_ifn_hash[(ifn_index & sctppcbinfo.vrf_ifn_hashmark)];
hash_ifn_head = &SCTP_BASE_INFO(vrf_ifn_hash)[(ifn_index & SCTP_BASE_INFO(vrf_ifn_hashmark))];
LIST_FOREACH(sctp_ifnp, hash_ifn_head, next_bucket) {
if (sctp_ifnp->ifn_index == ifn_index) {
return (sctp_ifnp);
@ -216,7 +214,7 @@ sctp_find_vrf(uint32_t vrf_id)
struct sctp_vrflist *bucket;
struct sctp_vrf *liste;
bucket = &sctppcbinfo.sctp_vrfhash[(vrf_id & sctppcbinfo.hashvrfmark)];
bucket = &SCTP_BASE_INFO(sctp_vrfhash)[(vrf_id & SCTP_BASE_INFO(hashvrfmark))];
LIST_FOREACH(liste, bucket, next_vrf) {
if (vrf_id == liste->vrf_id) {
return (liste);
@ -239,7 +237,7 @@ sctp_free_vrf(struct sctp_vrf *vrf)
/* We zero'd the count */
LIST_REMOVE(vrf, next_vrf);
SCTP_FREE(vrf, SCTP_M_VRF);
atomic_subtract_int(&sctppcbinfo.ipi_count_vrfs, 1);
atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_vrfs), 1);
}
}
@ -255,7 +253,7 @@ sctp_free_ifn(struct sctp_ifn *sctp_ifnp)
sctp_free_vrf(sctp_ifnp->vrf);
}
SCTP_FREE(sctp_ifnp, SCTP_M_IFN);
atomic_subtract_int(&sctppcbinfo.ipi_count_ifns, 1);
atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_ifns), 1);
}
}
@ -283,7 +281,7 @@ sctp_free_ifa(struct sctp_ifa *sctp_ifap)
sctp_free_ifn(sctp_ifap->ifn_p);
}
SCTP_FREE(sctp_ifap, SCTP_M_IFA);
atomic_subtract_int(&sctppcbinfo.ipi_count_ifas, 1);
atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_ifas), 1);
}
}
@ -545,12 +543,12 @@ sctp_add_addr_to_vrf(uint32_t vrf_id, void *ifn, uint32_t ifn_index,
} else {
memcpy(sctp_ifnp->ifn_name, "unknown", min(7, SCTP_IFNAMSIZ));
}
hash_ifn_head = &sctppcbinfo.vrf_ifn_hash[(ifn_index & sctppcbinfo.vrf_ifn_hashmark)];
hash_ifn_head = &SCTP_BASE_INFO(vrf_ifn_hash)[(ifn_index & SCTP_BASE_INFO(vrf_ifn_hashmark))];
LIST_INIT(&sctp_ifnp->ifalist);
SCTP_IPI_ADDR_WLOCK();
LIST_INSERT_HEAD(hash_ifn_head, sctp_ifnp, next_bucket);
LIST_INSERT_HEAD(&vrf->ifnlist, sctp_ifnp, next_ifn);
atomic_add_int(&sctppcbinfo.ipi_count_ifns, 1);
atomic_add_int(&SCTP_BASE_INFO(ipi_count_ifns), 1);
new_ifn_af = 1;
}
sctp_ifap = sctp_find_ifa_by_addr(addr, vrf->vrf_id, SCTP_ADDR_LOCKED);
@ -672,7 +670,7 @@ sctp_add_addr_to_vrf(uint32_t vrf_id, void *ifn, uint32_t ifn_index,
LIST_INSERT_HEAD(&sctp_ifnp->ifalist, sctp_ifap, next_ifa);
sctp_ifnp->ifa_count++;
vrf->total_ifa_count++;
atomic_add_int(&sctppcbinfo.ipi_count_ifas, 1);
atomic_add_int(&SCTP_BASE_INFO(ipi_count_ifas), 1);
if (new_ifn_af) {
SCTP_REGISTER_INTERFACE(ifn_index, new_ifn_af);
sctp_ifnp->registered_af = new_ifn_af;
@ -686,7 +684,7 @@ sctp_add_addr_to_vrf(uint32_t vrf_id, void *ifn, uint32_t ifn_index,
struct sctp_laddr *wi;
atomic_add_int(&sctp_ifap->refcount, 1);
wi = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_laddr, struct sctp_laddr);
wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
if (wi == NULL) {
/*
* Gak, what can we do? We have lost an address
@ -708,7 +706,7 @@ sctp_add_addr_to_vrf(uint32_t vrf_id, void *ifn, uint32_t ifn_index,
* Should this really be a tailq? As it is we will process
* the newest first :-0
*/
LIST_INSERT_HEAD(&sctppcbinfo.addr_wq, wi, sctp_nxt_addr);
LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
SCTP_IPI_ITERATOR_WQ_UNLOCK();
sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
(struct sctp_inpcb *)NULL,
@ -800,7 +798,7 @@ sctp_del_addr_from_vrf(uint32_t vrf_id, struct sockaddr *addr,
if (sctp_ifap) {
struct sctp_laddr *wi;
wi = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_laddr, struct sctp_laddr);
wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
if (wi == NULL) {
/*
* Gak, what can we do? We have lost an address
@ -822,7 +820,7 @@ sctp_del_addr_from_vrf(uint32_t vrf_id, struct sockaddr *addr,
* Should this really be a tailq? As it is we will process
* the newest first :-0
*/
LIST_INSERT_HEAD(&sctppcbinfo.addr_wq, wi, sctp_nxt_addr);
LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
SCTP_IPI_ITERATOR_WQ_UNLOCK();
sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
@ -862,8 +860,8 @@ sctp_tcb_special_locate(struct sctp_inpcb **inp_p, struct sockaddr *from,
} else {
return NULL;
}
ephead = &sctppcbinfo.sctp_tcpephash[SCTP_PCBHASH_ALLADDR(
(lport + rport), sctppcbinfo.hashtcpmark)];
ephead = &SCTP_BASE_INFO(sctp_tcpephash)[SCTP_PCBHASH_ALLADDR(
(lport + rport), SCTP_BASE_INFO(hashtcpmark))];
/*
* Ok now for each of the guys in this bucket we must look and see:
* - Does the remote port match. - Does their single association's
@ -1312,8 +1310,8 @@ sctp_findassociation_ep_asocid(struct sctp_inpcb *inp, sctp_assoc_t asoc_id, int
}
SCTP_INP_INFO_RLOCK();
id = (uint32_t) asoc_id;
head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(id,
sctppcbinfo.hashasocmark)];
head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(id,
SCTP_BASE_INFO(hashasocmark))];
if (head == NULL) {
/* invalid id TSNH */
SCTP_INP_INFO_RUNLOCK();
@ -1349,7 +1347,7 @@ sctp_findassociation_ep_asocid(struct sctp_inpcb *inp, sctp_assoc_t asoc_id, int
SCTP_INP_RUNLOCK(stcb->sctp_ep);
}
/* Ok if we missed here, lets try the restart hash */
head = &sctppcbinfo.sctp_restarthash[SCTP_PCBHASH_ASOC(id, sctppcbinfo.hashrestartmark)];
head = &SCTP_BASE_INFO(sctp_restarthash)[SCTP_PCBHASH_ASOC(id, SCTP_BASE_INFO(hashrestartmark))];
if (head == NULL) {
/* invalid id TSNH */
SCTP_INP_INFO_RUNLOCK();
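
Both lookups pick a bucket by masking the key with the mark handed back by SCTP_HASH_INIT(); nothing in this patch suggests the PCBHASH macros are anything more than that mask. Assumed definitions, for illustration:

#define SCTP_PCBHASH_ASOC(tag, mask)     ((tag) & (mask))
#define SCTP_PCBHASH_ALLADDR(port, mask) ((port) & (mask))

/* e.g. with hashasocmark == 0x3ff, assoc id 0x12345 lands in bucket 0x345 */
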
@ -1590,8 +1588,8 @@ sctp_pcb_findep(struct sockaddr *nam, int find_tcp_pool, int have_lock,
if (have_lock == 0) {
SCTP_INP_INFO_RLOCK();
}
head = &sctppcbinfo.sctp_ephash[SCTP_PCBHASH_ALLADDR(lport,
sctppcbinfo.hashmark)];
head = &SCTP_BASE_INFO(sctp_ephash)[SCTP_PCBHASH_ALLADDR(lport,
SCTP_BASE_INFO(hashmark))];
inp = sctp_endpoint_probe(nam, head, lport, vrf_id);
/*
@ -1604,14 +1602,14 @@ sctp_pcb_findep(struct sockaddr *nam, int find_tcp_pool, int have_lock,
if (inp == NULL && find_tcp_pool) {
unsigned int i;
for (i = 0; i < sctppcbinfo.hashtblsize; i++) {
for (i = 0; i < SCTP_BASE_INFO(hashtblsize); i++) {
/*
* This is real gross, but we do NOT have a remote
* port at this point depending on who is calling.
* We must therefore look for ANY one that matches
* our local port :/
*/
head = &sctppcbinfo.sctp_tcpephash[i];
head = &SCTP_BASE_INFO(sctp_tcpephash)[i];
if (LIST_FIRST(head)) {
inp = sctp_endpoint_probe(nam, head, lport, vrf_id);
if (inp) {
@ -1781,8 +1779,8 @@ sctp_findassoc_by_vtag(struct sockaddr *from, uint32_t vtag,
*netp = NULL;
*inp_p = NULL;
SCTP_INP_INFO_RLOCK();
head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(vtag,
sctppcbinfo.hashasocmark)];
head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(vtag,
SCTP_BASE_INFO(hashasocmark))];
if (head == NULL) {
/* invalid vtag */
SCTP_INP_INFO_RUNLOCK();
@ -2161,7 +2159,7 @@ sctp_inpcb_alloc(struct socket *so, uint32_t vrf_id)
error = 0;
SCTP_INP_INFO_WLOCK();
inp = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_ep, struct sctp_inpcb);
inp = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_ep), struct sctp_inpcb);
if (inp == NULL) {
SCTP_PRINTF("Out of SCTP-INPCB structures - no resources\n");
SCTP_INP_INFO_WUNLOCK();
@ -2189,7 +2187,7 @@ sctp_inpcb_alloc(struct socket *so, uint32_t vrf_id)
((struct in6pcb *)(&inp->ip_inp.inp))->in6p_sp = pcb_sp;
}
if (error != 0) {
SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_ep, inp);
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp);
SCTP_INP_INFO_WUNLOCK();
return error;
}
@ -2219,25 +2217,25 @@ sctp_inpcb_alloc(struct socket *so, uint32_t vrf_id)
* in protosw
*/
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EOPNOTSUPP);
SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_ep, inp);
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp);
return (EOPNOTSUPP);
}
if (sctp_default_frag_interleave == SCTP_FRAG_LEVEL_1) {
if (SCTP_BASE_SYSCTL(sctp_default_frag_interleave) == SCTP_FRAG_LEVEL_1) {
sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
} else if (sctp_default_frag_interleave == SCTP_FRAG_LEVEL_2) {
} else if (SCTP_BASE_SYSCTL(sctp_default_frag_interleave) == SCTP_FRAG_LEVEL_2) {
sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
sctp_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
} else if (sctp_default_frag_interleave == SCTP_FRAG_LEVEL_0) {
} else if (SCTP_BASE_SYSCTL(sctp_default_frag_interleave) == SCTP_FRAG_LEVEL_0) {
sctp_feature_off(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
}
inp->sctp_tcbhash = SCTP_HASH_INIT(sctp_pcbtblsize,
inp->sctp_tcbhash = SCTP_HASH_INIT(SCTP_BASE_SYSCTL(sctp_pcbtblsize),
&inp->sctp_hashmark);
if (inp->sctp_tcbhash == NULL) {
SCTP_PRINTF("Out of SCTP-INPCB->hashinit - no resources\n");
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOBUFS);
SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_ep, inp);
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp);
return (ENOBUFS);
}
inp->def_vrf_id = vrf_id;
@ -2251,7 +2249,7 @@ sctp_inpcb_alloc(struct socket *so, uint32_t vrf_id)
SCTP_INP_WLOCK(inp);
/* add it to the info area */
LIST_INSERT_HEAD(&sctppcbinfo.listhead, inp, sctp_list);
LIST_INSERT_HEAD(&SCTP_BASE_INFO(listhead), inp, sctp_list);
SCTP_INP_INFO_WUNLOCK();
TAILQ_INIT(&inp->read_queue);
@ -2273,35 +2271,35 @@ sctp_inpcb_alloc(struct socket *so, uint32_t vrf_id)
/* setup the base timeout information */
m->sctp_timeoutticks[SCTP_TIMER_SEND] = SEC_TO_TICKS(SCTP_SEND_SEC); /* needed ? */
m->sctp_timeoutticks[SCTP_TIMER_INIT] = SEC_TO_TICKS(SCTP_INIT_SEC); /* needed ? */
m->sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(sctp_delayed_sack_time_default);
m->sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = MSEC_TO_TICKS(sctp_heartbeat_interval_default);
m->sctp_timeoutticks[SCTP_TIMER_PMTU] = SEC_TO_TICKS(sctp_pmtu_raise_time_default);
m->sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] = SEC_TO_TICKS(sctp_shutdown_guard_time_default);
m->sctp_timeoutticks[SCTP_TIMER_SIGNATURE] = SEC_TO_TICKS(sctp_secret_lifetime_default);
m->sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(SCTP_BASE_SYSCTL(sctp_delayed_sack_time_default));
m->sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = MSEC_TO_TICKS(SCTP_BASE_SYSCTL(sctp_heartbeat_interval_default));
m->sctp_timeoutticks[SCTP_TIMER_PMTU] = SEC_TO_TICKS(SCTP_BASE_SYSCTL(sctp_pmtu_raise_time_default));
m->sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] = SEC_TO_TICKS(SCTP_BASE_SYSCTL(sctp_shutdown_guard_time_default));
m->sctp_timeoutticks[SCTP_TIMER_SIGNATURE] = SEC_TO_TICKS(SCTP_BASE_SYSCTL(sctp_secret_lifetime_default));
/* all max/min max are in ms */
m->sctp_maxrto = sctp_rto_max_default;
m->sctp_minrto = sctp_rto_min_default;
m->initial_rto = sctp_rto_initial_default;
m->initial_init_rto_max = sctp_init_rto_max_default;
m->sctp_sack_freq = sctp_sack_freq_default;
m->sctp_maxrto = SCTP_BASE_SYSCTL(sctp_rto_max_default);
m->sctp_minrto = SCTP_BASE_SYSCTL(sctp_rto_min_default);
m->initial_rto = SCTP_BASE_SYSCTL(sctp_rto_initial_default);
m->initial_init_rto_max = SCTP_BASE_SYSCTL(sctp_init_rto_max_default);
m->sctp_sack_freq = SCTP_BASE_SYSCTL(sctp_sack_freq_default);
m->max_open_streams_intome = MAX_SCTP_STREAMS;
m->max_init_times = sctp_init_rtx_max_default;
m->max_send_times = sctp_assoc_rtx_max_default;
m->def_net_failure = sctp_path_rtx_max_default;
m->max_init_times = SCTP_BASE_SYSCTL(sctp_init_rtx_max_default);
m->max_send_times = SCTP_BASE_SYSCTL(sctp_assoc_rtx_max_default);
m->def_net_failure = SCTP_BASE_SYSCTL(sctp_path_rtx_max_default);
m->sctp_sws_sender = SCTP_SWS_SENDER_DEF;
m->sctp_sws_receiver = SCTP_SWS_RECEIVER_DEF;
m->max_burst = sctp_max_burst_default;
if ((sctp_default_cc_module >= SCTP_CC_RFC2581) &&
(sctp_default_cc_module <= SCTP_CC_HTCP)) {
m->sctp_default_cc_module = sctp_default_cc_module;
m->max_burst = SCTP_BASE_SYSCTL(sctp_max_burst_default);
if ((SCTP_BASE_SYSCTL(sctp_default_cc_module) >= SCTP_CC_RFC2581) &&
(SCTP_BASE_SYSCTL(sctp_default_cc_module) <= SCTP_CC_HTCP)) {
m->sctp_default_cc_module = SCTP_BASE_SYSCTL(sctp_default_cc_module);
} else {
/* sysctl done with invalid value, set to 2581 */
m->sctp_default_cc_module = SCTP_CC_RFC2581;
}
/* number of streams to pre-open on an association */
m->pre_open_stream_count = sctp_nr_outgoing_streams_default;
m->pre_open_stream_count = SCTP_BASE_SYSCTL(sctp_nr_outgoing_streams_default);
/* Add adaptation cookie */
m->adaptation_layer_indicator = 0x504C5253;
@ -2327,7 +2325,7 @@ sctp_inpcb_alloc(struct socket *so, uint32_t vrf_id)
sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL);
/* How long is a cookie good for ? */
m->def_cookie_life = MSEC_TO_TICKS(sctp_valid_cookie_life_default);
m->def_cookie_life = MSEC_TO_TICKS(SCTP_BASE_SYSCTL(sctp_valid_cookie_life_default));
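
The endpoint defaults above are copied from the (now structure-backed) sysctl values, with the millisecond- and second-based ones converted to ticks on the way in. The conversion macros are not part of this patch; a plausible sketch, assuming the usual hz-based definitions:

/* Assumed tick conversions (hz is the kernel clock rate). */
#define SEC_TO_TICKS(x)  ((x) * hz)
#define MSEC_TO_TICKS(x) (((x) * hz) / 1000)

/* e.g. a 200 ms delayed-SACK default becomes 200 * hz / 1000 ticks. */
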
/*
* Initialize authentication parameters
*/
@ -2385,8 +2383,8 @@ sctp_move_pcb_and_assoc(struct sctp_inpcb *old_inp, struct sctp_inpcb *new_inp,
LIST_REMOVE(stcb, sctp_tcblist);
/* Now insert the new_inp into the TCP connected hash */
head = &sctppcbinfo.sctp_tcpephash[SCTP_PCBHASH_ALLADDR((lport + rport),
sctppcbinfo.hashtcpmark)];
head = &SCTP_BASE_INFO(sctp_tcpephash)[SCTP_PCBHASH_ALLADDR((lport + rport),
SCTP_BASE_INFO(hashtcpmark))];
LIST_INSERT_HEAD(head, new_inp, sctp_hash);
/* It's safe to access */
@ -2414,7 +2412,7 @@ sctp_move_pcb_and_assoc(struct sctp_inpcb *old_inp, struct sctp_inpcb *new_inp,
if ((new_inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
/* Subset bound, so copy in the laddr list from the old_inp */
LIST_FOREACH(oladdr, &old_inp->sctp_addr_list, sctp_nxt_addr) {
laddr = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_laddr, struct sctp_laddr);
laddr = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
if (laddr == NULL) {
/*
* Gak, what can we do? This assoc is really
@ -2465,8 +2463,8 @@ sctp_isport_inuse(struct sctp_inpcb *inp, uint16_t lport, uint32_t vrf_id)
struct sctp_inpcb *t_inp;
int fnd;
head = &sctppcbinfo.sctp_ephash[SCTP_PCBHASH_ALLADDR(lport,
sctppcbinfo.hashmark)];
head = &SCTP_BASE_INFO(sctp_ephash)[SCTP_PCBHASH_ALLADDR(lport,
SCTP_BASE_INFO(hashmark))];
LIST_FOREACH(t_inp, head, sctp_hash) {
if (t_inp->sctp_lport != lport) {
continue;
@ -2774,14 +2772,14 @@ sctp_inpcb_bind(struct socket *so, struct sockaddr *addr,
/* binding to all addresses, so just set in the proper flags */
inp->sctp_flags |= SCTP_PCB_FLAGS_BOUNDALL;
/* set the automatic addr changes from kernel flag */
if (sctp_auto_asconf == 0) {
if (SCTP_BASE_SYSCTL(sctp_auto_asconf) == 0) {
sctp_feature_off(inp, SCTP_PCB_FLAGS_DO_ASCONF);
sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTO_ASCONF);
} else {
sctp_feature_on(inp, SCTP_PCB_FLAGS_DO_ASCONF);
sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF);
}
if (sctp_multiple_asconfs == 0) {
if (SCTP_BASE_SYSCTL(sctp_multiple_asconfs) == 0) {
sctp_feature_off(inp, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS);
} else {
sctp_feature_on(inp, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS);
@ -2790,7 +2788,7 @@ sctp_inpcb_bind(struct socket *so, struct sockaddr *addr,
* set the automatic mobility_base from kernel flag (by
* micchie)
*/
if (sctp_mobility_base == 0) {
if (SCTP_BASE_SYSCTL(sctp_mobility_base) == 0) {
sctp_mobility_feature_off(inp, SCTP_MOBILITY_BASE);
sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED);
} else {
@ -2801,7 +2799,7 @@ sctp_inpcb_bind(struct socket *so, struct sockaddr *addr,
* set the automatic mobility_fasthandoff from kernel flag
* (by micchie)
*/
if (sctp_mobility_fasthandoff == 0) {
if (SCTP_BASE_SYSCTL(sctp_mobility_fasthandoff) == 0) {
sctp_mobility_feature_off(inp, SCTP_MOBILITY_FASTHANDOFF);
sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED);
} else {
@ -2886,8 +2884,8 @@ sctp_inpcb_bind(struct socket *so, struct sockaddr *addr,
inp->laddr_count++;
}
/* find the bucket */
head = &sctppcbinfo.sctp_ephash[SCTP_PCBHASH_ALLADDR(lport,
sctppcbinfo.hashmark)];
head = &SCTP_BASE_INFO(sctp_ephash)[SCTP_PCBHASH_ALLADDR(lport,
SCTP_BASE_INFO(hashmark))];
/* put it in the bucket */
LIST_INSERT_HEAD(head, inp, sctp_hash);
SCTPDBG(SCTP_DEBUG_PCB1, "Main hash to bind at head:%p, bound port:%d\n",
@ -2921,7 +2919,7 @@ sctp_iterator_inp_being_freed(struct sctp_inpcb *inp, struct sctp_inpcb *inp_nex
* those guys. The list of iterators should never be very big
* though.
*/
TAILQ_FOREACH(it, &sctppcbinfo.iteratorhead, sctp_nxt_itr) {
TAILQ_FOREACH(it, &SCTP_BASE_INFO(iteratorhead), sctp_nxt_itr) {
if (it == inp->inp_starting_point_for_iterator)
/* skip this guy, he's special */
continue;
@ -3215,7 +3213,7 @@ sctp_inpcb_free(struct sctp_inpcb *inp, int immediate, int from)
SCTP_PCB_FLAGS_UNBOUND) {
/*
* ok, this guy has been bound. Its port is somewhere in
* the sctppcbinfo hash table. Remove it!
* the SCTP_BASE_INFO(hash table). Remove it!
*/
LIST_REMOVE(inp, sctp_hash);
inp->sctp_flags |= SCTP_PCB_FLAGS_UNBOUND;
@ -3327,7 +3325,7 @@ sctp_inpcb_free(struct sctp_inpcb *inp, int immediate, int from)
* no need to free the net count, since at this point all
* assoc's are gone.
*/
SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_readq, sq);
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), sq);
SCTP_DECR_READQ_COUNT();
}
/* Now the sctp_pcb things */
@ -3397,7 +3395,7 @@ sctp_inpcb_free(struct sctp_inpcb *inp, int immediate, int from)
asoc = nasoc) {
nasoc = LIST_NEXT(asoc, sctp_tcblist);
LIST_REMOVE(asoc, sctp_tcblist);
SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_asoc, asoc);
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), asoc);
SCTP_DECR_ASOC_COUNT();
}
/* *** END TEMP CODE *** */
@ -3414,7 +3412,7 @@ sctp_inpcb_free(struct sctp_inpcb *inp, int immediate, int from)
SCTP_ASOC_CREATE_LOCK_DESTROY(inp);
SCTP_INP_INFO_WUNLOCK();
SCTP_ITERATOR_UNLOCK();
SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_ep, inp);
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp);
SCTP_DECR_EP_COUNT();
}
@ -3570,7 +3568,7 @@ sctp_add_remote_addr(struct sctp_tcb *stcb, struct sockaddr *newaddr,
/* not supported family type */
return (-1);
}
net = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_net, struct sctp_nets);
net = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_net), struct sctp_nets);
if (net == NULL) {
return (-1);
}
@ -3612,8 +3610,8 @@ sctp_add_remote_addr(struct sctp_tcb *stcb, struct sockaddr *newaddr,
stcb->asoc.numnets++;
*(&net->ref_count) = 1;
net->tos_flowlabel = 0;
if (sctp_udp_tunneling_for_client_enable) {
net->port = htons(sctp_udp_tunneling_port);
if (SCTP_BASE_SYSCTL(sctp_udp_tunneling_for_client_enable)) {
net->port = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
} else {
net->port = 0;
}
@ -3824,7 +3822,7 @@ sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr,
* sctp_findassociation_ep_addr(ep, addr's); to make sure the
* address does not exist already.
*/
if (sctppcbinfo.ipi_count_asoc >= SCTP_MAX_NUM_OF_ASOC) {
if (SCTP_BASE_INFO(ipi_count_asoc) >= SCTP_MAX_NUM_OF_ASOC) {
/* Hit max assoc, sorry no more */
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOBUFS);
*error = ENOBUFS;
@ -3906,7 +3904,7 @@ sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr,
return (NULL);
}
}
stcb = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_asoc, struct sctp_tcb);
stcb = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_asoc), struct sctp_tcb);
if (stcb == NULL) {
/* out of memory? */
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOMEM);
@ -3926,7 +3924,7 @@ sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr,
/* failed */
SCTP_TCB_LOCK_DESTROY(stcb);
SCTP_TCB_SEND_LOCK_DESTROY(stcb);
SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_asoc, stcb);
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), stcb);
SCTP_DECR_ASOC_COUNT();
*error = err;
return (NULL);
@ -3939,7 +3937,7 @@ sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr,
/* inpcb freed while alloc going on */
SCTP_TCB_LOCK_DESTROY(stcb);
SCTP_TCB_SEND_LOCK_DESTROY(stcb);
SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_asoc, stcb);
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), stcb);
SCTP_INP_WUNLOCK(inp);
SCTP_INP_INFO_WUNLOCK();
SCTP_DECR_ASOC_COUNT();
@ -3950,8 +3948,8 @@ sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr,
SCTP_TCB_LOCK(stcb);
/* now that my_vtag is set, add it to the hash */
head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
sctppcbinfo.hashasocmark)];
head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
SCTP_BASE_INFO(hashasocmark))];
/* put it in the bucket in the vtag hash of assoc's for the system */
LIST_INSERT_HEAD(head, stcb, sctp_asocs);
sctp_delete_from_timewait(stcb->asoc.my_vtag);
@ -3971,7 +3969,7 @@ sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr,
SCTP_DECR_ASOC_COUNT();
SCTP_TCB_LOCK_DESTROY(stcb);
SCTP_TCB_SEND_LOCK_DESTROY(stcb);
SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_asoc, stcb);
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), stcb);
SCTP_INP_WUNLOCK(inp);
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOBUFS);
*error = ENOBUFS;
@ -4100,7 +4098,7 @@ sctp_delete_from_timewait(uint32_t tag)
int found = 0;
int i;
chain = &sctppcbinfo.vtag_timewait[(tag % SCTP_STACK_VTAG_HASH_SIZE)];
chain = &SCTP_BASE_INFO(vtag_timewait)[(tag % SCTP_STACK_VTAG_HASH_SIZE)];
if (!SCTP_LIST_EMPTY(chain)) {
LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) {
for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) {
@ -4125,7 +4123,7 @@ sctp_is_in_timewait(uint32_t tag)
int found = 0;
int i;
chain = &sctppcbinfo.vtag_timewait[(tag % SCTP_STACK_VTAG_HASH_SIZE)];
chain = &SCTP_BASE_INFO(vtag_timewait)[(tag % SCTP_STACK_VTAG_HASH_SIZE)];
if (!SCTP_LIST_EMPTY(chain)) {
LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) {
for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) {
@ -4151,7 +4149,7 @@ sctp_add_vtag_to_timewait(uint32_t tag, uint32_t time)
int set, i;
(void)SCTP_GETTIME_TIMEVAL(&now);
chain = &sctppcbinfo.vtag_timewait[(tag % SCTP_STACK_VTAG_HASH_SIZE)];
chain = &SCTP_BASE_INFO(vtag_timewait)[(tag % SCTP_STACK_VTAG_HASH_SIZE)];
set = 0;
if (!SCTP_LIST_EMPTY(chain)) {
/* Block(s) present, lets find space, and expire on the fly */
@ -4532,7 +4530,7 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
sctp_free_remote_addr(sp->net);
sctp_free_spbufspace(stcb, asoc, sp);
/* Free the zone stuff */
SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_strmoq, sp);
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_strmoq), sp);
SCTP_DECR_STRMOQ_COUNT();
/* sa_ignore FREED_MEMORY */
sp = TAILQ_FIRST(&outs->outqueue);
@ -4556,7 +4554,7 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
sq->whoFrom = NULL;
sq->stcb = NULL;
/* Free the ctl entry */
SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_readq, sq);
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), sq);
SCTP_DECR_READQ_COUNT();
/* sa_ignore FREED_MEMORY */
sq = TAILQ_FIRST(&asoc->pending_reply_queue);
@ -4570,9 +4568,9 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
chk->data = NULL;
}
ccnt++;
SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk);
SCTP_DECR_CHK_COUNT();
atomic_subtract_int(&sctppcbinfo.ipi_free_chunks, 1);
atomic_subtract_int(&SCTP_BASE_INFO(ipi_free_chunks), 1);
asoc->free_chunk_cnt--;
/* sa_ignore FREED_MEMORY */
chk = TAILQ_FIRST(&asoc->free_chunks);
@ -4588,7 +4586,7 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
}
ccnt++;
sctp_free_remote_addr(chk->whoTo);
SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk);
SCTP_DECR_CHK_COUNT();
/* sa_ignore FREED_MEMORY */
chk = TAILQ_FIRST(&asoc->send_queue);
@ -4611,7 +4609,7 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
}
ccnt++;
sctp_free_remote_addr(chk->whoTo);
SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk);
SCTP_DECR_CHK_COUNT();
/* sa_ignore FREED_MEMORY */
chk = TAILQ_FIRST(&asoc->sent_queue);
@ -4634,7 +4632,7 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
}
ccnt++;
sctp_free_remote_addr(chk->whoTo);
SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk);
SCTP_DECR_CHK_COUNT();
/* sa_ignore FREED_MEMORY */
chk = TAILQ_FIRST(&asoc->control_send_queue);
@ -4658,7 +4656,7 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
}
ccnt++;
sctp_free_remote_addr(chk->whoTo);
SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk);
SCTP_DECR_CHK_COUNT();
/* sa_ignore FREED_MEMORY */
chk = TAILQ_FIRST(&asoc->asconf_send_queue);
@ -4680,7 +4678,7 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
}
sctp_free_remote_addr(chk->whoTo);
ccnt++;
SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk);
SCTP_DECR_CHK_COUNT();
/* sa_ignore FREED_MEMORY */
chk = TAILQ_FIRST(&asoc->reasmqueue);
@ -4722,7 +4720,7 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
* since all the net's were freed
* above.
*/
SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_readq, ctl);
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), ctl);
SCTP_DECR_READQ_COUNT();
ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue);
}
@ -4736,7 +4734,7 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
/* sa_ignore FREED_MEMORY */
net = TAILQ_FIRST(&asoc->nets);
/* pull from list */
if ((sctppcbinfo.ipi_count_raddr == 0) || (prev == net)) {
if ((SCTP_BASE_INFO(ipi_count_raddr) == 0) || (prev == net)) {
#ifdef INVARIANTS
panic("no net's left alloc'ed, or list points to itself");
#endif
@ -4767,7 +4765,7 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
if (aack->data != NULL) {
sctp_m_freem(aack->data);
}
SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_asconf_ack, aack);
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asconf_ack), aack);
}
/* clean up auth stuff */
if (asoc->local_hmacs)
@ -4802,13 +4800,13 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
#ifdef SCTP_TRACK_FREED_ASOCS
if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
/* now clean up the tasoc itself */
SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_asoc, stcb);
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), stcb);
SCTP_DECR_ASOC_COUNT();
} else {
LIST_INSERT_HEAD(&inp->sctp_asoc_free_list, stcb, sctp_tcblist);
}
#else
SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_asoc, stcb);
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), stcb);
SCTP_DECR_ASOC_COUNT();
#endif
if (from_inpcbfree == SCTP_NORMAL_PROC) {
@ -5118,7 +5116,7 @@ sctp_insert_laddr(struct sctpladdr *list, struct sctp_ifa *ifa, uint32_t act)
{
struct sctp_laddr *laddr;
laddr = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_laddr, struct sctp_laddr);
laddr = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
if (laddr == NULL) {
/* out of memory? */
SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
@ -5146,7 +5144,7 @@ sctp_remove_laddr(struct sctp_laddr *laddr)
/* remove from the list */
LIST_REMOVE(laddr, sctp_nxt_addr);
sctp_free_ifa(laddr->ifa);
SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_laddr, laddr);
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_laddr), laddr);
SCTP_DECR_LADDR_COUNT();
}
@ -5192,8 +5190,6 @@ sctp_del_local_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
return;
}
static char sctp_pcb_initialized = 0;
/*
* Temporarily remove for __APPLE__ until we use the Tiger equivalents
*/
@ -5211,86 +5207,85 @@ sctp_pcb_init()
int i;
struct timeval tv;
if (sctp_pcb_initialized != 0) {
if (SCTP_BASE_VAR(sctp_pcb_initialized) != 0) {
/* error I was called twice */
return;
}
sctp_pcb_initialized = 1;
SCTP_BASE_VAR(sctp_pcb_initialized) = 1;
bzero(&sctpstat, sizeof(struct sctpstat));
#if defined(SCTP_LOCAL_TRACE_BUF)
bzero(&sctp_log, sizeof(struct sctp_log));
bzero(&SCTP_BASE_SYSCTL(sctp_log), sizeof(struct sctp_log));
#endif
(void)SCTP_GETTIME_TIMEVAL(&tv);
sctpstat.sctps_discontinuitytime.tv_sec = (uint32_t) tv.tv_sec;
sctpstat.sctps_discontinuitytime.tv_usec = (uint32_t) tv.tv_usec;
SCTP_BASE_STAT(sctps_discontinuitytime).tv_sec = (uint32_t) tv.tv_sec;
SCTP_BASE_STAT(sctps_discontinuitytime).tv_usec = (uint32_t) tv.tv_usec;
/* init the empty list of (All) Endpoints */
LIST_INIT(&sctppcbinfo.listhead);
LIST_INIT(&SCTP_BASE_INFO(listhead));
/* init the iterator head */
TAILQ_INIT(&sctppcbinfo.iteratorhead);
TAILQ_INIT(&SCTP_BASE_INFO(iteratorhead));
/* init the hash table of endpoints */
TUNABLE_INT_FETCH("net.inet.sctp.tcbhashsize", &sctp_hashtblsize);
TUNABLE_INT_FETCH("net.inet.sctp.pcbhashsize", &sctp_pcbtblsize);
TUNABLE_INT_FETCH("net.inet.sctp.chunkscale", &sctp_chunkscale);
sctppcbinfo.sctp_asochash = SCTP_HASH_INIT((sctp_hashtblsize * 31),
&sctppcbinfo.hashasocmark);
sctppcbinfo.sctp_ephash = SCTP_HASH_INIT(sctp_hashtblsize,
&sctppcbinfo.hashmark);
sctppcbinfo.sctp_tcpephash = SCTP_HASH_INIT(sctp_hashtblsize,
&sctppcbinfo.hashtcpmark);
sctppcbinfo.hashtblsize = sctp_hashtblsize;
TUNABLE_INT_FETCH("net.inet.sctp.tcbhashsize", &SCTP_BASE_SYSCTL(sctp_hashtblsize));
TUNABLE_INT_FETCH("net.inet.sctp.pcbhashsize", &SCTP_BASE_SYSCTL(sctp_pcbtblsize));
TUNABLE_INT_FETCH("net.inet.sctp.chunkscale", &SCTP_BASE_SYSCTL(sctp_chunkscale));
SCTP_BASE_INFO(sctp_asochash) = SCTP_HASH_INIT((SCTP_BASE_SYSCTL(sctp_hashtblsize) * 31),
&SCTP_BASE_INFO(hashasocmark));
SCTP_BASE_INFO(sctp_ephash) = SCTP_HASH_INIT(SCTP_BASE_SYSCTL(sctp_hashtblsize),
&SCTP_BASE_INFO(hashmark));
SCTP_BASE_INFO(sctp_tcpephash) = SCTP_HASH_INIT(SCTP_BASE_SYSCTL(sctp_hashtblsize),
&SCTP_BASE_INFO(hashtcpmark));
SCTP_BASE_INFO(hashtblsize) = SCTP_BASE_SYSCTL(sctp_hashtblsize);
/* init the small hash table we use to track restarted asoc's */
sctppcbinfo.sctp_restarthash = SCTP_HASH_INIT(SCTP_STACK_VTAG_HASH_SIZE,
&sctppcbinfo.hashrestartmark);
SCTP_BASE_INFO(sctp_restarthash) = SCTP_HASH_INIT(SCTP_STACK_VTAG_HASH_SIZE,
&SCTP_BASE_INFO(hashrestartmark));
sctppcbinfo.sctp_vrfhash = SCTP_HASH_INIT(SCTP_SIZE_OF_VRF_HASH,
&sctppcbinfo.hashvrfmark);
SCTP_BASE_INFO(sctp_vrfhash) = SCTP_HASH_INIT(SCTP_SIZE_OF_VRF_HASH,
&SCTP_BASE_INFO(hashvrfmark));
sctppcbinfo.vrf_ifn_hash = SCTP_HASH_INIT(SCTP_VRF_IFN_HASH_SIZE,
&sctppcbinfo.vrf_ifn_hashmark);
SCTP_BASE_INFO(vrf_ifn_hash) = SCTP_HASH_INIT(SCTP_VRF_IFN_HASH_SIZE,
&SCTP_BASE_INFO(vrf_ifn_hashmark));
/* init the zones */
/*
* FIX ME: Should check for NULL returns, but if it does fail we are
* doomed to panic anyways... add later maybe.
*/
SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_ep, "sctp_ep",
SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_ep), "sctp_ep",
sizeof(struct sctp_inpcb), maxsockets);
SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_asoc, "sctp_asoc",
SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_asoc), "sctp_asoc",
sizeof(struct sctp_tcb), sctp_max_number_of_assoc);
SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_laddr, "sctp_laddr",
SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_laddr), "sctp_laddr",
sizeof(struct sctp_laddr),
(sctp_max_number_of_assoc * sctp_scale_up_for_address));
SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_net, "sctp_raddr",
SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_net), "sctp_raddr",
sizeof(struct sctp_nets),
(sctp_max_number_of_assoc * sctp_scale_up_for_address));
SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_chunk, "sctp_chunk",
SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_chunk), "sctp_chunk",
sizeof(struct sctp_tmit_chunk),
(sctp_max_number_of_assoc * sctp_chunkscale));
(sctp_max_number_of_assoc * SCTP_BASE_SYSCTL(sctp_chunkscale)));
SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_readq, "sctp_readq",
SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_readq), "sctp_readq",
sizeof(struct sctp_queued_to_read),
(sctp_max_number_of_assoc * sctp_chunkscale));
(sctp_max_number_of_assoc * SCTP_BASE_SYSCTL(sctp_chunkscale)));
SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_strmoq, "sctp_stream_msg_out",
SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_strmoq), "sctp_stream_msg_out",
sizeof(struct sctp_stream_queue_pending),
(sctp_max_number_of_assoc * sctp_chunkscale));
(sctp_max_number_of_assoc * SCTP_BASE_SYSCTL(sctp_chunkscale)));
SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_asconf, "sctp_asconf",
SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_asconf), "sctp_asconf",
sizeof(struct sctp_asconf),
(sctp_max_number_of_assoc * sctp_chunkscale));
(sctp_max_number_of_assoc * SCTP_BASE_SYSCTL(sctp_chunkscale)));
SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_asconf_ack, "sctp_asconf_ack",
SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_asconf_ack), "sctp_asconf_ack",
sizeof(struct sctp_asconf_ack),
(sctp_max_number_of_assoc * sctp_chunkscale));
(sctp_max_number_of_assoc * SCTP_BASE_SYSCTL(sctp_chunkscale)));
/* Master Lock INIT for info structure */
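
The zones initialized above go through the SCTP_ZONE_* wrappers, which on FreeBSD are thin shims over UMA. A hedged sketch of their likely shape (the real definitions are in the OS glue header, not in this patch):

/* Assumed UMA-backed wrappers; 'number' could cap the zone via uma_zone_set_max(). */
#define SCTP_ZONE_INIT(zone, name, size, number)                  \
	(zone) = uma_zcreate(name, size, NULL, NULL, NULL, NULL,  \
	    UMA_ALIGN_PTR, 0)

#define SCTP_ZONE_GET(zone, type)     ((type *)uma_zalloc(zone, M_NOWAIT))
#define SCTP_ZONE_FREE(zone, element) uma_zfree(zone, element)
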
@ -5304,37 +5299,37 @@ sctp_pcb_init()
#ifdef SCTP_PACKET_LOGGING
SCTP_IP_PKTLOG_INIT();
#endif
LIST_INIT(&sctppcbinfo.addr_wq);
LIST_INIT(&SCTP_BASE_INFO(addr_wq));
/* not sure if we need all the counts */
sctppcbinfo.ipi_count_ep = 0;
SCTP_BASE_INFO(ipi_count_ep) = 0;
/* assoc/tcb zone info */
sctppcbinfo.ipi_count_asoc = 0;
SCTP_BASE_INFO(ipi_count_asoc) = 0;
/* local addrlist zone info */
sctppcbinfo.ipi_count_laddr = 0;
SCTP_BASE_INFO(ipi_count_laddr) = 0;
/* remote addrlist zone info */
sctppcbinfo.ipi_count_raddr = 0;
SCTP_BASE_INFO(ipi_count_raddr) = 0;
/* chunk info */
sctppcbinfo.ipi_count_chunk = 0;
SCTP_BASE_INFO(ipi_count_chunk) = 0;
/* socket queue zone info */
sctppcbinfo.ipi_count_readq = 0;
SCTP_BASE_INFO(ipi_count_readq) = 0;
/* stream out queue cont */
sctppcbinfo.ipi_count_strmoq = 0;
SCTP_BASE_INFO(ipi_count_strmoq) = 0;
sctppcbinfo.ipi_free_strmoq = 0;
sctppcbinfo.ipi_free_chunks = 0;
SCTP_BASE_INFO(ipi_free_strmoq) = 0;
SCTP_BASE_INFO(ipi_free_chunks) = 0;
SCTP_OS_TIMER_INIT(&sctppcbinfo.addr_wq_timer.timer);
SCTP_OS_TIMER_INIT(&SCTP_BASE_INFO(addr_wq_timer.timer));
/* Init the TIMEWAIT list */
for (i = 0; i < SCTP_STACK_VTAG_HASH_SIZE_A; i++) {
LIST_INIT(&sctppcbinfo.vtag_timewait[i]);
LIST_INIT(&SCTP_BASE_INFO(vtag_timewait[i]));
}
#if defined(SCTP_USE_THREAD_BASED_ITERATOR)
sctppcbinfo.iterator_running = 0;
SCTP_BASE_INFO(iterator_running) = 0;
sctp_startup_iterator();
#endif
@ -5348,7 +5343,7 @@ sctp_pcb_init()
}
/*
* Assumes that the sctppcbinfo lock is NOT held.
* Assumes that the SCTP_BASE_INFO() lock is NOT held.
*/
void
sctp_pcb_finish(void)
@ -5357,13 +5352,16 @@ sctp_pcb_finish(void)
struct sctp_vrf *vrf;
struct sctp_ifn *ifn;
struct sctp_ifa *ifa;
struct sctpvtaghead *chain;
struct sctp_tagblock *twait_block, *prev_twait_block;
int i;
/* FIXME MT */
/*
* free the vrf/ifn/ifa lists and hashes (be sure address monitor is
* destroyed first).
*/
vrf_bucket = &sctppcbinfo.sctp_vrfhash[(SCTP_DEFAULT_VRFID & sctppcbinfo.hashvrfmark)];
vrf_bucket = &SCTP_BASE_INFO(sctp_vrfhash)[(SCTP_DEFAULT_VRFID & SCTP_BASE_INFO(hashvrfmark))];
vrf = LIST_FIRST(vrf_bucket);
while (vrf) {
ifn = LIST_FIRST(&vrf->ifnlist);
@ -5389,8 +5387,26 @@ sctp_pcb_finish(void)
vrf = LIST_FIRST(vrf_bucket);
}
/* free the vrf hashes */
SCTP_HASH_FREE(sctppcbinfo.sctp_vrfhash, sctppcbinfo.hashvrfmark);
SCTP_HASH_FREE(sctppcbinfo.vrf_ifn_hash, sctppcbinfo.vrf_ifn_hashmark);
SCTP_HASH_FREE(SCTP_BASE_INFO(sctp_vrfhash), SCTP_BASE_INFO(hashvrfmark));
SCTP_HASH_FREE(SCTP_BASE_INFO(vrf_ifn_hash), SCTP_BASE_INFO(vrf_ifn_hashmark));
/*
* free the TIMEWAIT list elements malloc'd in the function
* sctp_add_vtag_to_timewait()...
*/
for (i = 0; i < SCTP_STACK_VTAG_HASH_SIZE_A; i++) {
chain = &SCTP_BASE_INFO(vtag_timewait)[i];
if (!SCTP_LIST_EMPTY(chain)) {
prev_twait_block = NULL;
LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) {
if (prev_twait_block) {
SCTP_FREE(prev_twait_block, SCTP_M_TIMW);
}
prev_twait_block = twait_block;
}
SCTP_FREE(prev_twait_block, SCTP_M_TIMW);
}
}
/* free the locks and mutexes */
#ifdef SCTP_PACKET_LOGGING
@ -5403,15 +5419,15 @@ sctp_pcb_finish(void)
SCTP_STATLOG_DESTROY();
SCTP_INP_INFO_LOCK_DESTROY();
SCTP_ZONE_DESTROY(sctppcbinfo.ipi_zone_ep);
SCTP_ZONE_DESTROY(sctppcbinfo.ipi_zone_asoc);
SCTP_ZONE_DESTROY(sctppcbinfo.ipi_zone_laddr);
SCTP_ZONE_DESTROY(sctppcbinfo.ipi_zone_net);
SCTP_ZONE_DESTROY(sctppcbinfo.ipi_zone_chunk);
SCTP_ZONE_DESTROY(sctppcbinfo.ipi_zone_readq);
SCTP_ZONE_DESTROY(sctppcbinfo.ipi_zone_strmoq);
SCTP_ZONE_DESTROY(sctppcbinfo.ipi_zone_asconf);
SCTP_ZONE_DESTROY(sctppcbinfo.ipi_zone_asconf_ack);
SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_ep));
SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_asoc));
SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_laddr));
SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_net));
SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_chunk));
SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_readq));
SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_strmoq));
SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_asconf));
SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_asconf_ack));
}
@ -5969,7 +5985,7 @@ sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m,
/* peer does not support auth but sent a chunks list? */
return (-31);
}
if (!sctp_asconf_auth_nochk && stcb->asoc.peer_supports_asconf &&
if (!SCTP_BASE_SYSCTL(sctp_asconf_auth_nochk) && stcb->asoc.peer_supports_asconf &&
!stcb->asoc.peer_supports_auth) {
/* peer supports asconf but not auth? */
return (-32);
@ -6076,11 +6092,11 @@ sctp_is_vtag_good(struct sctp_inpcb *inp, uint32_t tag, struct timeval *now, int
int i;
SCTP_INP_INFO_WLOCK();
chain = &sctppcbinfo.vtag_timewait[(tag % SCTP_STACK_VTAG_HASH_SIZE)];
chain = &SCTP_BASE_INFO(vtag_timewait[(tag % SCTP_STACK_VTAG_HASH_SIZE))];
/* First is the vtag in use ? */
head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(tag,
sctppcbinfo.hashasocmark)];
head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(tag,
SCTP_BASE_INFO(hashasocmark))];
if (head == NULL) {
goto check_restart;
}
@ -6101,8 +6117,8 @@ sctp_is_vtag_good(struct sctp_inpcb *inp, uint32_t tag, struct timeval *now, int
}
check_restart:
/* Now lets check the restart hash */
head = &sctppcbinfo.sctp_restarthash[SCTP_PCBHASH_ASOC(tag,
sctppcbinfo.hashrestartmark)];
head = &SCTP_BASE_INFO(sctp_restarthash)[SCTP_PCBHASH_ASOC(tag,
SCTP_BASE_INFO(hashrestartmark))];
if (head == NULL) {
goto check_time_wait;
}
@ -6177,7 +6193,7 @@ sctp_drain_mbufs(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
/* We look for anything larger than the cum-ack + 1 */
SCTP_STAT_INCR(sctps_protocol_drain_calls);
if (sctp_do_drain == 0) {
if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
return;
}
asoc = &stcb->asoc;
@ -6246,7 +6262,7 @@ sctp_drain_mbufs(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
ctl->data = NULL;
}
sctp_free_remote_addr(ctl->whoFrom);
SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_readq, ctl);
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), ctl);
SCTP_DECR_READQ_COUNT();
}
ctl = nctl;
@ -6299,6 +6315,9 @@ sctp_drain_mbufs(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
/* found the new highest */
asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn + gap;
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
sctp_log_map(0, 8, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
}
break;
}
gap--;
@ -6308,6 +6327,9 @@ sctp_drain_mbufs(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
memset(asoc->mapping_array, 0, asoc->mapping_array_size);
asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
sctp_log_map(0, 9, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
}
}
asoc->last_revoke_count = cnt;
(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
@ -6340,7 +6362,7 @@ sctp_drain()
struct sctp_tcb *stcb;
SCTP_INP_INFO_RLOCK();
LIST_FOREACH(inp, &sctppcbinfo.listhead, sctp_list) {
LIST_FOREACH(inp, &SCTP_BASE_INFO(listhead), sctp_list) {
/* For each endpoint */
SCTP_INP_RLOCK(inp);
LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
@ -6407,7 +6429,7 @@ sctp_initiate_iterator(inp_func inpf,
it->iterator_flags = SCTP_ITERATOR_DO_SINGLE_INP;
} else {
SCTP_INP_INFO_RLOCK();
it->inp = LIST_FIRST(&sctppcbinfo.listhead);
it->inp = LIST_FIRST(&SCTP_BASE_INFO(listhead));
SCTP_INP_INFO_RUNLOCK();
it->iterator_flags = SCTP_ITERATOR_DO_ALL_INP;
@ -6417,9 +6439,9 @@ sctp_initiate_iterator(inp_func inpf,
if (it->inp) {
SCTP_INP_INCR_REF(it->inp);
}
TAILQ_INSERT_TAIL(&sctppcbinfo.iteratorhead, it, sctp_nxt_itr);
TAILQ_INSERT_TAIL(&SCTP_BASE_INFO(iteratorhead), it, sctp_nxt_itr);
#if defined(SCTP_USE_THREAD_BASED_ITERATOR)
if (sctppcbinfo.iterator_running == 0) {
if (SCTP_BASE_INFO(iterator_running) == 0) {
sctp_wakeup_iterator();
}
SCTP_IPI_ITERATOR_WQ_UNLOCK();


@ -39,6 +39,7 @@ __FBSDID("$FreeBSD$");
#include <netinet/sctp_os.h>
#include <netinet/sctp.h>
#include <netinet/sctp_constants.h>
#include <netinet/sctp_sysctl.h>
LIST_HEAD(sctppcbhead, sctp_inpcb);
LIST_HEAD(sctpasochead, sctp_tcb);
@ -139,6 +140,7 @@ struct sctp_tagblock {
struct sctp_timewait vtag_block[SCTP_NUMBER_IN_VTAG_BLOCK];
};
struct sctp_epinfo {
struct sctpasochead *sctp_asochash;
u_long hashasocmark;
@ -237,6 +239,23 @@ struct sctp_epinfo {
};
struct sctp_base_info {
/*
* All static structures that anchor the system must be here.
*/
struct sctp_epinfo sctppcbinfo;
struct sctpstat sctpstat;
struct sctp_sysctl sctpsysctl;
uint8_t first_time;
char sctp_pcb_initialized;
#if defined(SCTP_PACKET_LOGGING)
int packet_log_writers;
int packet_log_end;
uint8_t packet_log_buffer[SCTP_PACKET_LOG_SIZE];
#endif
};
/*-
* Here we have all the relevant information for each SCTP entity created. We
* will need to modify this as approprate. We also need to figure out how to
@ -433,7 +452,11 @@ struct sctp_tcb {
#if defined(_KERNEL)
extern struct sctp_epinfo sctppcbinfo;
/* Attention Julian, this is the extern that
* goes with the base info. sctp_pcb.c has
* the real definition.
*/
extern struct sctp_base_info system_base_info;
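For reference, the accessor macros that pair with this extern are not part of this hunk. A minimal sketch, assuming they simply select members of the single system_base_info instance (the committed definitions live in the OS-specific shim header and may differ):

    /* Sketch only -- assumed expansions, not the committed definitions. */
    #define SCTP_BASE_INFO(__m)   (system_base_info.sctppcbinfo.__m)
    #define SCTP_BASE_SYSCTL(__m) (system_base_info.sctpsysctl.__m)

With that reading, an expression such as SCTP_BASE_INFO(ipi_count_ep) in the sctp_pcb_init() hunk above is a member access on the one anchoring structure rather than a reference to a file-scope global.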
#ifdef INET6
int SCTP6_ARE_ADDR_EQUAL(struct sockaddr_in6 *a, struct sockaddr_in6 *b);
@ -556,6 +579,7 @@ int sctp_del_remote_addr(struct sctp_tcb *, struct sockaddr *);
void sctp_pcb_init(void);
void sctp_pcb_finish(void);
void sctp_add_local_addr_restricted(struct sctp_tcb *, struct sctp_ifa *);
void sctp_del_local_addr_restricted(struct sctp_tcb *, struct sctp_ifa *);


@ -212,7 +212,7 @@ sctp_get_peeloff(struct socket *head, sctp_assoc_t assoc_id, int *error)
/* We remove it right away */
#ifdef SCTP_LOCK_LOGGING
if (sctp_logging_level & SCTP_LOCK_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) {
sctp_log_lock(inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_SOCK);
}
#endif


@ -32,86 +32,90 @@
__FBSDID("$FreeBSD$");
#include <netinet/sctp_os.h>
#include <netinet/sctp.h>
#include <netinet/sctp_constants.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
/*
* sysctl tunable variables
*/
uint32_t sctp_sendspace = SCTPCTL_MAXDGRAM_DEFAULT;
uint32_t sctp_recvspace = SCTPCTL_RECVSPACE_DEFAULT;
uint32_t sctp_auto_asconf = SCTPCTL_AUTOASCONF_DEFAULT;
uint32_t sctp_multiple_asconfs = SCTPCTL_MULTIPLEASCONFS_DEFAULT;
uint32_t sctp_ecn_enable = SCTPCTL_ECN_ENABLE_DEFAULT;
uint32_t sctp_ecn_nonce = SCTPCTL_ECN_NONCE_DEFAULT;
uint32_t sctp_strict_sacks = SCTPCTL_STRICT_SACKS_DEFAULT;
uint32_t sctp_no_csum_on_loopback = SCTPCTL_LOOPBACK_NOCSUM_DEFAULT;
uint32_t sctp_strict_init = SCTPCTL_STRICT_INIT_DEFAULT;
uint32_t sctp_peer_chunk_oh = SCTPCTL_PEER_CHKOH_DEFAULT;
uint32_t sctp_max_burst_default = SCTPCTL_MAXBURST_DEFAULT;
uint32_t sctp_max_chunks_on_queue = SCTPCTL_MAXCHUNKS_DEFAULT;
uint32_t sctp_hashtblsize = SCTPCTL_TCBHASHSIZE_DEFAULT;
uint32_t sctp_pcbtblsize = SCTPCTL_PCBHASHSIZE_DEFAULT;
uint32_t sctp_min_split_point = SCTPCTL_MIN_SPLIT_POINT_DEFAULT;
uint32_t sctp_chunkscale = SCTPCTL_CHUNKSCALE_DEFAULT;
uint32_t sctp_delayed_sack_time_default = SCTPCTL_DELAYED_SACK_TIME_DEFAULT;
uint32_t sctp_sack_freq_default = SCTPCTL_SACK_FREQ_DEFAULT;
uint32_t sctp_system_free_resc_limit = SCTPCTL_SYS_RESOURCE_DEFAULT;
uint32_t sctp_asoc_free_resc_limit = SCTPCTL_ASOC_RESOURCE_DEFAULT;
uint32_t sctp_heartbeat_interval_default = SCTPCTL_HEARTBEAT_INTERVAL_DEFAULT;
uint32_t sctp_pmtu_raise_time_default = SCTPCTL_PMTU_RAISE_TIME_DEFAULT;
uint32_t sctp_shutdown_guard_time_default = SCTPCTL_SHUTDOWN_GUARD_TIME_DEFAULT;
uint32_t sctp_secret_lifetime_default = SCTPCTL_SECRET_LIFETIME_DEFAULT;
uint32_t sctp_rto_max_default = SCTPCTL_RTO_MAX_DEFAULT;
uint32_t sctp_rto_min_default = SCTPCTL_RTO_MIN_DEFAULT;
uint32_t sctp_rto_initial_default = SCTPCTL_RTO_INITIAL_DEFAULT;
uint32_t sctp_init_rto_max_default = SCTPCTL_INIT_RTO_MAX_DEFAULT;
uint32_t sctp_valid_cookie_life_default = SCTPCTL_VALID_COOKIE_LIFE_DEFAULT;
uint32_t sctp_init_rtx_max_default = SCTPCTL_INIT_RTX_MAX_DEFAULT;
uint32_t sctp_assoc_rtx_max_default = SCTPCTL_ASSOC_RTX_MAX_DEFAULT;
uint32_t sctp_path_rtx_max_default = SCTPCTL_PATH_RTX_MAX_DEFAULT;
uint32_t sctp_add_more_threshold = SCTPCTL_ADD_MORE_ON_OUTPUT_DEFAULT;
uint32_t sctp_nr_outgoing_streams_default = SCTPCTL_OUTGOING_STREAMS_DEFAULT;
uint32_t sctp_cmt_on_off = SCTPCTL_CMT_ON_OFF_DEFAULT;
uint32_t sctp_cmt_use_dac = SCTPCTL_CMT_USE_DAC_DEFAULT;
uint32_t sctp_cmt_pf = SCTPCTL_CMT_PF_DEFAULT;
uint32_t sctp_use_cwnd_based_maxburst = SCTPCTL_CWND_MAXBURST_DEFAULT;
uint32_t sctp_early_fr = SCTPCTL_EARLY_FAST_RETRAN_DEFAULT;
uint32_t sctp_early_fr_msec = SCTPCTL_EARLY_FAST_RETRAN_MSEC_DEFAULT;
uint32_t sctp_asconf_auth_nochk = SCTPCTL_ASCONF_AUTH_NOCHK_DEFAULT;
uint32_t sctp_auth_disable = SCTPCTL_AUTH_DISABLE_DEFAULT;
uint32_t sctp_nat_friendly = SCTPCTL_NAT_FRIENDLY_DEFAULT;
uint32_t sctp_L2_abc_variable = SCTPCTL_ABC_L_VAR_DEFAULT;
uint32_t sctp_mbuf_threshold_count = SCTPCTL_MAX_CHAINED_MBUFS_DEFAULT;
uint32_t sctp_do_drain = SCTPCTL_DO_SCTP_DRAIN_DEFAULT;
uint32_t sctp_hb_maxburst = SCTPCTL_HB_MAX_BURST_DEFAULT;
uint32_t sctp_abort_if_one_2_one_hits_limit = SCTPCTL_ABORT_AT_LIMIT_DEFAULT;
uint32_t sctp_strict_data_order = SCTPCTL_STRICT_DATA_ORDER_DEFAULT;
uint32_t sctp_min_residual = SCTPCTL_MIN_RESIDUAL_DEFAULT;
uint32_t sctp_max_retran_chunk = SCTPCTL_MAX_RETRAN_CHUNK_DEFAULT;
uint32_t sctp_logging_level = SCTPCTL_LOGGING_LEVEL_DEFAULT;
void
sctp_init_sysctls()
{
SCTP_BASE_SYSCTL(sctp_sendspace) = SCTPCTL_MAXDGRAM_DEFAULT;
SCTP_BASE_SYSCTL(sctp_recvspace) = SCTPCTL_RECVSPACE_DEFAULT;
SCTP_BASE_SYSCTL(sctp_auto_asconf) = SCTPCTL_AUTOASCONF_DEFAULT;
SCTP_BASE_SYSCTL(sctp_multiple_asconfs) = SCTPCTL_MULTIPLEASCONFS_DEFAULT;
SCTP_BASE_SYSCTL(sctp_ecn_enable) = SCTPCTL_ECN_ENABLE_DEFAULT;
SCTP_BASE_SYSCTL(sctp_ecn_nonce) = SCTPCTL_ECN_NONCE_DEFAULT;
SCTP_BASE_SYSCTL(sctp_strict_sacks) = SCTPCTL_STRICT_SACKS_DEFAULT;
SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) = SCTPCTL_LOOPBACK_NOCSUM_DEFAULT;
SCTP_BASE_SYSCTL(sctp_strict_init) = SCTPCTL_STRICT_INIT_DEFAULT;
SCTP_BASE_SYSCTL(sctp_peer_chunk_oh) = SCTPCTL_PEER_CHKOH_DEFAULT;
SCTP_BASE_SYSCTL(sctp_max_burst_default) = SCTPCTL_MAXBURST_DEFAULT;
SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue) = SCTPCTL_MAXCHUNKS_DEFAULT;
SCTP_BASE_SYSCTL(sctp_hashtblsize) = SCTPCTL_TCBHASHSIZE_DEFAULT;
SCTP_BASE_SYSCTL(sctp_pcbtblsize) = SCTPCTL_PCBHASHSIZE_DEFAULT;
SCTP_BASE_SYSCTL(sctp_min_split_point) = SCTPCTL_MIN_SPLIT_POINT_DEFAULT;
SCTP_BASE_SYSCTL(sctp_chunkscale) = SCTPCTL_CHUNKSCALE_DEFAULT;
SCTP_BASE_SYSCTL(sctp_delayed_sack_time_default) = SCTPCTL_DELAYED_SACK_TIME_DEFAULT;
SCTP_BASE_SYSCTL(sctp_sack_freq_default) = SCTPCTL_SACK_FREQ_DEFAULT;
SCTP_BASE_SYSCTL(sctp_system_free_resc_limit) = SCTPCTL_SYS_RESOURCE_DEFAULT;
SCTP_BASE_SYSCTL(sctp_asoc_free_resc_limit) = SCTPCTL_ASOC_RESOURCE_DEFAULT;
SCTP_BASE_SYSCTL(sctp_heartbeat_interval_default) = SCTPCTL_HEARTBEAT_INTERVAL_DEFAULT;
SCTP_BASE_SYSCTL(sctp_pmtu_raise_time_default) = SCTPCTL_PMTU_RAISE_TIME_DEFAULT;
SCTP_BASE_SYSCTL(sctp_shutdown_guard_time_default) = SCTPCTL_SHUTDOWN_GUARD_TIME_DEFAULT;
SCTP_BASE_SYSCTL(sctp_secret_lifetime_default) = SCTPCTL_SECRET_LIFETIME_DEFAULT;
SCTP_BASE_SYSCTL(sctp_rto_max_default) = SCTPCTL_RTO_MAX_DEFAULT;
SCTP_BASE_SYSCTL(sctp_rto_min_default) = SCTPCTL_RTO_MIN_DEFAULT;
SCTP_BASE_SYSCTL(sctp_rto_initial_default) = SCTPCTL_RTO_INITIAL_DEFAULT;
SCTP_BASE_SYSCTL(sctp_init_rto_max_default) = SCTPCTL_INIT_RTO_MAX_DEFAULT;
SCTP_BASE_SYSCTL(sctp_valid_cookie_life_default) = SCTPCTL_VALID_COOKIE_LIFE_DEFAULT;
SCTP_BASE_SYSCTL(sctp_init_rtx_max_default) = SCTPCTL_INIT_RTX_MAX_DEFAULT;
SCTP_BASE_SYSCTL(sctp_assoc_rtx_max_default) = SCTPCTL_ASSOC_RTX_MAX_DEFAULT;
SCTP_BASE_SYSCTL(sctp_path_rtx_max_default) = SCTPCTL_PATH_RTX_MAX_DEFAULT;
SCTP_BASE_SYSCTL(sctp_add_more_threshold) = SCTPCTL_ADD_MORE_ON_OUTPUT_DEFAULT;
SCTP_BASE_SYSCTL(sctp_nr_outgoing_streams_default) = SCTPCTL_OUTGOING_STREAMS_DEFAULT;
SCTP_BASE_SYSCTL(sctp_cmt_on_off) = SCTPCTL_CMT_ON_OFF_DEFAULT;
SCTP_BASE_SYSCTL(sctp_cmt_use_dac) = SCTPCTL_CMT_USE_DAC_DEFAULT;
SCTP_BASE_SYSCTL(sctp_cmt_pf) = SCTPCTL_CMT_PF_DEFAULT;
SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) = SCTPCTL_CWND_MAXBURST_DEFAULT;
SCTP_BASE_SYSCTL(sctp_early_fr) = SCTPCTL_EARLY_FAST_RETRAN_DEFAULT;
SCTP_BASE_SYSCTL(sctp_early_fr_msec) = SCTPCTL_EARLY_FAST_RETRAN_MSEC_DEFAULT;
SCTP_BASE_SYSCTL(sctp_asconf_auth_nochk) = SCTPCTL_ASCONF_AUTH_NOCHK_DEFAULT;
SCTP_BASE_SYSCTL(sctp_auth_disable) = SCTPCTL_AUTH_DISABLE_DEFAULT;
SCTP_BASE_SYSCTL(sctp_nat_friendly) = SCTPCTL_NAT_FRIENDLY_DEFAULT;
SCTP_BASE_SYSCTL(sctp_L2_abc_variable) = SCTPCTL_ABC_L_VAR_DEFAULT;
SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) = SCTPCTL_MAX_CHAINED_MBUFS_DEFAULT;
SCTP_BASE_SYSCTL(sctp_do_drain) = SCTPCTL_DO_SCTP_DRAIN_DEFAULT;
SCTP_BASE_SYSCTL(sctp_hb_maxburst) = SCTPCTL_HB_MAX_BURST_DEFAULT;
SCTP_BASE_SYSCTL(sctp_abort_if_one_2_one_hits_limit) = SCTPCTL_ABORT_AT_LIMIT_DEFAULT;
SCTP_BASE_SYSCTL(sctp_strict_data_order) = SCTPCTL_STRICT_DATA_ORDER_DEFAULT;
SCTP_BASE_SYSCTL(sctp_min_residual) = SCTPCTL_MIN_RESIDUAL_DEFAULT;
SCTP_BASE_SYSCTL(sctp_max_retran_chunk) = SCTPCTL_MAX_RETRAN_CHUNK_DEFAULT;
SCTP_BASE_SYSCTL(sctp_logging_level) = SCTPCTL_LOGGING_LEVEL_DEFAULT;
/* JRS - Variable for default congestion control module */
uint32_t sctp_default_cc_module = SCTPCTL_DEFAULT_CC_MODULE_DEFAULT;
uint32_t sctp_default_frag_interleave = SCTPCTL_DEFAULT_FRAG_INTERLEAVE_DEFAULT;
uint32_t sctp_mobility_base = SCTPCTL_MOBILITY_BASE_DEFAULT;
uint32_t sctp_mobility_fasthandoff = SCTPCTL_MOBILITY_FASTHANDOFF_DEFAULT;
SCTP_BASE_SYSCTL(sctp_default_cc_module) = SCTPCTL_DEFAULT_CC_MODULE_DEFAULT;
SCTP_BASE_SYSCTL(sctp_default_frag_interleave) = SCTPCTL_DEFAULT_FRAG_INTERLEAVE_DEFAULT;
SCTP_BASE_SYSCTL(sctp_mobility_base) = SCTPCTL_MOBILITY_BASE_DEFAULT;
SCTP_BASE_SYSCTL(sctp_mobility_fasthandoff) = SCTPCTL_MOBILITY_FASTHANDOFF_DEFAULT;
#if defined(SCTP_LOCAL_TRACE_BUF)
struct sctp_log sctp_log;
memset(&SCTP_BASE_SYSCTL(sctp_log), 0, sizeof(struct sctp_log));
#endif
uint32_t sctp_udp_tunneling_for_client_enable = SCTPCTL_UDP_TUNNELING_FOR_CLIENT_ENABLE_DEFAULT;
uint32_t sctp_udp_tunneling_port = SCTPCTL_UDP_TUNNELING_PORT_DEFAULT;
#ifdef SCTP_DEBUG
uint32_t sctp_debug_on = SCTPCTL_DEBUG_DEFAULT;
SCTP_BASE_SYSCTL(sctp_udp_tunneling_for_client_enable) = SCTPCTL_UDP_TUNNELING_FOR_CLIENT_ENABLE_DEFAULT;
SCTP_BASE_SYSCTL(sctp_udp_tunneling_port) = SCTPCTL_UDP_TUNNELING_PORT_DEFAULT;
SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) = SCTPCTL_SACK_IMMEDIATELY_ENABLE_DEFAULT;
#if defined(SCTP_DEBUG)
SCTP_BASE_SYSCTL(sctp_debug_on) = SCTPCTL_DEBUG_DEFAULT;
#endif
struct sctpstat sctpstat;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
SCTP_BASE_SYSCTL(sctp_output_unlocked) = SCTPCTL_OUTPUT_UNLOCKED_DEFAULT;
#endif
}
/* It returns an upper limit. No filtering is done here */
static unsigned int
@ -329,7 +333,7 @@ sctp_assoclist(SYSCTL_HANDLER_ARGS)
SCTP_INP_INFO_RLOCK();
if (req->oldptr == USER_ADDR_NULL) {
LIST_FOREACH(inp, &sctppcbinfo.listhead, sctp_list) {
LIST_FOREACH(inp, &SCTP_BASE_INFO(listhead), sctp_list) {
SCTP_INP_RLOCK(inp);
number_of_endpoints++;
number_of_local_addresses += number_of_addresses(inp);
@ -357,7 +361,7 @@ sctp_assoclist(SYSCTL_HANDLER_ARGS)
SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_SYSCTL, EPERM);
return EPERM;
}
LIST_FOREACH(inp, &sctppcbinfo.listhead, sctp_list) {
LIST_FOREACH(inp, &SCTP_BASE_INFO(listhead), sctp_list) {
SCTP_INP_RLOCK(inp);
xinpcb.last = 0;
xinpcb.local_port = ntohs(inp->sctp_lport);
@ -509,22 +513,23 @@ sysctl_sctp_udp_tunneling_check(SYSCTL_HANDLER_ARGS)
int error;
uint32_t old_sctp_udp_tunneling_port;
old_sctp_udp_tunneling_port = sctp_udp_tunneling_port;
old_sctp_udp_tunneling_port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
if (error == 0) {
RANGECHK(sctp_udp_tunneling_port, SCTPCTL_UDP_TUNNELING_PORT_MIN, SCTPCTL_UDP_TUNNELING_PORT_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port), SCTPCTL_UDP_TUNNELING_PORT_MIN, SCTPCTL_UDP_TUNNELING_PORT_MAX);
if (old_sctp_udp_tunneling_port) {
sctp_over_udp_stop();
}
if (sctp_udp_tunneling_port) {
if (SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) {
if (sctp_over_udp_start()) {
sctp_udp_tunneling_port = 0;
SCTP_BASE_SYSCTL(sctp_udp_tunneling_port) = 0;
}
}
}
return (error);
}
static int
sysctl_sctp_check(SYSCTL_HANDLER_ARGS)
{
@ -532,328 +537,363 @@ sysctl_sctp_check(SYSCTL_HANDLER_ARGS)
error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
if (error == 0) {
RANGECHK(sctp_sendspace, SCTPCTL_MAXDGRAM_MIN, SCTPCTL_MAXDGRAM_MAX);
RANGECHK(sctp_recvspace, SCTPCTL_RECVSPACE_MIN, SCTPCTL_RECVSPACE_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_sendspace), SCTPCTL_MAXDGRAM_MIN, SCTPCTL_MAXDGRAM_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_recvspace), SCTPCTL_RECVSPACE_MIN, SCTPCTL_RECVSPACE_MAX);
#if defined(__FreeBSD__) || defined(SCTP_APPLE_AUTO_ASCONF)
RANGECHK(sctp_auto_asconf, SCTPCTL_AUTOASCONF_MIN, SCTPCTL_AUTOASCONF_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_auto_asconf), SCTPCTL_AUTOASCONF_MIN, SCTPCTL_AUTOASCONF_MAX);
#endif
RANGECHK(sctp_ecn_enable, SCTPCTL_ECN_ENABLE_MIN, SCTPCTL_ECN_ENABLE_MAX);
RANGECHK(sctp_ecn_nonce, SCTPCTL_ECN_NONCE_MIN, SCTPCTL_ECN_NONCE_MAX);
RANGECHK(sctp_strict_sacks, SCTPCTL_STRICT_SACKS_MIN, SCTPCTL_STRICT_SACKS_MAX);
RANGECHK(sctp_no_csum_on_loopback, SCTPCTL_LOOPBACK_NOCSUM_MIN, SCTPCTL_LOOPBACK_NOCSUM_MAX);
RANGECHK(sctp_strict_init, SCTPCTL_STRICT_INIT_MIN, SCTPCTL_STRICT_INIT_MAX);
RANGECHK(sctp_peer_chunk_oh, SCTPCTL_PEER_CHKOH_MIN, SCTPCTL_PEER_CHKOH_MAX);
RANGECHK(sctp_max_burst_default, SCTPCTL_MAXBURST_MIN, SCTPCTL_MAXBURST_MAX);
RANGECHK(sctp_max_chunks_on_queue, SCTPCTL_MAXCHUNKS_MIN, SCTPCTL_MAXCHUNKS_MAX);
RANGECHK(sctp_hashtblsize, SCTPCTL_TCBHASHSIZE_MIN, SCTPCTL_TCBHASHSIZE_MAX);
RANGECHK(sctp_pcbtblsize, SCTPCTL_PCBHASHSIZE_MIN, SCTPCTL_PCBHASHSIZE_MAX);
RANGECHK(sctp_min_split_point, SCTPCTL_MIN_SPLIT_POINT_MIN, SCTPCTL_MIN_SPLIT_POINT_MAX);
RANGECHK(sctp_chunkscale, SCTPCTL_CHUNKSCALE_MIN, SCTPCTL_CHUNKSCALE_MAX);
RANGECHK(sctp_delayed_sack_time_default, SCTPCTL_DELAYED_SACK_TIME_MIN, SCTPCTL_DELAYED_SACK_TIME_MAX);
RANGECHK(sctp_sack_freq_default, SCTPCTL_SACK_FREQ_MIN, SCTPCTL_SACK_FREQ_MAX);
RANGECHK(sctp_system_free_resc_limit, SCTPCTL_SYS_RESOURCE_MIN, SCTPCTL_SYS_RESOURCE_MAX);
RANGECHK(sctp_asoc_free_resc_limit, SCTPCTL_ASOC_RESOURCE_MIN, SCTPCTL_ASOC_RESOURCE_MAX);
RANGECHK(sctp_heartbeat_interval_default, SCTPCTL_HEARTBEAT_INTERVAL_MIN, SCTPCTL_HEARTBEAT_INTERVAL_MAX);
RANGECHK(sctp_pmtu_raise_time_default, SCTPCTL_PMTU_RAISE_TIME_MIN, SCTPCTL_PMTU_RAISE_TIME_MAX);
RANGECHK(sctp_shutdown_guard_time_default, SCTPCTL_SHUTDOWN_GUARD_TIME_MIN, SCTPCTL_SHUTDOWN_GUARD_TIME_MAX);
RANGECHK(sctp_secret_lifetime_default, SCTPCTL_SECRET_LIFETIME_MIN, SCTPCTL_SECRET_LIFETIME_MAX);
RANGECHK(sctp_rto_max_default, SCTPCTL_RTO_MAX_MIN, SCTPCTL_RTO_MAX_MAX);
RANGECHK(sctp_rto_min_default, SCTPCTL_RTO_MIN_MIN, SCTPCTL_RTO_MIN_MAX);
RANGECHK(sctp_rto_initial_default, SCTPCTL_RTO_INITIAL_MIN, SCTPCTL_RTO_INITIAL_MAX);
RANGECHK(sctp_init_rto_max_default, SCTPCTL_INIT_RTO_MAX_MIN, SCTPCTL_INIT_RTO_MAX_MAX);
RANGECHK(sctp_valid_cookie_life_default, SCTPCTL_VALID_COOKIE_LIFE_MIN, SCTPCTL_VALID_COOKIE_LIFE_MAX);
RANGECHK(sctp_init_rtx_max_default, SCTPCTL_INIT_RTX_MAX_MIN, SCTPCTL_INIT_RTX_MAX_MAX);
RANGECHK(sctp_assoc_rtx_max_default, SCTPCTL_ASSOC_RTX_MAX_MIN, SCTPCTL_ASSOC_RTX_MAX_MAX);
RANGECHK(sctp_path_rtx_max_default, SCTPCTL_PATH_RTX_MAX_MIN, SCTPCTL_PATH_RTX_MAX_MAX);
RANGECHK(sctp_add_more_threshold, SCTPCTL_ADD_MORE_ON_OUTPUT_MIN, SCTPCTL_ADD_MORE_ON_OUTPUT_MAX);
RANGECHK(sctp_nr_outgoing_streams_default, SCTPCTL_OUTGOING_STREAMS_MIN, SCTPCTL_OUTGOING_STREAMS_MAX);
RANGECHK(sctp_cmt_on_off, SCTPCTL_CMT_ON_OFF_MIN, SCTPCTL_CMT_ON_OFF_MAX);
RANGECHK(sctp_cmt_use_dac, SCTPCTL_CMT_USE_DAC_MIN, SCTPCTL_CMT_USE_DAC_MAX);
RANGECHK(sctp_cmt_pf, SCTPCTL_CMT_PF_MIN, SCTPCTL_CMT_PF_MAX);
RANGECHK(sctp_use_cwnd_based_maxburst, SCTPCTL_CWND_MAXBURST_MIN, SCTPCTL_CWND_MAXBURST_MAX);
RANGECHK(sctp_early_fr, SCTPCTL_EARLY_FAST_RETRAN_MIN, SCTPCTL_EARLY_FAST_RETRAN_MAX);
RANGECHK(sctp_early_fr_msec, SCTPCTL_EARLY_FAST_RETRAN_MSEC_MIN, SCTPCTL_EARLY_FAST_RETRAN_MSEC_MAX);
RANGECHK(sctp_asconf_auth_nochk, SCTPCTL_ASCONF_AUTH_NOCHK_MIN, SCTPCTL_ASCONF_AUTH_NOCHK_MAX);
RANGECHK(sctp_auth_disable, SCTPCTL_AUTH_DISABLE_MIN, SCTPCTL_AUTH_DISABLE_MAX);
RANGECHK(sctp_nat_friendly, SCTPCTL_NAT_FRIENDLY_MIN, SCTPCTL_NAT_FRIENDLY_MAX);
RANGECHK(sctp_L2_abc_variable, SCTPCTL_ABC_L_VAR_MIN, SCTPCTL_ABC_L_VAR_MAX);
RANGECHK(sctp_mbuf_threshold_count, SCTPCTL_MAX_CHAINED_MBUFS_MIN, SCTPCTL_MAX_CHAINED_MBUFS_MAX);
RANGECHK(sctp_do_drain, SCTPCTL_DO_SCTP_DRAIN_MIN, SCTPCTL_DO_SCTP_DRAIN_MAX);
RANGECHK(sctp_hb_maxburst, SCTPCTL_HB_MAX_BURST_MIN, SCTPCTL_HB_MAX_BURST_MAX);
RANGECHK(sctp_abort_if_one_2_one_hits_limit, SCTPCTL_ABORT_AT_LIMIT_MIN, SCTPCTL_ABORT_AT_LIMIT_MAX);
RANGECHK(sctp_strict_data_order, SCTPCTL_STRICT_DATA_ORDER_MIN, SCTPCTL_STRICT_DATA_ORDER_MAX);
RANGECHK(sctp_min_residual, SCTPCTL_MIN_RESIDUAL_MIN, SCTPCTL_MIN_RESIDUAL_MAX);
RANGECHK(sctp_max_retran_chunk, SCTPCTL_MAX_RETRAN_CHUNK_MIN, SCTPCTL_MAX_RETRAN_CHUNK_MAX);
RANGECHK(sctp_logging_level, SCTPCTL_LOGGING_LEVEL_MIN, SCTPCTL_LOGGING_LEVEL_MAX);
RANGECHK(sctp_default_cc_module, SCTPCTL_DEFAULT_CC_MODULE_MIN, SCTPCTL_DEFAULT_CC_MODULE_MAX);
RANGECHK(sctp_default_frag_interleave, SCTPCTL_DEFAULT_FRAG_INTERLEAVE_MIN, SCTPCTL_DEFAULT_FRAG_INTERLEAVE_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_ecn_enable), SCTPCTL_ECN_ENABLE_MIN, SCTPCTL_ECN_ENABLE_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_ecn_nonce), SCTPCTL_ECN_NONCE_MIN, SCTPCTL_ECN_NONCE_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_strict_sacks), SCTPCTL_STRICT_SACKS_MIN, SCTPCTL_STRICT_SACKS_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback), SCTPCTL_LOOPBACK_NOCSUM_MIN, SCTPCTL_LOOPBACK_NOCSUM_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_strict_init), SCTPCTL_STRICT_INIT_MIN, SCTPCTL_STRICT_INIT_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_peer_chunk_oh), SCTPCTL_PEER_CHKOH_MIN, SCTPCTL_PEER_CHKOH_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_max_burst_default), SCTPCTL_MAXBURST_MIN, SCTPCTL_MAXBURST_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue), SCTPCTL_MAXCHUNKS_MIN, SCTPCTL_MAXCHUNKS_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_hashtblsize), SCTPCTL_TCBHASHSIZE_MIN, SCTPCTL_TCBHASHSIZE_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_pcbtblsize), SCTPCTL_PCBHASHSIZE_MIN, SCTPCTL_PCBHASHSIZE_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_min_split_point), SCTPCTL_MIN_SPLIT_POINT_MIN, SCTPCTL_MIN_SPLIT_POINT_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_chunkscale), SCTPCTL_CHUNKSCALE_MIN, SCTPCTL_CHUNKSCALE_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_delayed_sack_time_default), SCTPCTL_DELAYED_SACK_TIME_MIN, SCTPCTL_DELAYED_SACK_TIME_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_sack_freq_default), SCTPCTL_SACK_FREQ_MIN, SCTPCTL_SACK_FREQ_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_system_free_resc_limit), SCTPCTL_SYS_RESOURCE_MIN, SCTPCTL_SYS_RESOURCE_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_asoc_free_resc_limit), SCTPCTL_ASOC_RESOURCE_MIN, SCTPCTL_ASOC_RESOURCE_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_heartbeat_interval_default), SCTPCTL_HEARTBEAT_INTERVAL_MIN, SCTPCTL_HEARTBEAT_INTERVAL_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_pmtu_raise_time_default), SCTPCTL_PMTU_RAISE_TIME_MIN, SCTPCTL_PMTU_RAISE_TIME_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_shutdown_guard_time_default), SCTPCTL_SHUTDOWN_GUARD_TIME_MIN, SCTPCTL_SHUTDOWN_GUARD_TIME_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_secret_lifetime_default), SCTPCTL_SECRET_LIFETIME_MIN, SCTPCTL_SECRET_LIFETIME_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_rto_max_default), SCTPCTL_RTO_MAX_MIN, SCTPCTL_RTO_MAX_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_rto_min_default), SCTPCTL_RTO_MIN_MIN, SCTPCTL_RTO_MIN_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_rto_initial_default), SCTPCTL_RTO_INITIAL_MIN, SCTPCTL_RTO_INITIAL_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_init_rto_max_default), SCTPCTL_INIT_RTO_MAX_MIN, SCTPCTL_INIT_RTO_MAX_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_valid_cookie_life_default), SCTPCTL_VALID_COOKIE_LIFE_MIN, SCTPCTL_VALID_COOKIE_LIFE_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_init_rtx_max_default), SCTPCTL_INIT_RTX_MAX_MIN, SCTPCTL_INIT_RTX_MAX_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_assoc_rtx_max_default), SCTPCTL_ASSOC_RTX_MAX_MIN, SCTPCTL_ASSOC_RTX_MAX_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_path_rtx_max_default), SCTPCTL_PATH_RTX_MAX_MIN, SCTPCTL_PATH_RTX_MAX_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTPCTL_ADD_MORE_ON_OUTPUT_MIN, SCTPCTL_ADD_MORE_ON_OUTPUT_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_nr_outgoing_streams_default), SCTPCTL_OUTGOING_STREAMS_MIN, SCTPCTL_OUTGOING_STREAMS_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_cmt_on_off), SCTPCTL_CMT_ON_OFF_MIN, SCTPCTL_CMT_ON_OFF_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_cmt_use_dac), SCTPCTL_CMT_USE_DAC_MIN, SCTPCTL_CMT_USE_DAC_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_cmt_pf), SCTPCTL_CMT_PF_MIN, SCTPCTL_CMT_PF_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst), SCTPCTL_CWND_MAXBURST_MIN, SCTPCTL_CWND_MAXBURST_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_early_fr), SCTPCTL_EARLY_FAST_RETRAN_MIN, SCTPCTL_EARLY_FAST_RETRAN_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_early_fr_msec), SCTPCTL_EARLY_FAST_RETRAN_MSEC_MIN, SCTPCTL_EARLY_FAST_RETRAN_MSEC_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_asconf_auth_nochk), SCTPCTL_ASCONF_AUTH_NOCHK_MIN, SCTPCTL_ASCONF_AUTH_NOCHK_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_auth_disable), SCTPCTL_AUTH_DISABLE_MIN, SCTPCTL_AUTH_DISABLE_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_nat_friendly), SCTPCTL_NAT_FRIENDLY_MIN, SCTPCTL_NAT_FRIENDLY_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_L2_abc_variable), SCTPCTL_ABC_L_VAR_MIN, SCTPCTL_ABC_L_VAR_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count), SCTPCTL_MAX_CHAINED_MBUFS_MIN, SCTPCTL_MAX_CHAINED_MBUFS_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_do_drain), SCTPCTL_DO_SCTP_DRAIN_MIN, SCTPCTL_DO_SCTP_DRAIN_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_hb_maxburst), SCTPCTL_HB_MAX_BURST_MIN, SCTPCTL_HB_MAX_BURST_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_abort_if_one_2_one_hits_limit), SCTPCTL_ABORT_AT_LIMIT_MIN, SCTPCTL_ABORT_AT_LIMIT_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_strict_data_order), SCTPCTL_STRICT_DATA_ORDER_MIN, SCTPCTL_STRICT_DATA_ORDER_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_min_residual), SCTPCTL_MIN_RESIDUAL_MIN, SCTPCTL_MIN_RESIDUAL_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_max_retran_chunk), SCTPCTL_MAX_RETRAN_CHUNK_MIN, SCTPCTL_MAX_RETRAN_CHUNK_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_logging_level), SCTPCTL_LOGGING_LEVEL_MIN, SCTPCTL_LOGGING_LEVEL_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_default_cc_module), SCTPCTL_DEFAULT_CC_MODULE_MIN, SCTPCTL_DEFAULT_CC_MODULE_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_default_frag_interleave), SCTPCTL_DEFAULT_FRAG_INTERLEAVE_MIN, SCTPCTL_DEFAULT_FRAG_INTERLEAVE_MAX);
#if defined(__FreeBSD__) || defined(SCTP_APPLE_MOBILITY_BASE)
RANGECHK(sctp_mobility_base, SCTPCTL_MOBILITY_BASE_MIN, SCTPCTL_MOBILITY_BASE_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_mobility_base), SCTPCTL_MOBILITY_BASE_MIN, SCTPCTL_MOBILITY_BASE_MAX);
#endif
#if defined(__FreeBSD__) || defined(SCTP_APPLE_MOBILITY_FASTHANDOFF)
RANGECHK(sctp_mobility_fasthandoff, SCTPCTL_MOBILITY_FASTHANDOFF_MIN, SCTPCTL_MOBILITY_FASTHANDOFF_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_mobility_fasthandoff), SCTPCTL_MOBILITY_FASTHANDOFF_MIN, SCTPCTL_MOBILITY_FASTHANDOFF_MAX);
#endif
RANGECHK(sctp_udp_tunneling_for_client_enable, SCTPCTL_UDP_TUNNELING_FOR_CLIENT_ENABLE_MIN, SCTPCTL_UDP_TUNNELING_FOR_CLIENT_ENABLE_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_udp_tunneling_for_client_enable), SCTPCTL_UDP_TUNNELING_FOR_CLIENT_ENABLE_MIN, SCTPCTL_UDP_TUNNELING_FOR_CLIENT_ENABLE_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_enable_sack_immediately), SCTPCTL_SACK_IMMEDIATELY_ENABLE_MIN, SCTPCTL_SACK_IMMEDIATELY_ENABLE_MAX);
#ifdef SCTP_DEBUG
RANGECHK(sctp_debug_on, SCTPCTL_DEBUG_MIN, SCTPCTL_DEBUG_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_debug_on), SCTPCTL_DEBUG_MIN, SCTPCTL_DEBUG_MAX);
#endif
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
RANGECHK(SCTP_BASE_SYSCTL(sctp_output_unlocked), SCTPCTL_OUTPUT_UNLOCKED_MIN, SCTPCTL_OUTPUT_UNLOCKED_MAX);
#endif
}
return (error);
}
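RANGECHK() itself is defined above this hunk and is not shown. A plausible sketch, assuming it clamps the value a handler has just written into the given bounds:

    /* Assumed shape of RANGECHK -- the real macro sits earlier in this file. */
    #define RANGECHK(var, min, max) \
            if ((var) < (min)) { (var) = (min); } \
            else if ((var) > (max)) { (var) = (max); }

If that is indeed the shape, an out-of-range write through sysctl_sctp_check() is silently pinned to the nearest legal value rather than rejected with an error.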
#if defined(SCTP_LOCAL_TRACE_BUF)
static int
sysctl_sctp_cleartrace(SYSCTL_HANDLER_ARGS)
{
memset(&SCTP_BASE_SYSCTL(sctp_log), 0, sizeof(struct sctp_log));
return (0);
}
#endif
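Given the clear_trace node registered further down, the local trace buffer could presumably be flushed from userland with a plain sysctl write. A minimal illustrative snippet; the OID name follows from the SYSCTL_PROC declaration below, everything else is an assumption:

    /* Illustrative only -- needs a kernel built with SCTP_LOCAL_TRACE_BUF. */
    #include <sys/types.h>
    #include <sys/sysctl.h>
    #include <err.h>

    int
    main(void)
    {
            int one = 1;

            /* Any write reaches sysctl_sctp_cleartrace(), which zeroes the log. */
            if (sysctlbyname("net.inet.sctp.clear_trace", NULL, NULL,
                &one, sizeof(one)) == -1)
                    err(1, "sysctlbyname");
            return (0);
    }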
/*
* sysctl definitions
*/
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, sendspace, CTLTYPE_INT | CTLFLAG_RW,
&sctp_sendspace, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_sendspace), 0, sysctl_sctp_check, "IU",
SCTPCTL_MAXDGRAM_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, recvspace, CTLTYPE_INT | CTLFLAG_RW,
&sctp_recvspace, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_recvspace), 0, sysctl_sctp_check, "IU",
SCTPCTL_RECVSPACE_DESC);
#if defined(__FreeBSD__) || defined(SCTP_APPLE_AUTO_ASCONF)
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, auto_asconf, CTLTYPE_INT | CTLFLAG_RW,
&sctp_auto_asconf, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_auto_asconf), 0, sysctl_sctp_check, "IU",
SCTPCTL_AUTOASCONF_DESC);
#endif
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, ecn_enable, CTLTYPE_INT | CTLFLAG_RW,
&sctp_ecn_enable, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_ecn_enable), 0, sysctl_sctp_check, "IU",
SCTPCTL_ECN_ENABLE_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, ecn_nonce, CTLTYPE_INT | CTLFLAG_RW,
&sctp_ecn_nonce, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_ecn_nonce), 0, sysctl_sctp_check, "IU",
SCTPCTL_ECN_NONCE_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, strict_sacks, CTLTYPE_INT | CTLFLAG_RW,
&sctp_strict_sacks, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_strict_sacks), 0, sysctl_sctp_check, "IU",
SCTPCTL_STRICT_SACKS_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, loopback_nocsum, CTLTYPE_INT | CTLFLAG_RW,
&sctp_no_csum_on_loopback, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback), 0, sysctl_sctp_check, "IU",
SCTPCTL_LOOPBACK_NOCSUM_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, strict_init, CTLTYPE_INT | CTLFLAG_RW,
&sctp_strict_init, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_strict_init), 0, sysctl_sctp_check, "IU",
SCTPCTL_STRICT_INIT_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, peer_chkoh, CTLTYPE_INT | CTLFLAG_RW,
&sctp_peer_chunk_oh, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_peer_chunk_oh), 0, sysctl_sctp_check, "IU",
SCTPCTL_PEER_CHKOH_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, maxburst, CTLTYPE_INT | CTLFLAG_RW,
&sctp_max_burst_default, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_max_burst_default), 0, sysctl_sctp_check, "IU",
SCTPCTL_MAXBURST_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, maxchunks, CTLTYPE_INT | CTLFLAG_RW,
&sctp_max_chunks_on_queue, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue), 0, sysctl_sctp_check, "IU",
SCTPCTL_MAXCHUNKS_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, tcbhashsize, CTLTYPE_INT | CTLFLAG_RW,
&sctp_hashtblsize, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_hashtblsize), 0, sysctl_sctp_check, "IU",
SCTPCTL_TCBHASHSIZE_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, pcbhashsize, CTLTYPE_INT | CTLFLAG_RW,
&sctp_pcbtblsize, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_pcbtblsize), 0, sysctl_sctp_check, "IU",
SCTPCTL_PCBHASHSIZE_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, min_split_point, CTLTYPE_INT | CTLFLAG_RW,
&sctp_min_split_point, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_min_split_point), 0, sysctl_sctp_check, "IU",
SCTPCTL_MIN_SPLIT_POINT_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, chunkscale, CTLTYPE_INT | CTLFLAG_RW,
&sctp_chunkscale, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_chunkscale), 0, sysctl_sctp_check, "IU",
SCTPCTL_CHUNKSCALE_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, delayed_sack_time, CTLTYPE_INT | CTLFLAG_RW,
&sctp_delayed_sack_time_default, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_delayed_sack_time_default), 0, sysctl_sctp_check, "IU",
SCTPCTL_DELAYED_SACK_TIME_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, sack_freq, CTLTYPE_INT | CTLFLAG_RW,
&sctp_sack_freq_default, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_sack_freq_default), 0, sysctl_sctp_check, "IU",
SCTPCTL_SACK_FREQ_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, sys_resource, CTLTYPE_INT | CTLFLAG_RW,
&sctp_system_free_resc_limit, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_system_free_resc_limit), 0, sysctl_sctp_check, "IU",
SCTPCTL_SYS_RESOURCE_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, asoc_resource, CTLTYPE_INT | CTLFLAG_RW,
&sctp_asoc_free_resc_limit, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_asoc_free_resc_limit), 0, sysctl_sctp_check, "IU",
SCTPCTL_ASOC_RESOURCE_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, heartbeat_interval, CTLTYPE_INT | CTLFLAG_RW,
&sctp_heartbeat_interval_default, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_heartbeat_interval_default), 0, sysctl_sctp_check, "IU",
SCTPCTL_HEARTBEAT_INTERVAL_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, pmtu_raise_time, CTLTYPE_INT | CTLFLAG_RW,
&sctp_pmtu_raise_time_default, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_pmtu_raise_time_default), 0, sysctl_sctp_check, "IU",
SCTPCTL_PMTU_RAISE_TIME_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, shutdown_guard_time, CTLTYPE_INT | CTLFLAG_RW,
&sctp_shutdown_guard_time_default, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_shutdown_guard_time_default), 0, sysctl_sctp_check, "IU",
SCTPCTL_SHUTDOWN_GUARD_TIME_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, secret_lifetime, CTLTYPE_INT | CTLFLAG_RW,
&sctp_secret_lifetime_default, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_secret_lifetime_default), 0, sysctl_sctp_check, "IU",
SCTPCTL_SECRET_LIFETIME_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, rto_max, CTLTYPE_INT | CTLFLAG_RW,
&sctp_rto_max_default, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_rto_max_default), 0, sysctl_sctp_check, "IU",
SCTPCTL_RTO_MAX_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, rto_min, CTLTYPE_INT | CTLFLAG_RW,
&sctp_rto_min_default, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_rto_min_default), 0, sysctl_sctp_check, "IU",
SCTPCTL_RTO_MIN_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, rto_initial, CTLTYPE_INT | CTLFLAG_RW,
&sctp_rto_initial_default, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_rto_initial_default), 0, sysctl_sctp_check, "IU",
SCTPCTL_RTO_INITIAL_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, init_rto_max, CTLTYPE_INT | CTLFLAG_RW,
&sctp_init_rto_max_default, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_init_rto_max_default), 0, sysctl_sctp_check, "IU",
SCTPCTL_INIT_RTO_MAX_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, valid_cookie_life, CTLTYPE_INT | CTLFLAG_RW,
&sctp_valid_cookie_life_default, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_valid_cookie_life_default), 0, sysctl_sctp_check, "IU",
SCTPCTL_VALID_COOKIE_LIFE_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, init_rtx_max, CTLTYPE_INT | CTLFLAG_RW,
&sctp_init_rtx_max_default, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_init_rtx_max_default), 0, sysctl_sctp_check, "IU",
SCTPCTL_INIT_RTX_MAX_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, assoc_rtx_max, CTLTYPE_INT | CTLFLAG_RW,
&sctp_assoc_rtx_max_default, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_assoc_rtx_max_default), 0, sysctl_sctp_check, "IU",
SCTPCTL_ASSOC_RTX_MAX_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, path_rtx_max, CTLTYPE_INT | CTLFLAG_RW,
&sctp_path_rtx_max_default, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_path_rtx_max_default), 0, sysctl_sctp_check, "IU",
SCTPCTL_PATH_RTX_MAX_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, add_more_on_output, CTLTYPE_INT | CTLFLAG_RW,
&sctp_add_more_threshold, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_add_more_threshold), 0, sysctl_sctp_check, "IU",
SCTPCTL_ADD_MORE_ON_OUTPUT_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, outgoing_streams, CTLTYPE_INT | CTLFLAG_RW,
&sctp_nr_outgoing_streams_default, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_nr_outgoing_streams_default), 0, sysctl_sctp_check, "IU",
SCTPCTL_OUTGOING_STREAMS_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, cmt_on_off, CTLTYPE_INT | CTLFLAG_RW,
&sctp_cmt_on_off, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_cmt_on_off), 0, sysctl_sctp_check, "IU",
SCTPCTL_CMT_ON_OFF_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, cmt_use_dac, CTLTYPE_INT | CTLFLAG_RW,
&sctp_cmt_use_dac, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_cmt_use_dac), 0, sysctl_sctp_check, "IU",
SCTPCTL_CMT_USE_DAC_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, cmt_pf, CTLTYPE_INT | CTLFLAG_RW,
&sctp_cmt_pf, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_cmt_pf), 0, sysctl_sctp_check, "IU",
SCTPCTL_CMT_PF_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, cwnd_maxburst, CTLTYPE_INT | CTLFLAG_RW,
&sctp_use_cwnd_based_maxburst, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst), 0, sysctl_sctp_check, "IU",
SCTPCTL_CWND_MAXBURST_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, early_fast_retran, CTLTYPE_INT | CTLFLAG_RW,
&sctp_early_fr, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_early_fr), 0, sysctl_sctp_check, "IU",
SCTPCTL_EARLY_FAST_RETRAN_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, early_fast_retran_msec, CTLTYPE_INT | CTLFLAG_RW,
&sctp_early_fr_msec, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_early_fr_msec), 0, sysctl_sctp_check, "IU",
SCTPCTL_EARLY_FAST_RETRAN_MSEC_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, asconf_auth_nochk, CTLTYPE_INT | CTLFLAG_RW,
&sctp_asconf_auth_nochk, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_asconf_auth_nochk), 0, sysctl_sctp_check, "IU",
SCTPCTL_ASCONF_AUTH_NOCHK_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, auth_disable, CTLTYPE_INT | CTLFLAG_RW,
&sctp_auth_disable, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_auth_disable), 0, sysctl_sctp_check, "IU",
SCTPCTL_AUTH_DISABLE_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, nat_friendly, CTLTYPE_INT | CTLFLAG_RW,
&sctp_nat_friendly, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_nat_friendly), 0, sysctl_sctp_check, "IU",
SCTPCTL_NAT_FRIENDLY_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, abc_l_var, CTLTYPE_INT | CTLFLAG_RW,
&sctp_L2_abc_variable, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_L2_abc_variable), 0, sysctl_sctp_check, "IU",
SCTPCTL_ABC_L_VAR_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, max_chained_mbufs, CTLTYPE_INT | CTLFLAG_RW,
&sctp_mbuf_threshold_count, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count), 0, sysctl_sctp_check, "IU",
SCTPCTL_MAX_CHAINED_MBUFS_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, do_sctp_drain, CTLTYPE_INT | CTLFLAG_RW,
&sctp_do_drain, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_do_drain), 0, sysctl_sctp_check, "IU",
SCTPCTL_DO_SCTP_DRAIN_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, hb_max_burst, CTLTYPE_INT | CTLFLAG_RW,
&sctp_hb_maxburst, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_hb_maxburst), 0, sysctl_sctp_check, "IU",
SCTPCTL_HB_MAX_BURST_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, abort_at_limit, CTLTYPE_INT | CTLFLAG_RW,
&sctp_abort_if_one_2_one_hits_limit, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_abort_if_one_2_one_hits_limit), 0, sysctl_sctp_check, "IU",
SCTPCTL_ABORT_AT_LIMIT_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, strict_data_order, CTLTYPE_INT | CTLFLAG_RW,
&sctp_strict_data_order, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_strict_data_order), 0, sysctl_sctp_check, "IU",
SCTPCTL_STRICT_DATA_ORDER_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, min_residual, CTLTYPE_INT | CTLFLAG_RW,
&sctp_min_residual, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_min_residual), 0, sysctl_sctp_check, "IU",
SCTPCTL_MIN_RESIDUAL_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, max_retran_chunk, CTLTYPE_INT | CTLFLAG_RW,
&sctp_max_retran_chunk, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_max_retran_chunk), 0, sysctl_sctp_check, "IU",
SCTPCTL_MAX_RETRAN_CHUNK_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, log_level, CTLTYPE_INT | CTLFLAG_RW,
&sctp_logging_level, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_logging_level), 0, sysctl_sctp_check, "IU",
SCTPCTL_LOGGING_LEVEL_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, default_cc_module, CTLTYPE_INT | CTLFLAG_RW,
&sctp_default_cc_module, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_default_cc_module), 0, sysctl_sctp_check, "IU",
SCTPCTL_DEFAULT_CC_MODULE_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, default_frag_interleave, CTLTYPE_INT | CTLFLAG_RW,
&sctp_default_frag_interleave, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_default_frag_interleave), 0, sysctl_sctp_check, "IU",
SCTPCTL_DEFAULT_FRAG_INTERLEAVE_DESC);
#if defined(__FreeBSD__) || defined(SCTP_APPLE_MOBILITY_BASE)
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, mobility_base, CTLTYPE_INT | CTLFLAG_RW,
&sctp_mobility_base, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_mobility_base), 0, sysctl_sctp_check, "IU",
SCTPCTL_MOBILITY_BASE_DESC);
#endif
#if defined(__FreeBSD__) || defined(SCTP_APPLE_MOBILITY_FASTHANDOFF)
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, mobility_fasthandoff, CTLTYPE_INT | CTLFLAG_RW,
&sctp_mobility_fasthandoff, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_mobility_fasthandoff), 0, sysctl_sctp_check, "IU",
SCTPCTL_MOBILITY_FASTHANDOFF_DESC);
#endif
#if defined(SCTP_LOCAL_TRACE_BUF)
SYSCTL_STRUCT(_net_inet_sctp, OID_AUTO, log, CTLFLAG_RD,
&sctp_log, sctp_log,
&SCTP_BASE_SYSCTL(sctp_log), sctp_log,
"SCTP logging (struct sctp_log)");
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, clear_trace, CTLTYPE_OPAQUE | CTLFLAG_RW,
&SCTP_BASE_SYSCTL(sctp_log), 0, sysctl_sctp_cleartrace, "IU",
"Clear SCTP Logging buffer");
#endif
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, udp_tunneling_for_client_enable, CTLTYPE_INT | CTLFLAG_RW,
&sctp_udp_tunneling_for_client_enable, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_udp_tunneling_for_client_enable), 0, sysctl_sctp_check, "IU",
SCTPCTL_UDP_TUNNELING_FOR_CLIENT_ENABLE_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, udp_tunneling_port, CTLTYPE_INT | CTLFLAG_RW,
&sctp_udp_tunneling_port, 0, sysctl_sctp_udp_tunneling_check, "IU",
&SCTP_BASE_SYSCTL(sctp_udp_tunneling_port), 0, sysctl_sctp_udp_tunneling_check, "IU",
SCTPCTL_UDP_TUNNELING_PORT_DESC);
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, enable_sack_immediately, CTLTYPE_INT | CTLFLAG_RW,
&SCTP_BASE_SYSCTL(sctp_enable_sack_immediately), 0, sysctl_sctp_check, "IU",
SCTPCTL_SACK_IMMEDIATELY_ENABLE_DESC);
#ifdef SCTP_DEBUG
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, debug, CTLTYPE_INT | CTLFLAG_RW,
&sctp_debug_on, 0, sysctl_sctp_check, "IU",
&SCTP_BASE_SYSCTL(sctp_debug_on), 0, sysctl_sctp_check, "IU",
SCTPCTL_DEBUG_DESC);
#endif /* SCTP_DEBUG */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, output_unlocked, CTLTYPE_INT | CTLFLAG_RW,
&SCTP_BASE_SYSCTL(sctp_output_unlocked), 0, sysctl_sctp_check, "IU",
SCTPCTL_OUTPUT_UNLOCKED_DESC);
#endif
SYSCTL_STRUCT(_net_inet_sctp, OID_AUTO, stats, CTLFLAG_RW,
&sctpstat, sctpstat,
&SCTP_BASE_STATS, sctpstat,
"SCTP statistics (struct sctp_stat)");
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, assoclist, CTLFLAG_RD,


@ -37,6 +37,78 @@ __FBSDID("$FreeBSD$");
#include <netinet/sctp_os.h>
#include <netinet/sctp_constants.h>
struct sctp_sysctl {
uint32_t sctp_sendspace;
uint32_t sctp_recvspace;
uint32_t sctp_auto_asconf;
uint32_t sctp_multiple_asconfs;
uint32_t sctp_ecn_enable;
uint32_t sctp_ecn_nonce;
uint32_t sctp_strict_sacks;
uint32_t sctp_no_csum_on_loopback;
uint32_t sctp_strict_init;
uint32_t sctp_peer_chunk_oh;
uint32_t sctp_max_burst_default;
uint32_t sctp_max_chunks_on_queue;
uint32_t sctp_hashtblsize;
uint32_t sctp_pcbtblsize;
uint32_t sctp_min_split_point;
uint32_t sctp_chunkscale;
uint32_t sctp_delayed_sack_time_default;
uint32_t sctp_sack_freq_default;
uint32_t sctp_system_free_resc_limit;
uint32_t sctp_asoc_free_resc_limit;
uint32_t sctp_heartbeat_interval_default;
uint32_t sctp_pmtu_raise_time_default;
uint32_t sctp_shutdown_guard_time_default;
uint32_t sctp_secret_lifetime_default;
uint32_t sctp_rto_max_default;
uint32_t sctp_rto_min_default;
uint32_t sctp_rto_initial_default;
uint32_t sctp_init_rto_max_default;
uint32_t sctp_valid_cookie_life_default;
uint32_t sctp_init_rtx_max_default;
uint32_t sctp_assoc_rtx_max_default;
uint32_t sctp_path_rtx_max_default;
uint32_t sctp_add_more_threshold;
uint32_t sctp_nr_outgoing_streams_default;
uint32_t sctp_cmt_on_off;
uint32_t sctp_cmt_use_dac;
uint32_t sctp_cmt_pf;
uint32_t sctp_use_cwnd_based_maxburst;
uint32_t sctp_early_fr;
uint32_t sctp_early_fr_msec;
uint32_t sctp_asconf_auth_nochk;
uint32_t sctp_auth_disable;
uint32_t sctp_nat_friendly;
uint32_t sctp_L2_abc_variable;
uint32_t sctp_mbuf_threshold_count;
uint32_t sctp_do_drain;
uint32_t sctp_hb_maxburst;
uint32_t sctp_abort_if_one_2_one_hits_limit;
uint32_t sctp_strict_data_order;
uint32_t sctp_min_residual;
uint32_t sctp_max_retran_chunk;
uint32_t sctp_logging_level;
/* JRS - Variable for default congestion control module */
uint32_t sctp_default_cc_module;
uint32_t sctp_default_frag_interleave;
uint32_t sctp_mobility_base;
uint32_t sctp_mobility_fasthandoff;
#if defined(SCTP_LOCAL_TRACE_BUF)
struct sctp_log sctp_log;
#endif
uint32_t sctp_udp_tunneling_for_client_enable;
uint32_t sctp_udp_tunneling_port;
uint32_t sctp_enable_sack_immediately;
#if defined(SCTP_DEBUG)
uint32_t sctp_debug_on;
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
uint32_t sctp_output_unlocked;
#endif
};
/*
* limits for the sysctl variables
*/
@ -388,6 +460,12 @@ __FBSDID("$FreeBSD$");
#define SCTPCTL_UDP_TUNNELING_PORT_MAX 65535
#define SCTPCTL_UDP_TUNNELING_PORT_DEFAULT SCTP_OVER_UDP_TUNNELING_PORT
/* Enable sending of the SACK-IMMEDIATELY bit */
#define SCTPCTL_SACK_IMMEDIATELY_ENABLE_DESC "Enable sending of the SACK-IMMEDIATELY-bit."
#define SCTPCTL_SACK_IMMEDIATELY_ENABLE_MIN 0
#define SCTPCTL_SACK_IMMEDIATELY_ENABLE_MAX 1
#define SCTPCTL_SACK_IMMEDIATELY_ENABLE_DEFAULT SCTPCTL_SACK_IMMEDIATELY_ENABLE_MIN
#if defined(SCTP_DEBUG)
/* debug: Configure debug output */
#define SCTPCTL_DEBUG_DESC "Configure debug output"
@ -397,90 +475,20 @@ __FBSDID("$FreeBSD$");
#endif
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
#define SCTPCTL_OUTPUT_UNLOCKED_DESC "Unlock socket when sending packets down to IP."
#define SCTPCTL_OUTPUT_UNLOCKED_MIN 0
#define SCTPCTL_OUTPUT_UNLOCKED_MAX 1
#define SCTPCTL_OUTPUT_UNLOCKED_DEFAULT SCTPCTL_OUTPUT_UNLOCKED_MIN
#endif
#if defined(_KERNEL)
/*
* variable definitions
*/
extern uint32_t sctp_sendspace;
extern uint32_t sctp_recvspace;
extern uint32_t sctp_auto_asconf;
extern uint32_t sctp_multiple_asconfs;
extern uint32_t sctp_ecn_enable;
extern uint32_t sctp_ecn_nonce;
extern uint32_t sctp_strict_sacks;
extern uint32_t sctp_no_csum_on_loopback;
extern uint32_t sctp_strict_init;
extern uint32_t sctp_peer_chunk_oh;
extern uint32_t sctp_max_burst_default;
extern uint32_t sctp_max_chunks_on_queue;
extern uint32_t sctp_hashtblsize;
extern uint32_t sctp_pcbtblsize;
extern uint32_t sctp_min_split_point;
extern uint32_t sctp_chunkscale;
extern uint32_t sctp_delayed_sack_time_default;
extern uint32_t sctp_sack_freq_default;
extern uint32_t sctp_system_free_resc_limit;
extern uint32_t sctp_asoc_free_resc_limit;
extern uint32_t sctp_heartbeat_interval_default;
extern uint32_t sctp_pmtu_raise_time_default;
extern uint32_t sctp_shutdown_guard_time_default;
extern uint32_t sctp_secret_lifetime_default;
extern uint32_t sctp_rto_max_default;
extern uint32_t sctp_rto_min_default;
extern uint32_t sctp_rto_initial_default;
extern uint32_t sctp_init_rto_max_default;
extern uint32_t sctp_valid_cookie_life_default;
extern uint32_t sctp_init_rtx_max_default;
extern uint32_t sctp_assoc_rtx_max_default;
extern uint32_t sctp_path_rtx_max_default;
extern uint32_t sctp_add_more_threshold;
extern uint32_t sctp_nr_outgoing_streams_default;
extern uint32_t sctp_cmt_on_off;
extern uint32_t sctp_cmt_use_dac;
/* JRS 5/21/07 - CMT PF type flag variables */
extern uint32_t sctp_cmt_pf;
extern uint32_t sctp_use_cwnd_based_maxburst;
extern uint32_t sctp_early_fr;
extern uint32_t sctp_early_fr_msec;
extern uint32_t sctp_asconf_auth_nochk;
extern uint32_t sctp_auth_disable;
extern uint32_t sctp_nat_friendly;
extern uint32_t sctp_L2_abc_variable;
extern uint32_t sctp_mbuf_threshold_count;
extern uint32_t sctp_do_drain;
extern uint32_t sctp_hb_maxburst;
extern uint32_t sctp_abort_if_one_2_one_hits_limit;
extern uint32_t sctp_strict_data_order;
extern uint32_t sctp_min_residual;
extern uint32_t sctp_max_retran_chunk;
extern uint32_t sctp_logging_level;
/* JRS - Variable for the default congestion control module */
extern uint32_t sctp_default_cc_module;
extern uint32_t sctp_default_frag_interleave;
extern uint32_t sctp_mobility_base;
extern uint32_t sctp_mobility_fasthandoff;
#if defined(SCTP_LOCAL_TRACE_BUF)
extern struct sctp_log sctp_log;
#endif
extern uint32_t sctp_udp_tunneling_for_client_enable;
extern uint32_t sctp_udp_tunneling_port;
#if defined(SCTP_DEBUG)
extern uint32_t sctp_debug_on;
#endif
extern struct sctpstat sctpstat;
#if defined(SYSCTL_DECL)
SYSCTL_DECL(_net_inet_sctp);
#endif
void sctp_init_sysctls(void);
#endif /* _KERNEL */
#endif /* __sctp_sysctl_h__ */


@ -72,8 +72,8 @@ sctp_early_fr_timer(struct sctp_inpcb *inp,
cur_rtt = ((net->lastsa >> 2) + net->lastsv) >> 1;
}
if (cur_rtt < sctp_early_fr_msec) {
cur_rtt = sctp_early_fr_msec;
if (cur_rtt < SCTP_BASE_SYSCTL(sctp_early_fr_msec)) {
cur_rtt = SCTP_BASE_SYSCTL(sctp_early_fr_msec);
}
cur_rtt *= 1000;
tv.tv_sec = cur_rtt / 1000000;
@ -120,7 +120,7 @@ sctp_early_fr_timer(struct sctp_inpcb *inp,
continue;
}
}
if (sctp_logging_level & SCTP_EARLYFR_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_EARLYFR_LOGGING_ENABLE) {
sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count,
4, SCTP_FR_MARKED_EARLY);
}
@ -216,7 +216,7 @@ sctp_threshold_management(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
* not in PF state.
*/
/* Stop any running T3 timers here? */
if (sctp_cmt_on_off && sctp_cmt_pf) {
if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) {
net->dest_state &= ~SCTP_ADDR_PF;
SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from PF to unreachable.\n",
net);
@ -239,7 +239,7 @@ sctp_threshold_management(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
if (net) {
if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0) {
if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
sctp_misc_ints(SCTP_THRESHOLD_INCR,
stcb->asoc.overall_error_count,
(stcb->asoc.overall_error_count + 1),
@ -249,7 +249,7 @@ sctp_threshold_management(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
stcb->asoc.overall_error_count++;
}
} else {
if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
sctp_misc_ints(SCTP_THRESHOLD_INCR,
stcb->asoc.overall_error_count,
(stcb->asoc.overall_error_count + 1),
@ -424,7 +424,7 @@ sctp_find_alternate_net(struct sctp_tcb *stcb,
return (net);
}
min_errors_net->dest_state &= ~SCTP_ADDR_PF;
min_errors_net->cwnd = min_errors_net->mtu * sctp_cmt_pf;
min_errors_net->cwnd = min_errors_net->mtu * SCTP_BASE_SYSCTL(sctp_cmt_pf);
if (SCTP_OS_TIMER_PENDING(&min_errors_net->rxt_timer.timer)) {
sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
stcb, min_errors_net,
@ -601,7 +601,7 @@ sctp_mark_all_for_resend(struct sctp_tcb *stcb,
/* get cur rto in micro-seconds */
cur_rtt = (((net->lastsa >> 2) + net->lastsv) >> 1);
cur_rtt *= 1000;
if (sctp_logging_level & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
sctp_log_fr(cur_rtt,
stcb->asoc.peers_rwnd,
window_probe,
@ -625,7 +625,7 @@ sctp_mark_all_for_resend(struct sctp_tcb *stcb,
*/
min_wait.tv_sec = min_wait.tv_usec = 0;
}
if (sctp_logging_level & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
sctp_log_fr(cur_rtt, now.tv_sec, now.tv_usec, SCTP_FR_T3_MARK_TIME);
sctp_log_fr(0, min_wait.tv_sec, min_wait.tv_usec, SCTP_FR_T3_MARK_TIME);
}
@ -661,7 +661,7 @@ sctp_mark_all_for_resend(struct sctp_tcb *stcb,
*/
/* validate its been outstanding long enough */
if (sctp_logging_level & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
sctp_log_fr(chk->rec.data.TSN_seq,
chk->sent_rcv_time.tv_sec,
chk->sent_rcv_time.tv_usec,
@ -673,7 +673,7 @@ sctp_mark_all_for_resend(struct sctp_tcb *stcb,
* some seconds past our min.. forget it we
* will find no more to send.
*/
if (sctp_logging_level & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
sctp_log_fr(0,
chk->sent_rcv_time.tv_sec,
chk->sent_rcv_time.tv_usec,
@ -691,7 +691,7 @@ sctp_mark_all_for_resend(struct sctp_tcb *stcb,
* ok it was sent after our boundary
* time.
*/
if (sctp_logging_level & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
sctp_log_fr(0,
chk->sent_rcv_time.tv_sec,
chk->sent_rcv_time.tv_usec,
@ -735,7 +735,7 @@ sctp_mark_all_for_resend(struct sctp_tcb *stcb,
tsnfirst = chk->rec.data.TSN_seq;
}
tsnlast = chk->rec.data.TSN_seq;
if (sctp_logging_level & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count,
0, SCTP_FR_T3_MARKED);
}
@ -746,7 +746,7 @@ sctp_mark_all_for_resend(struct sctp_tcb *stcb,
}
net->marked_retrans++;
stcb->asoc.marked_retrans++;
if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND_TO,
chk->whoTo->flight_size,
chk->book_size,
@ -756,7 +756,7 @@ sctp_mark_all_for_resend(struct sctp_tcb *stcb,
sctp_flight_size_decrease(chk);
sctp_total_flight_decrease(stcb, chk);
stcb->asoc.peers_rwnd += chk->send_size;
stcb->asoc.peers_rwnd += sctp_peer_chunk_oh;
stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
}
chk->sent = SCTP_DATAGRAM_RESEND;
SCTP_STAT_INCR(sctps_markedretrans);
@ -781,7 +781,7 @@ sctp_mark_all_for_resend(struct sctp_tcb *stcb,
/*
* CMT: Do not allow FRs on retransmitted TSNs.
*/
if (sctp_cmt_on_off == 1) {
if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 1) {
chk->no_fr_allowed = 1;
}
} else if (chk->sent == SCTP_DATAGRAM_ACKED) {
@ -796,7 +796,7 @@ sctp_mark_all_for_resend(struct sctp_tcb *stcb,
/* we did not subtract the same things? */
audit_tf = 1;
}
if (sctp_logging_level & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
sctp_log_fr(tsnfirst, tsnlast, num_mk, SCTP_FR_T3_TIMEOUT);
}
#ifdef SCTP_DEBUG
@ -856,7 +856,7 @@ sctp_mark_all_for_resend(struct sctp_tcb *stcb,
}
TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
if (chk->sent < SCTP_DATAGRAM_RESEND) {
if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
chk->whoTo->flight_size,
chk->book_size,
@ -934,10 +934,10 @@ sctp_t3rxt_timer(struct sctp_inpcb *inp,
struct sctp_nets *alt;
int win_probe, num_mk;
if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
sctp_log_fr(0, 0, 0, SCTP_FR_T3_TIMEOUT);
}
if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
struct sctp_nets *lnet;
TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
@ -964,7 +964,7 @@ sctp_t3rxt_timer(struct sctp_inpcb *inp,
* addition, find an alternate destination with PF-based
* find_alt_net().
*/
if (sctp_cmt_on_off && sctp_cmt_pf) {
if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) {
if ((net->dest_state & SCTP_ADDR_PF) != SCTP_ADDR_PF) {
net->dest_state |= SCTP_ADDR_PF;
net->last_active = sctp_get_tick_count();
@ -972,7 +972,7 @@ sctp_t3rxt_timer(struct sctp_inpcb *inp,
net);
}
alt = sctp_find_alternate_net(stcb, net, 2);
} else if (sctp_cmt_on_off) {
} else if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
/*
* CMT: Using RTX_SSTHRESH policy for CMT. If CMT is being
* used, then pick dest with largest ssthresh for any
@ -1082,7 +1082,7 @@ sctp_t3rxt_timer(struct sctp_inpcb *inp,
net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
}
}
} else if (sctp_cmt_on_off && sctp_cmt_pf && (net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF) {
} else if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf) && (net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF) {
/*
* JRS 5/14/07 - If the destination hasn't failed completely
* but is in PF state, a PF-heartbeat needs to be sent
@ -1122,7 +1122,7 @@ sctp_t3rxt_timer(struct sctp_inpcb *inp,
}
}
}
if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
sctp_log_cwnd(stcb, net, net->cwnd, SCTP_CWND_LOG_FROM_RTX);
}
return (0);
@ -1616,7 +1616,7 @@ sctp_heartbeat_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
else if (ret == 0) {
break;
}
if (cnt_sent >= sctp_hb_maxburst)
if (cnt_sent >= SCTP_BASE_SYSCTL(sctp_hb_maxburst))
break;
}
}
@ -1833,7 +1833,7 @@ sctp_iterator_timer(struct sctp_iterator *it)
done_with_iterator:
SCTP_ITERATOR_UNLOCK();
SCTP_INP_INFO_WLOCK();
TAILQ_REMOVE(&sctppcbinfo.iteratorhead, it, sctp_nxt_itr);
TAILQ_REMOVE(&SCTP_BASE_INFO(iteratorhead), it, sctp_nxt_itr);
/* stopping the callout is not needed, in theory */
SCTP_INP_INFO_WUNLOCK();
(void)SCTP_OS_TIMER_STOP(&it->tmr.timer);
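Most of the churn in this file is one mechanical pattern: every read of a tunable such as sctp_logging_level, sctp_cmt_on_off, sctp_cmt_pf or sctp_peer_chunk_oh now goes through SCTP_BASE_SYSCTL() instead of naming a bare global. A minimal user-space sketch of that indirection follows; the struct name, the members shown and the flag value are illustrative stand-ins, not the kernel's actual definitions.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical backing structure; the real kernel struct differs. */
struct sctp_sysctl_sketch {
        uint32_t sctp_logging_level;
        uint32_t sctp_cmt_on_off;
        uint32_t sctp_cmt_pf;
        uint32_t sctp_peer_chunk_oh;
};

static struct sctp_sysctl_sketch sctp_base;

/* Single indirection point: callers never touch a bare global. */
#define SCTP_BASE_SYSCTL(member) (sctp_base.member)

#define SCTP_FR_LOGGING_ENABLE 0x00000001       /* illustrative flag bit */

int
main(void)
{
        SCTP_BASE_SYSCTL(sctp_logging_level) |= SCTP_FR_LOGGING_ENABLE;
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)
                printf("FR logging enabled\n");
        return (0);
}

Keeping the indirection in a macro means the backing structure can later be reached through a per-instance pointer without touching any of the call sites rewritten above.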

@ -934,8 +934,8 @@ struct sctpstat {
#define SCTP_STAT_INCR(_x) SCTP_STAT_INCR_BY(_x,1)
#define SCTP_STAT_DECR(_x) SCTP_STAT_DECR_BY(_x,1)
#define SCTP_STAT_INCR_BY(_x,_d) atomic_add_int(&sctpstat._x, _d)
#define SCTP_STAT_DECR_BY(_x,_d) atomic_subtract_int(&sctpstat._x, _d)
#define SCTP_STAT_INCR_BY(_x,_d) atomic_add_int(&SCTP_BASE_STAT(_x), _d)
#define SCTP_STAT_DECR_BY(_x,_d) atomic_subtract_int(&SCTP_BASE_STAT(_x), _d)
/* The following macros are for handling MIB values, */
#define SCTP_STAT_INCR_COUNTER32(_x) SCTP_STAT_INCR(_x)
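The statistics counters follow the same scheme: SCTP_STAT_INCR_BY()/SCTP_STAT_DECR_BY() now resolve the counter through SCTP_BASE_STAT() before the atomic update. A small user-space sketch, with C11 atomics standing in for the kernel's atomic_add_int() and a two-field stand-in for the real sctpstat structure:

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical, trimmed stand-in for struct sctpstat. */
struct sctpstat_sketch {
        atomic_uint sctps_markedretrans;
        atomic_uint sctps_cached_chk;
};

static struct sctpstat_sketch sctp_stats;

#define SCTP_BASE_STAT(m)               (sctp_stats.m)
#define SCTP_STAT_INCR_BY(_x, _d)       atomic_fetch_add(&SCTP_BASE_STAT(_x), (_d))
#define SCTP_STAT_INCR(_x)              SCTP_STAT_INCR_BY(_x, 1)

int
main(void)
{
        SCTP_STAT_INCR(sctps_markedretrans);
        printf("markedretrans=%u\n",
            atomic_load(&SCTP_BASE_STAT(sctps_markedretrans)));
        return (0);
}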

@ -57,48 +57,63 @@ __FBSDID("$FreeBSD$");
void
sctp_init(void)
{
/* Init the SCTP pcb in sctp_pcb.c */
u_long sb_max_adj;
sctp_pcb_init();
bzero(&SCTP_BASE_STATS, sizeof(struct sctpstat));
/* Initialize and modify the sysctled variables */
sctp_init_sysctls();
if ((nmbclusters / 8) > SCTP_ASOC_MAX_CHUNKS_ON_QUEUE)
sctp_max_chunks_on_queue = (nmbclusters / 8);
SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue) = (nmbclusters / 8);
/*
* Allow a user to take no more than 1/2 the number of clusters or
* the SB_MAX whichever is smaller for the send window.
*/
sb_max_adj = (u_long)((u_quad_t) (SB_MAX) * MCLBYTES / (MSIZE + MCLBYTES));
sctp_sendspace = min(sb_max_adj,
SCTP_BASE_SYSCTL(sctp_sendspace) = min(sb_max_adj,
(((uint32_t) nmbclusters / 2) * SCTP_DEFAULT_MAXSEGMENT));
/*
* Now for the recv window, should we take the same amount? or
* should I do 1/2 the SB_MAX instead in the SB_MAX min above. For
* now I will just copy.
*/
sctp_recvspace = sctp_sendspace;
SCTP_BASE_SYSCTL(sctp_recvspace) = SCTP_BASE_SYSCTL(sctp_sendspace);
SCTP_BASE_VAR(first_time) = 0;
SCTP_BASE_VAR(sctp_pcb_initialized) = 0;
sctp_pcb_init();
#if defined(SCTP_PACKET_LOGGING)
SCTP_BASE_VAR(packet_log_writers) = 0;
SCTP_BASE_VAR(packet_log_end) = 0;
bzero(&SCTP_BASE_VAR(packet_log_buffer), SCTP_PACKET_LOG_SIZE);
#endif
}
void
sctp_finish(void)
{
sctp_pcb_finish();
}
/*
* cleanup of the sctppcbinfo structure.
* Assumes that the sctppcbinfo lock is held.
* cleanup of the SCTP_BASE_INFO() structure.
* Assumes that the SCTP_BASE_INFO() lock is held.
*/
void
sctp_pcbinfo_cleanup(void)
{
/* free the hash tables */
if (sctppcbinfo.sctp_asochash != NULL)
SCTP_HASH_FREE(sctppcbinfo.sctp_asochash, sctppcbinfo.hashasocmark);
if (sctppcbinfo.sctp_ephash != NULL)
SCTP_HASH_FREE(sctppcbinfo.sctp_ephash, sctppcbinfo.hashmark);
if (sctppcbinfo.sctp_tcpephash != NULL)
SCTP_HASH_FREE(sctppcbinfo.sctp_tcpephash, sctppcbinfo.hashtcpmark);
if (sctppcbinfo.sctp_restarthash != NULL)
SCTP_HASH_FREE(sctppcbinfo.sctp_restarthash, sctppcbinfo.hashrestartmark);
if (SCTP_BASE_INFO(sctp_asochash) != NULL)
SCTP_HASH_FREE(SCTP_BASE_INFO(sctp_asochash), SCTP_BASE_INFO(hashasocmark));
if (SCTP_BASE_INFO(sctp_ephash) != NULL)
SCTP_HASH_FREE(SCTP_BASE_INFO(sctp_ephash), SCTP_BASE_INFO(hashmark));
if (SCTP_BASE_INFO(sctp_tcpephash) != NULL)
SCTP_HASH_FREE(SCTP_BASE_INFO(sctp_tcpephash), SCTP_BASE_INFO(hashtcpmark));
if (SCTP_BASE_INFO(sctp_restarthash) != NULL)
SCTP_HASH_FREE(SCTP_BASE_INFO(sctp_restarthash), SCTP_BASE_INFO(hashrestartmark));
}
@ -134,7 +149,7 @@ sctp_pathmtu_adjustment(struct sctp_inpcb *inp,
}
chk->sent = SCTP_DATAGRAM_RESEND;
chk->rec.data.doing_fast_retransmit = 0;
if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PMTU,
chk->whoTo->flight_size,
chk->book_size,
@ -293,7 +308,7 @@ sctp_notify(struct sctp_inpcb *inp,
* PF state.
*/
/* Stop any running T3 timers here? */
if (sctp_cmt_on_off && sctp_cmt_pf) {
if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) {
net->dest_state &= ~SCTP_ADDR_PF;
SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from PF to unreachable.\n",
net);
@ -521,7 +536,7 @@ sctp_attach(struct socket *so, int proto, struct thread *p)
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
return EINVAL;
}
error = SCTP_SORESERVE(so, sctp_sendspace, sctp_recvspace);
error = SCTP_SORESERVE(so, SCTP_BASE_SYSCTL(sctp_sendspace), SCTP_BASE_SYSCTL(sctp_recvspace));
if (error) {
return error;
}
@ -1674,7 +1689,7 @@ sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize,
struct sctp_assoc_value *av;
SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
if (sctp_cmt_on_off) {
if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
SCTP_FIND_STCB(inp, stcb, av->assoc_id);
if (stcb) {
av->assoc_value = stcb->asoc.sctp_cmt_on_off;
@ -2740,7 +2755,7 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
struct sctp_assoc_value *av;
SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
if (sctp_cmt_on_off) {
if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
SCTP_FIND_STCB(inp, stcb, av->assoc_id);
if (stcb) {
stcb->asoc.sctp_cmt_on_off = (uint8_t) av->assoc_value;
@ -3699,8 +3714,8 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
stcb->asoc.maxrto = new_max;
stcb->asoc.minrto = new_min;
} else {
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDOM);
error = EDOM;
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
error = EINVAL;
}
SCTP_TCB_UNLOCK(stcb);
} else {
@ -3722,8 +3737,8 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
inp->sctp_ep.sctp_maxrto = new_max;
inp->sctp_ep.sctp_minrto = new_min;
} else {
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDOM);
error = EDOM;
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
error = EINVAL;
}
SCTP_INP_WUNLOCK(inp);
}
@ -4190,7 +4205,7 @@ sctp_listen(struct socket *so, int backlog, struct thread *p)
}
SCTP_INP_RLOCK(inp);
#ifdef SCTP_LOCK_LOGGING
if (sctp_logging_level & SCTP_LOCK_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) {
sctp_log_lock(inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_SOCK);
}
#endif
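sctp_init() above derives the default send space from SB_MAX scaled by the payload fraction of an mbuf cluster, capped at half the cluster pool's worth of maximum-size segments, and then copies the result for the receive space. A standalone sketch of that arithmetic follows; SB_MAX, MSIZE, MCLBYTES and nmbclusters are kernel constants/tunables, so the values below are examples only, not what a given kernel will compute.

#include <stdint.h>
#include <stdio.h>

/* Illustrative values; the real ones come from the kernel configuration. */
#define SB_MAX                  (2UL * 1024 * 1024)
#define MSIZE                   256
#define MCLBYTES                2048
#define SCTP_DEFAULT_MAXSEGMENT 65535

int
main(void)
{
        unsigned long nmbclusters = 25600;      /* example tunable value */
        unsigned long sb_max_adj, cap, sendspace;

        /* Scale SB_MAX down for the mbuf header that rides with each cluster. */
        sb_max_adj = (unsigned long)((uint64_t)SB_MAX * MCLBYTES /
            (MSIZE + MCLBYTES));
        /* Never allow more than half the clusters' worth of max segments. */
        cap = ((uint32_t)nmbclusters / 2) * (unsigned long)SCTP_DEFAULT_MAXSEGMENT;
        sendspace = (sb_max_adj < cap) ? sb_max_adj : cap;

        printf("sb_max_adj=%lu sendspace=%lu\n", sb_max_adj, sendspace);
        return (0);
}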

@ -73,24 +73,24 @@ extern struct pr_usrreqs sctp_usrreqs;
*/
#define sctp_free_a_readq(_stcb, _readq) { \
SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_readq, (_readq)); \
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), (_readq)); \
SCTP_DECR_READQ_COUNT(); \
}
#define sctp_alloc_a_readq(_stcb, _readq) { \
(_readq) = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_readq, struct sctp_queued_to_read); \
(_readq) = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_readq), struct sctp_queued_to_read); \
if ((_readq)) { \
SCTP_INCR_READQ_COUNT(); \
} \
}
#define sctp_free_a_strmoq(_stcb, _strmoq) { \
SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_strmoq, (_strmoq)); \
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_strmoq), (_strmoq)); \
SCTP_DECR_STRMOQ_COUNT(); \
}
#define sctp_alloc_a_strmoq(_stcb, _strmoq) { \
(_strmoq) = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_strmoq, struct sctp_stream_queue_pending); \
(_strmoq) = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_strmoq), struct sctp_stream_queue_pending); \
if ((_strmoq)) { \
SCTP_INCR_STRMOQ_COUNT(); \
} \
@ -104,24 +104,24 @@ extern struct pr_usrreqs sctp_usrreqs;
sctp_free_remote_addr((_chk)->whoTo); \
(_chk)->whoTo = NULL; \
} \
if (((_stcb)->asoc.free_chunk_cnt > sctp_asoc_free_resc_limit) || \
(sctppcbinfo.ipi_free_chunks > sctp_system_free_resc_limit)) { \
SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, (_chk)); \
if (((_stcb)->asoc.free_chunk_cnt > SCTP_BASE_SYSCTL(sctp_asoc_free_resc_limit)) || \
(SCTP_BASE_INFO(ipi_free_chunks) > SCTP_BASE_SYSCTL(sctp_system_free_resc_limit))) { \
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), (_chk)); \
SCTP_DECR_CHK_COUNT(); \
} else { \
TAILQ_INSERT_TAIL(&(_stcb)->asoc.free_chunks, (_chk), sctp_next); \
(_stcb)->asoc.free_chunk_cnt++; \
atomic_add_int(&sctppcbinfo.ipi_free_chunks, 1); \
atomic_add_int(&SCTP_BASE_INFO(ipi_free_chunks), 1); \
} \
} else { \
SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, (_chk)); \
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), (_chk)); \
SCTP_DECR_CHK_COUNT(); \
} \
}
#define sctp_alloc_a_chunk(_stcb, _chk) { \
if (TAILQ_EMPTY(&(_stcb)->asoc.free_chunks)) { \
(_chk) = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk, struct sctp_tmit_chunk); \
(_chk) = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_chunk), struct sctp_tmit_chunk); \
if ((_chk)) { \
SCTP_INCR_CHK_COUNT(); \
(_chk)->whoTo = NULL; \
@ -129,7 +129,7 @@ extern struct pr_usrreqs sctp_usrreqs;
} else { \
(_chk) = TAILQ_FIRST(&(_stcb)->asoc.free_chunks); \
TAILQ_REMOVE(&(_stcb)->asoc.free_chunks, (_chk), sctp_next); \
atomic_subtract_int(&sctppcbinfo.ipi_free_chunks, 1); \
atomic_subtract_int(&SCTP_BASE_INFO(ipi_free_chunks), 1); \
SCTP_STAT_INCR(sctps_cached_chk); \
(_stcb)->asoc.free_chunk_cnt--; \
} \
@ -153,7 +153,7 @@ extern struct pr_usrreqs sctp_usrreqs;
} \
(__net)->src_addr_selected = 0; \
(__net)->dest_state = SCTP_ADDR_NOT_REACHABLE; \
SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_net, (__net)); \
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_net), (__net)); \
SCTP_DECR_RADDR_COUNT(); \
} \
} \
@ -306,6 +306,7 @@ void sctp_pathmtu_adjustment __P((struct sctp_inpcb *, struct sctp_tcb *, struct
void sctp_drain __P((void));
void sctp_init __P((void));
void sctp_finish(void);
void sctp_pcbinfo_cleanup(void);
int sctp_flush(struct socket *, int);
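The sctp_free_a_chunk()/sctp_alloc_a_chunk() macros above keep freed chunks on a per-association free list and only return them to the zone once the association or system-wide counts exceed the resource-limit sysctls. A user-space sketch of that bounded-cache pattern; the structure layout, limit and allocator here are illustrative, not the kernel's zone allocator.

#include <stdio.h>
#include <stdlib.h>

struct chunk {
        struct chunk *next;
        char payload[128];
};

struct chunk_cache {
        struct chunk *head;
        unsigned count;
        unsigned limit;         /* plays the role of the *_free_resc_limit sysctls */
};

static void
cache_free(struct chunk_cache *c, struct chunk *ck)
{
        if (c->count >= c->limit) {
                free(ck);               /* over the limit: really release it */
                return;
        }
        ck->next = c->head;             /* under the limit: keep it for reuse */
        c->head = ck;
        c->count++;
}

static struct chunk *
cache_alloc(struct chunk_cache *c)
{
        struct chunk *ck;

        if (c->head != NULL) {          /* reuse a cached chunk if available */
                ck = c->head;
                c->head = ck->next;
                c->count--;
                return (ck);
        }
        return (malloc(sizeof(*ck)));   /* otherwise fall back to the allocator */
}

int
main(void)
{
        struct chunk_cache cache = { NULL, 0, 4 };
        struct chunk *ck = cache_alloc(&cache);

        if (ck != NULL)
                cache_free(&cache, ck);
        printf("cached %u chunk(s)\n", cache.count);
        while (cache.head != NULL) {    /* drain the cache before exit */
                ck = cache.head;
                cache.head = ck->next;
                free(ck);
        }
        return (0);
}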

@ -340,7 +340,7 @@ sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
}
sctp_clog.x.lock.info_lock = rw_wowned(&sctppcbinfo.ipi_ep_mtx);
sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
if (inp->sctp_socket) {
sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
@ -894,9 +894,9 @@ sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
asoc->max_burst = m->sctp_ep.max_burst;
asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
asoc->cookie_life = m->sctp_ep.def_cookie_life;
asoc->sctp_cmt_on_off = (uint8_t) sctp_cmt_on_off;
asoc->sctp_cmt_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_on_off);
/* JRS 5/21/07 - Init CMT PF variables */
asoc->sctp_cmt_pf = (uint8_t) sctp_cmt_pf;
asoc->sctp_cmt_pf = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_pf);
asoc->sctp_frag_point = m->sctp_frag_point;
#ifdef INET
asoc->default_tos = m->ip_inp.inp.inp_ip_tos;
@ -1341,22 +1341,22 @@ sctp_iterator_worker(void)
/* This function is called with the WQ lock in place */
sctppcbinfo.iterator_running = 1;
SCTP_BASE_INFO(iterator_running) = 1;
again:
it = TAILQ_FIRST(&sctppcbinfo.iteratorhead);
it = TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead));
while (it) {
/* now lets work on this one */
TAILQ_REMOVE(&sctppcbinfo.iteratorhead, it, sctp_nxt_itr);
TAILQ_REMOVE(&SCTP_BASE_INFO(iteratorhead), it, sctp_nxt_itr);
SCTP_IPI_ITERATOR_WQ_UNLOCK();
sctp_iterator_work(it);
SCTP_IPI_ITERATOR_WQ_LOCK();
/* sa_ignore FREED_MEMORY */
it = TAILQ_FIRST(&sctppcbinfo.iteratorhead);
it = TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead));
}
if (TAILQ_FIRST(&sctppcbinfo.iteratorhead)) {
if (TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead))) {
goto again;
}
sctppcbinfo.iterator_running = 0;
SCTP_BASE_INFO(iterator_running) = 0;
return;
}
@ -1383,12 +1383,12 @@ sctp_handle_addr_wq(void)
LIST_INIT(&asc->list_of_work);
asc->cnt = 0;
SCTP_IPI_ITERATOR_WQ_LOCK();
wi = LIST_FIRST(&sctppcbinfo.addr_wq);
wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
while (wi != NULL) {
LIST_REMOVE(wi, sctp_nxt_addr);
LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
asc->cnt++;
wi = LIST_FIRST(&sctppcbinfo.addr_wq);
wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
}
SCTP_IPI_ITERATOR_WQ_UNLOCK();
if (asc->cnt == 0) {
@ -1901,7 +1901,7 @@ sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
break;
case SCTP_TIMER_TYPE_ADDR_WQ:
/* Only 1 tick away :-) */
tmr = &sctppcbinfo.addr_wq_timer;
tmr = &SCTP_BASE_INFO(addr_wq_timer);
to_ticks = SCTP_ADDRESS_TICK_DELAY;
break;
case SCTP_TIMER_TYPE_ITERATOR:
@ -2158,8 +2158,8 @@ sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
} else {
msec = ((net->lastsa >> 2) + net->lastsv) >> 1;
}
if (msec < sctp_early_fr_msec) {
msec = sctp_early_fr_msec;
if (msec < SCTP_BASE_SYSCTL(sctp_early_fr_msec)) {
msec = SCTP_BASE_SYSCTL(sctp_early_fr_msec);
if (msec < SCTP_MINFR_MSEC_FLOOR) {
msec = SCTP_MINFR_MSEC_FLOOR;
}
@ -2259,7 +2259,7 @@ sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
tmr = &inp->sctp_ep.zero_copy_sendq_timer;
break;
case SCTP_TIMER_TYPE_ADDR_WQ:
tmr = &sctppcbinfo.addr_wq_timer;
tmr = &SCTP_BASE_INFO(addr_wq_timer);
break;
case SCTP_TIMER_TYPE_EARLYFR:
if ((stcb == NULL) || (net == NULL)) {
@ -2708,7 +2708,7 @@ sctp_calculate_rto(struct sctp_tcb *stcb,
if (net->RTO_measured) {
calc_time -= (net->lastsa >> SCTP_RTT_SHIFT); /* take away 1/8th when
* shift=3 */
if (sctp_logging_level & SCTP_RTTVAR_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
rto_logging(net, SCTP_LOG_RTTVAR);
}
net->prev_rtt = o_calctime;
@ -2734,7 +2734,7 @@ sctp_calculate_rto(struct sctp_tcb *stcb,
}
first_measure = 1;
net->prev_rtt = o_calctime;
if (sctp_logging_level & SCTP_RTTVAR_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
rto_logging(net, SCTP_LOG_INITIAL_RTT);
}
}
@ -3325,11 +3325,11 @@ sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
SCTP_INP_READ_LOCK(stcb->sctp_ep);
}
sb = &stcb->sctp_socket->so_rcv;
if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
}
sctp_sballoc(stcb, sb, m_notify);
if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
}
atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
@ -4299,11 +4299,11 @@ sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
TAILQ_INSERT_TAIL(&tmp_queue, control, next);
m = control->data;
while (m) {
if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
}
sctp_sbfree(control, stcb, &old_so->so_rcv, m);
if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
}
m = SCTP_BUF_NEXT(m);
@ -4323,11 +4323,11 @@ sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
m = control->data;
while (m) {
if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
}
sctp_sballoc(stcb, &new_so->so_rcv, m);
if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
}
m = SCTP_BUF_NEXT(m);
@ -4391,11 +4391,11 @@ sctp_add_to_readq(struct sctp_inpcb *inp,
continue;
}
prev = m;
if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
}
sctp_sballoc(stcb, sb, m);
if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
}
atomic_add_int(&control->length, SCTP_BUF_LEN(m));
@ -4498,11 +4498,11 @@ sctp_append_to_readq(struct sctp_inpcb *inp,
prev = mm;
len += SCTP_BUF_LEN(mm);
if (sb) {
if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
}
sctp_sballoc(stcb, sb, mm);
if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
}
}
@ -4622,7 +4622,7 @@ sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
return;
}
asoc->chunks_on_out_queue -= chk_cnt;
if (sctp_logging_level & SCTP_MBCNT_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
asoc->total_output_queue_size,
tp1->book_size,
@ -5043,11 +5043,11 @@ sctp_sorecvmsg(struct socket *so,
if (rwnd_req < SCTP_MIN_RWND)
rwnd_req = SCTP_MIN_RWND;
in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
if (sctp_logging_level & SCTP_RECV_RWND_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
sctp_misc_ints(SCTP_SORECV_ENTER,
rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
}
if (sctp_logging_level & SCTP_RECV_RWND_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
sctp_misc_ints(SCTP_SORECV_ENTERPL,
rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
}
@ -5541,12 +5541,12 @@ sctp_sorecvmsg(struct socket *so,
copied_so_far += cp_len;
} else {
/* dispose of the mbuf */
if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
sctp_sblog(&so->so_rcv,
control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
}
sctp_sbfree(control, stcb, &so->so_rcv, m);
if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
sctp_sblog(&so->so_rcv,
control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
}
@ -5593,7 +5593,7 @@ sctp_sorecvmsg(struct socket *so,
if ((in_flags & MSG_PEEK) == 0) {
SCTP_BUF_RESV_UF(m, cp_len);
SCTP_BUF_LEN(m) -= cp_len;
if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
}
atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
@ -5605,7 +5605,7 @@ sctp_sorecvmsg(struct socket *so,
embuf = m;
freed_so_far += cp_len;
freed_so_far += MSIZE;
if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
SCTP_LOG_SBRESULT, 0);
}
@ -5828,14 +5828,14 @@ sctp_sorecvmsg(struct socket *so,
*mp = control->data;
m = control->data;
while (m) {
if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
sctp_sblog(&so->so_rcv,
control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
}
sctp_sbfree(control, stcb, &so->so_rcv, m);
freed_so_far += SCTP_BUF_LEN(m);
freed_so_far += MSIZE;
if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
sctp_sblog(&so->so_rcv,
control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
}
@ -5909,7 +5909,7 @@ sctp_sorecvmsg(struct socket *so,
/* Save the value back for next time */
stcb->freed_by_sorcv_sincelast = freed_so_far;
}
if (sctp_logging_level & SCTP_RECV_RWND_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
if (stcb) {
sctp_misc_ints(SCTP_SORECV_DONE,
freed_so_far,
@ -5935,7 +5935,7 @@ sctp_sorecvmsg(struct socket *so,
struct mbuf *
sctp_m_free(struct mbuf *m)
{
if (sctp_logging_level & SCTP_MBUF_LOGGING_ENABLE) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
if (SCTP_BUF_IS_EXTENDED(m)) {
sctp_log_mb(m, SCTP_MBUF_IFREE);
}
@ -5971,7 +5971,7 @@ sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
* Now that we have the ifa we must awaken the iterator with this
* message.
*/
wi = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_laddr, struct sctp_laddr);
wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
if (wi == NULL) {
SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
return (ENOMEM);
@ -5990,7 +5990,7 @@ sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
* Should this really be a tailq? As it is we will process the
* newest first :-0
*/
LIST_INSERT_HEAD(&sctppcbinfo.addr_wq, wi, sctp_nxt_addr);
LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
(struct sctp_inpcb *)NULL,
(struct sctp_tcb *)NULL,
@ -6586,24 +6586,24 @@ sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_
uint32_t saveindex, newindex;
do {
saveindex = sctp_log.index;
saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
newindex = 1;
} else {
newindex = saveindex + 1;
}
} while (atomic_cmpset_int(&sctp_log.index, saveindex, newindex) == 0);
} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
saveindex = 0;
}
sctp_log.entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
sctp_log.entry[saveindex].subsys = subsys;
sctp_log.entry[saveindex].params[0] = a;
sctp_log.entry[saveindex].params[1] = b;
sctp_log.entry[saveindex].params[2] = c;
sctp_log.entry[saveindex].params[3] = d;
sctp_log.entry[saveindex].params[4] = e;
sctp_log.entry[saveindex].params[5] = f;
SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
#endif
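sctp_log_trace() above reserves a slot in the trace ring by looping on atomic_cmpset_int() over the shared index and then fills the slot it won. A user-space sketch of the same reservation loop, with C11 atomics standing in for atomic_cmpset_int; the buffer size and entry layout are illustrative.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define TRACE_SIZE 8    /* illustrative; the kernel uses SCTP_MAX_LOGGING_SIZE */

struct trace_entry {
        uint32_t subsys;
        uint32_t params[2];
};

static struct {
        atomic_uint index;
        struct trace_entry entry[TRACE_SIZE];
} trace_log;

static void
trace(uint32_t subsys, uint32_t a, uint32_t b)
{
        unsigned saveindex, newindex;

        /* Compete for the next index; only the CAS winner uses its slot. */
        do {
                saveindex = atomic_load(&trace_log.index);
                newindex = (saveindex >= TRACE_SIZE) ? 1 : saveindex + 1;
        } while (!atomic_compare_exchange_weak(&trace_log.index,
            &saveindex, newindex));

        if (saveindex >= TRACE_SIZE)    /* wrap the slot we won back to 0 */
                saveindex = 0;
        trace_log.entry[saveindex].subsys = subsys;
        trace_log.entry[saveindex].params[0] = a;
        trace_log.entry[saveindex].params[1] = b;
}

int
main(void)
{
        for (uint32_t i = 0; i < 10; i++)
                trace(1, i, i * 2);
        printf("next index: %u\n", atomic_load(&trace_log.index));
        return (0);
}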

@ -51,6 +51,7 @@ __FBSDID("$FreeBSD$");
#include <netinet/sctp_input.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/udp.h>
#ifdef IPSEC
#include <netipsec/ipsec.h>
@ -79,6 +80,7 @@ sctp6_input(struct mbuf **i_pak, int *offp, int proto)
struct sctp_tcb *stcb = NULL;
int pkt_len = 0;
int off = *offp;
uint16_t port = 0;
/* get the VRF and table id's */
if (SCTP_GET_PKT_VRFID(*i_pak, vrf_id)) {
@ -125,7 +127,7 @@ sctp6_input(struct mbuf **i_pak, int *offp, int proto)
if (sh->dest_port == 0)
goto bad;
check = sh->checksum; /* save incoming checksum */
if ((check == 0) && (sctp_no_csum_on_loopback) &&
if ((check == 0) && (SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback)) &&
(IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &ip6->ip6_dst))) {
goto sctp_skip_csum;
}
@ -136,6 +138,12 @@ sctp6_input(struct mbuf **i_pak, int *offp, int proto)
calc_check, check, m, mlen, iphlen);
stcb = sctp_findassociation_addr(m, iphlen, offset - sizeof(*ch),
sh, ch, &in6p, &net, vrf_id);
if ((net) && (port)) {
if (net->port == 0) {
sctp_pathmtu_adjustment(in6p, stcb, net, net->mtu - sizeof(struct udphdr));
}
net->port = port;
}
/* in6p's ref-count increased && stcb locked */
if ((in6p) && (stcb)) {
sctp_send_packet_dropped(stcb, net, m, iphlen, 1);
@ -157,6 +165,12 @@ sctp6_input(struct mbuf **i_pak, int *offp, int proto)
*/
stcb = sctp_findassociation_addr(m, iphlen, offset - sizeof(*ch),
sh, ch, &in6p, &net, vrf_id);
if ((net) && (port)) {
if (net->port == 0) {
sctp_pathmtu_adjustment(in6p, stcb, net, net->mtu - sizeof(struct udphdr));
}
net->port = port;
}
/* in6p's ref-count increased */
if (in6p == NULL) {
struct sctp_init_chunk *init_chk, chunk_buf;
@ -177,14 +191,14 @@ sctp6_input(struct mbuf **i_pak, int *offp, int proto)
sh->v_tag = 0;
}
if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, 0);
sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port);
goto bad;
}
if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) {
goto bad;
}
if (ch->chunk_type != SCTP_ABORT_ASSOCIATION)
sctp_send_abort(m, iphlen, sh, 0, NULL, vrf_id, 0);
sctp_send_abort(m, iphlen, sh, 0, NULL, vrf_id, port);
goto bad;
} else if (stcb == NULL) {
refcount_up = 1;
@ -212,7 +226,7 @@ sctp6_input(struct mbuf **i_pak, int *offp, int proto)
/* sa_ignore NO_NULL_CHK */
sctp_common_input_processing(&m, iphlen, offset, length, sh, ch,
in6p, stcb, net, ecn_bits, vrf_id, 0);
in6p, stcb, net, ecn_bits, vrf_id, port);
/* inp's ref-count reduced && stcb unlocked */
/* XXX this stuff below gets moved to appropriate parts later... */
if (m)
@ -384,7 +398,7 @@ sctp6_notify(struct sctp_inpcb *inp,
* PF state.
*/
/* Stop any running T3 timers here? */
if (sctp_cmt_on_off && sctp_cmt_pf) {
if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) {
net->dest_state &= ~SCTP_ADDR_PF;
SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from PF to unreachable.\n",
net);
@ -644,7 +658,7 @@ sctp6_attach(struct socket *so, int proto, struct thread *p)
return EINVAL;
}
if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
error = SCTP_SORESERVE(so, sctp_sendspace, sctp_recvspace);
error = SCTP_SORESERVE(so, SCTP_BASE_SYSCTL(sctp_sendspace), SCTP_BASE_SYSCTL(sctp_recvspace));
if (error)
return error;
}
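The new port handling in sctp6_input() records the UDP encapsulation port on the destination and, the first time it is seen, calls sctp_pathmtu_adjustment() with the MTU shrunk by a UDP header. The sketch below shows only that MTU/port bookkeeping with hypothetical trimmed types; the real sctp_pathmtu_adjustment() additionally walks the send queue and marks oversized chunks for retransmission.

#include <stdint.h>
#include <stdio.h>

#define UDP_HDR_LEN 8                   /* sizeof(struct udphdr) */

/* Hypothetical stand-in for the fields of interest in struct sctp_nets. */
struct net_sketch {
        uint32_t mtu;
        uint16_t port;                  /* 0 = plain SCTP, non-zero = SCTP over UDP */
};

/*
 * First time we learn the peer is reached through a UDP encapsulation
 * port, shrink the usable path MTU by the UDP header we now prepend.
 */
static void
note_udp_tunnel(struct net_sketch *net, uint16_t port)
{
        if (port == 0)
                return;
        if (net->port == 0)
                net->mtu -= UDP_HDR_LEN;
        net->port = port;
}

int
main(void)
{
        struct net_sketch net = { 1500, 0 };

        note_udp_tunnel(&net, 9899);    /* example encapsulation port */
        note_udp_tunnel(&net, 9899);    /* second call must not shrink again */
        printf("mtu=%u port=%u\n", (unsigned)net.mtu, (unsigned)net.port);
        return (0);
}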