tcp: request tracking is not http specific.

This change is a name change only. TCP request tracking can track sendfile and even non-sendfile requests. The
names in the current code, however, use http, and they should not: the feature is not HTTP specific. Let's change the
names so they more properly reflect what is going on. This also fixes conflicts with http_req, which caused application pain.

Reviewed by: tuexen
Sponsored by: Netflix Inc
Differential Revision: https://reviews.freebsd.org/D40229
Randall Stewart 2023-05-24 06:35:36 -04:00
parent 08637d5d15
commit 57a3a161a9
7 changed files with 222 additions and 223 deletions
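
For illustration only (not part of this commit), a minimal sketch of how a stack might reap completed closed-range entries under the new names. Everything used here (struct tcp_sendfile_track, t_tcpreq_info, MAX_TCP_TRK_REQ, the TCP_TRK_TRACK_FLG_* flags, tcp_req_log_req_info() and tcp_req_free_a_slot()) appears in this diff, and the routine simply mirrors tcp_req_check_for_comp():

#ifdef TCP_REQUEST_TRK
/*
 * Sketch: free every closed-range tracking slot that the cumulative ACK
 * (ack_point) has moved past, logging each one as it is released.
 */
static void
example_reap_completed_reqs(struct tcpcb *tp, tcp_seq ack_point)
{
	struct tcp_sendfile_track *ent;
	int i;

	if (tp->t_tcpreq_req == 0 || tp->t_tcpreq_closed == 0)
		return;		/* nothing tracked, or only open-range entries */
	for (i = 0; i < MAX_TCP_TRK_REQ; i++) {
		ent = &tp->t_tcpreq_info[i];
		if (ent->flags == TCP_TRK_TRACK_FLG_EMPTY)
			continue;	/* slot not in use */
		if (ent->flags & TCP_TRK_TRACK_FLG_OPEN)
			continue;	/* open-range entries have no valid end_seq yet */
		if (SEQ_GEQ(ack_point, ent->end_seq)) {
			/* The whole range has been acked: log it and release the slot. */
			tcp_req_log_req_info(tp, ent, i, TCP_TRK_REQ_LOG_FREED, 0, 0);
			tcp_req_free_a_slot(tp, ent);
		}
	}
}
#endif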

View File

@ -463,7 +463,7 @@ struct tcp_function_set {
/*
* TCP log user opaque
*/
struct http_req {
struct tcp_snd_req {
uint64_t timestamp;
uint64_t start;
uint64_t end;
@ -471,7 +471,7 @@ struct http_req {
};
union tcp_log_userdata {
struct http_req http_req;
struct tcp_snd_req tcp_req;
};
struct tcp_log_user {
@ -501,7 +501,7 @@ struct tcp_log_user {
#define TCP_HYBRID_PACING_SETMSS 0x1000 /* Internal flag that tells us we set the mss on this entry */
struct tcp_hybrid_req {
struct http_req req;
struct tcp_snd_req req;
uint64_t cspr;
uint32_t hint_maxseg;
uint32_t hybrid_flags;
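
A rough sketch of how the renamed user-facing structures fit together (assumed usage, not part of this commit): a closed-range request is described in the embedded struct tcp_snd_req and handed to the stack inside a struct tcp_hybrid_req, and tcp_req_alloc_req_full() later copies these fields into a tracking slot. The numeric values and the client_ts variable below are hypothetical, and reading cspr as a client-supplied pacing rate is an assumption; only the structure and field names come from this change.

struct tcp_hybrid_req hybrid;

memset(&hybrid, 0, sizeof(hybrid));
hybrid.req.timestamp = client_ts;	/* caller-supplied request timestamp (hypothetical variable) */
hybrid.req.start = 0;			/* first byte offset of the requested range */
hybrid.req.end = 1048576;		/* end offset of the range; 0 would mark an open-ended range */
hybrid.cspr = 12500000;			/* pacing rate hint, assumed to be bytes per second */
hybrid.hint_maxseg = 0;
hybrid.hybrid_flags = 0;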

View File

@ -1,3 +1,4 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
@ -2852,7 +2853,7 @@ tcp_log_sendfile(struct socket *so, off_t offset, size_t nbytes, int flags)
struct inpcb *inp;
struct tcpcb *tp;
#ifdef TCP_REQUEST_TRK
struct http_sendfile_track *ent;
struct tcp_sendfile_track *ent;
int i, fnd;
#endif
@ -2885,27 +2886,27 @@ tcp_log_sendfile(struct socket *so, off_t offset, size_t nbytes, int flags)
TCP_LOG_SENDFILE, 0, 0, &log, false, &tv);
}
#ifdef TCP_REQUEST_TRK
if (tp->t_http_req == 0) {
if (tp->t_tcpreq_req == 0) {
/* No http requests to track */
goto done;
}
fnd = 0;
if (tp->t_http_closed == 0) {
if (tp->t_tcpreq_closed == 0) {
/* No closed end req to track */
goto skip_closed_req;
}
for(i = 0; i < MAX_TCP_HTTP_REQ; i++) {
for(i = 0; i < MAX_TCP_TRK_REQ; i++) {
/* Lets see if this one can be found */
ent = &tp->t_http_info[i];
if (ent->flags == TCP_HTTP_TRACK_FLG_EMPTY) {
ent = &tp->t_tcpreq_info[i];
if (ent->flags == TCP_TRK_TRACK_FLG_EMPTY) {
/* Not used */
continue;
}
if (ent->flags & TCP_HTTP_TRACK_FLG_OPEN) {
if (ent->flags & TCP_TRK_TRACK_FLG_OPEN) {
/* This pass does not consider open requests */
continue;
}
if (ent->flags & TCP_HTTP_TRACK_FLG_COMP) {
if (ent->flags & TCP_TRK_TRACK_FLG_COMP) {
/* Don't look at what we have completed */
continue;
}
@ -2919,7 +2920,7 @@ tcp_log_sendfile(struct socket *so, off_t offset, size_t nbytes, int flags)
/*
* It is at or past the end, its complete.
*/
ent->flags |= TCP_HTTP_TRACK_FLG_SEQV;
ent->flags |= TCP_TRK_TRACK_FLG_SEQV;
/*
* When an entry completes we can take (snd_una + sb_cc) and know where
* the end of the range really is. Note that this works since two
@ -2934,10 +2935,10 @@ tcp_log_sendfile(struct socket *so, off_t offset, size_t nbytes, int flags)
if (SEQ_GT((tp->snd_una + so->so_snd.sb_ccc), ent->end_seq))
ent->end_seq = tp->snd_una + so->so_snd.sb_ccc;
if ((offset + nbytes) >= ent->end) {
ent->flags |= TCP_HTTP_TRACK_FLG_COMP;
tcp_http_log_req_info(tp, ent, i, TCP_HTTP_REQ_LOG_COMPLETE, offset, nbytes);
ent->flags |= TCP_TRK_TRACK_FLG_COMP;
tcp_req_log_req_info(tp, ent, i, TCP_TRK_REQ_LOG_COMPLETE, offset, nbytes);
} else {
tcp_http_log_req_info(tp, ent, i, TCP_HTTP_REQ_LOG_MOREYET, offset, nbytes);
tcp_req_log_req_info(tp, ent, i, TCP_TRK_REQ_LOG_MOREYET, offset, nbytes);
}
/* We assume that sendfile never sends overlapping requests */
goto done;
@ -2946,23 +2947,23 @@ tcp_log_sendfile(struct socket *so, off_t offset, size_t nbytes, int flags)
skip_closed_req:
if (!fnd) {
/* Ok now lets look for open requests */
for(i = 0; i < MAX_TCP_HTTP_REQ; i++) {
ent = &tp->t_http_info[i];
if (ent->flags == TCP_HTTP_TRACK_FLG_EMPTY) {
for(i = 0; i < MAX_TCP_TRK_REQ; i++) {
ent = &tp->t_tcpreq_info[i];
if (ent->flags == TCP_TRK_TRACK_FLG_EMPTY) {
/* Not used */
continue;
}
if ((ent->flags & TCP_HTTP_TRACK_FLG_OPEN) == 0)
if ((ent->flags & TCP_TRK_TRACK_FLG_OPEN) == 0)
continue;
/* If we reach here its an allocated open request */
if (ent->start == offset) {
/* It begins this request */
ent->start_seq = tp->snd_una +
tptosocket(tp)->so_snd.sb_ccc;
ent->flags |= TCP_HTTP_TRACK_FLG_SEQV;
ent->flags |= TCP_TRK_TRACK_FLG_SEQV;
break;
} else if (offset > ent->start) {
ent->flags |= TCP_HTTP_TRACK_FLG_SEQV;
ent->flags |= TCP_TRK_TRACK_FLG_SEQV;
break;
}
}

View File

@ -259,7 +259,7 @@ enum tcp_log_events {
TCP_TIMELY_WORK, /* Logs regarding Timely CC tweaks 58 */
TCP_LOG_USER_EVENT, /* User space event data 59 */
TCP_LOG_SENDFILE, /* sendfile() logging for TCP connections 60 */
TCP_LOG_HTTP_T, /* logging of http request tracking 61 */
TCP_LOG_REQ_T, /* logging of request tracking 61 */
TCP_LOG_ACCOUNTING, /* Log of TCP Accounting data 62 */
TCP_LOG_FSB, /* FSB information 63 */
RACK_DSACK_HANDLING, /* Handling of DSACK in rack for reordering window 64 */
@ -371,7 +371,7 @@ struct tcp_log_dev_log_queue {
#define TCP_TP_ENOBUF 0x00000002 /* When we hit enobufs with software pacing */
#define TCP_TP_COLLAPSED_WND 0x00000003 /* When a peer to collapses its rwnd on us */
#define TCP_TP_COLLAPSED_RXT 0x00000004 /* When we actually retransmit a collapsed window rsm */
#define TCP_TP_HTTP_LOG_FAIL 0x00000005 /* We tried to allocate a HTTP log but had no space */
#define TCP_TP_REQ_LOG_FAIL 0x00000005 /* We tried to allocate a Request log but had no space */
#define TCP_TP_RESET_RCV 0x00000006 /* Triggers when we receive a RST */
#define TCP_TP_EXCESS_RXT 0x00000007 /* When we get excess RXT's clamping the cwnd */
#define TCP_TP_SAD_TRIGGERED 0x00000008 /* Sack Attack Detection triggers */
@ -511,7 +511,7 @@ tcp_trace_point(struct tcpcb *tp, int num)
* your point will never come out. You specify your defined point in the bbpoint
* side of the inline. An example of this you can find in rack where the
* TCP_BBPOINT_REQ_LEVEL_LOGGING is used. There a specific set of logs are generated
* for each http request that rack is tracking.
* for each request that tcp is tracking.
*
* When turning on BB logging use the inline:
* tcp_set_bblog_state(struct tcpcb *tp, uint8_t ls, uint8_t bbpoint)
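
A sketch of using the inline named just above, with assumptions flagged: the logging-state constant and the locking requirement are assumed, while tcp_set_bblog_state() and TCP_BBPOINT_REQ_LEVEL_LOGGING are taken from the comment itself.

/* Sketch: enable BB point logging of request-level logs on one connection. */
INP_WLOCK(tptoinpcb(tp));			/* assumed: caller must hold the inpcb write lock */
tcp_set_bblog_state(tp, TCP_LOG_VIA_BBPOINTS,	/* assumed name of the point-logging state */
    TCP_BBPOINT_REQ_LEVEL_LOGGING);		/* the bbpoint mentioned in the comment above */
INP_WUNLOCK(tptoinpcb(tp));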

View File

@ -559,7 +559,7 @@ static int
rack_do_syn_sent(struct mbuf *m, struct tcphdr *th,
struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static void rack_chk_http_and_hybrid_on_out(struct tcp_rack *rack, tcp_seq seq, uint32_t len, uint64_t cts);
static void rack_chk_req_and_hybrid_on_out(struct tcp_rack *rack, tcp_seq seq, uint32_t len, uint64_t cts);
struct rack_sendmap *
tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack,
uint32_t tsused);
@ -1967,7 +1967,7 @@ rack_get_fixed_pacing_bw(struct tcp_rack *rack)
static void
rack_log_hybrid_bw(struct tcp_rack *rack, uint32_t seq, uint64_t cbw, uint64_t tim,
uint64_t data, uint8_t mod, uint16_t aux,
struct http_sendfile_track *cur)
struct tcp_sendfile_track *cur)
{
#ifdef TCP_REQUEST_TRK
int do_log = 0;
@ -2049,8 +2049,8 @@ rack_log_hybrid_bw(struct tcp_rack *rack, uint32_t seq, uint64_t cbw, uint64_t t
/* localtime = <delivered | applimited>*/
log.u_bbr.applimited = (uint32_t)(cur->localtime & 0x00000000ffffffff);
log.u_bbr.delivered = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff);
off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_http_info[0]);
log.u_bbr.bbr_substate = (uint8_t)(off / sizeof(struct http_sendfile_track));
off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]);
log.u_bbr.bbr_substate = (uint8_t)(off / sizeof(struct tcp_sendfile_track));
log.u_bbr.flex4 = (uint32_t)(rack->rc_tp->t_sndbytes - cur->sent_at_fs);
log.u_bbr.flex5 = (uint32_t)(rack->rc_tp->t_snd_rxt_bytes - cur->rxt_at_fs);
log.u_bbr.flex7 = (uint16_t)cur->hybrid_flags;
@ -2126,7 +2126,7 @@ rack_rate_cap_bw(struct tcp_rack *rack, uint64_t *bw, int *capped)
* is in bw_rate_cap, but we need to look at
* how long it is until we hit the deadline.
*/
struct http_sendfile_track *ent;
struct tcp_sendfile_track *ent;
ent = rack->r_ctl.rc_last_sft;
microuptime(&tv);
@ -2153,7 +2153,7 @@ rack_rate_cap_bw(struct tcp_rack *rack, uint64_t *bw, int *capped)
* Now ideally we want to use the end_seq to figure out how much more
* but it might not be possible (only if we have the TRACK_FG_COMP on the entry..
*/
if (ent->flags & TCP_HTTP_TRACK_FLG_COMP) {
if (ent->flags & TCP_TRK_TRACK_FLG_COMP) {
if (SEQ_GT(ent->end_seq, rack->rc_tp->snd_una))
lenleft = ent->end_seq - rack->rc_tp->snd_una;
else {
@ -8364,7 +8364,7 @@ rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len,
/* First question is it a retransmission or new? */
if (seq_out == snd_max) {
/* Its new */
rack_chk_http_and_hybrid_on_out(rack, seq_out, len, cts);
rack_chk_req_and_hybrid_on_out(rack, seq_out, len, cts);
again:
rsm = rack_alloc(rack);
if (rsm == NULL) {
@ -11552,7 +11552,7 @@ rack_check_bottom_drag(struct tcpcb *tp,
#ifdef TCP_REQUEST_TRK
static void
rack_log_hybrid(struct tcp_rack *rack, uint32_t seq,
struct http_sendfile_track *cur, uint8_t mod, int line, int err)
struct tcp_sendfile_track *cur, uint8_t mod, int line, int err)
{
int do_log;
@ -11593,8 +11593,8 @@ rack_log_hybrid(struct tcp_rack *rack, uint32_t seq,
log.u_bbr.epoch = (uint32_t)(cur->deadline & 0x00000000ffffffff);
log.u_bbr.lt_epoch = (uint32_t)((cur->deadline >> 32) & 0x00000000ffffffff) ;
log.u_bbr.bbr_state = 1;
off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_http_info[0]);
log.u_bbr.use_lt_bw = (uint8_t)(off / sizeof(struct http_sendfile_track));
off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]);
log.u_bbr.use_lt_bw = (uint8_t)(off / sizeof(struct tcp_sendfile_track));
} else {
log.u_bbr.flex2 = err;
}
@ -11626,15 +11626,15 @@ rack_log_hybrid(struct tcp_rack *rack, uint32_t seq,
static void
rack_set_dgp_hybrid_mode(struct tcp_rack *rack, tcp_seq seq, uint32_t len)
{
struct http_sendfile_track *rc_cur;
struct tcp_sendfile_track *rc_cur;
struct tcpcb *tp;
int err = 0;
rc_cur = tcp_http_find_req_for_seq(rack->rc_tp, seq);
rc_cur = tcp_req_find_req_for_seq(rack->rc_tp, seq);
if (rc_cur == NULL) {
/* If not in the beginning what about the end piece */
rack_log_hybrid(rack, seq, NULL, HYBRID_LOG_NO_RANGE, __LINE__, err);
rc_cur = tcp_http_find_req_for_seq(rack->rc_tp, (seq + len - 1));
rc_cur = tcp_req_find_req_for_seq(rack->rc_tp, (seq + len - 1));
} else {
err = 12345;
}
@ -11728,14 +11728,14 @@ rack_set_dgp_hybrid_mode(struct tcp_rack *rack, tcp_seq seq, uint32_t len)
#endif
static void
rack_chk_http_and_hybrid_on_out(struct tcp_rack *rack, tcp_seq seq, uint32_t len, uint64_t cts)
rack_chk_req_and_hybrid_on_out(struct tcp_rack *rack, tcp_seq seq, uint32_t len, uint64_t cts)
{
#ifdef TCP_REQUEST_TRK
struct http_sendfile_track *ent;
struct tcp_sendfile_track *ent;
ent = rack->r_ctl.rc_last_sft;
if ((ent == NULL) ||
(ent->flags == TCP_HTTP_TRACK_FLG_EMPTY) ||
(ent->flags == TCP_TRK_TRACK_FLG_EMPTY) ||
(SEQ_GEQ(seq, ent->end_seq))) {
/* Time to update the track. */
rack_set_dgp_hybrid_mode(rack, seq, len);
@ -11760,8 +11760,8 @@ rack_chk_http_and_hybrid_on_out(struct tcp_rack *rack, tcp_seq seq, uint32_t len
rack_log_hybrid_bw(rack, seq, len, 0, 0, HYBRID_LOG_EXTEND, 0, ent);
}
/* Now validate we have set the send time of this one */
if ((ent->flags & TCP_HTTP_TRACK_FLG_FSND) == 0) {
ent->flags |= TCP_HTTP_TRACK_FLG_FSND;
if ((ent->flags & TCP_TRK_TRACK_FLG_FSND) == 0) {
ent->flags |= TCP_TRK_TRACK_FLG_FSND;
ent->first_send = cts;
ent->sent_at_fs = rack->rc_tp->t_sndbytes;
ent->rxt_at_fs = rack->rc_tp->t_snd_rxt_bytes;
@ -11908,9 +11908,9 @@ rack_adjust_sendmap_head(struct tcp_rack *rack, struct sockbuf *sb)
#ifdef TCP_REQUEST_TRK
static inline void
rack_http_check_for_comp(struct tcp_rack *rack, tcp_seq th_ack)
rack_req_check_for_comp(struct tcp_rack *rack, tcp_seq th_ack)
{
struct http_sendfile_track *ent;
struct tcp_sendfile_track *ent;
int i;
if ((rack->rc_hybrid_mode == 0) &&
@ -11919,7 +11919,7 @@ rack_http_check_for_comp(struct tcp_rack *rack, tcp_seq th_ack)
* Just do normal completions hybrid pacing is not on
* and CLDL is off as well.
*/
tcp_http_check_for_comp(rack->rc_tp, th_ack);
tcp_req_check_for_comp(rack->rc_tp, th_ack);
return;
}
/*
@ -11929,12 +11929,12 @@ rack_http_check_for_comp(struct tcp_rack *rack, tcp_seq th_ack)
* need to find all entries that are completed by th_ack not
* just a single entry and do our logging.
*/
ent = tcp_http_find_a_req_that_is_completed_by(rack->rc_tp, th_ack, &i);
ent = tcp_req_find_a_req_that_is_completed_by(rack->rc_tp, th_ack, &i);
while (ent != NULL) {
/*
* We may be doing hybrid pacing or CLDL and need more details possibly
* so we do it manually instead of calling
* tcp_http_check_for_comp()
* tcp_req_check_for_comp()
*/
uint64_t laa, tim, data, cbw, ftim;
@ -11944,7 +11944,7 @@ rack_http_check_for_comp(struct tcp_rack *rack, tcp_seq th_ack)
/* calculate the time based on the ack arrival */
data = ent->end - ent->start;
laa = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time);
if (ent->flags & TCP_HTTP_TRACK_FLG_FSND) {
if (ent->flags & TCP_TRK_TRACK_FLG_FSND) {
if (ent->first_send > ent->localtime)
ftim = ent->first_send;
else
@ -11971,11 +11971,11 @@ rack_http_check_for_comp(struct tcp_rack *rack, tcp_seq th_ack)
if (ent == rack->r_ctl.rc_last_sft)
rack->r_ctl.rc_last_sft = NULL;
/* Generate the log that the tcp_netflix call would have */
tcp_http_log_req_info(rack->rc_tp, ent,
i, TCP_HTTP_REQ_LOG_FREED, 0, 0);
tcp_req_log_req_info(rack->rc_tp, ent,
i, TCP_TRK_REQ_LOG_FREED, 0, 0);
/* Free it and see if there is another one */
tcp_http_free_a_slot(rack->rc_tp, ent);
ent = tcp_http_find_a_req_that_is_completed_by(rack->rc_tp, th_ack, &i);
tcp_req_free_a_slot(rack->rc_tp, ent);
ent = tcp_req_find_a_req_that_is_completed_by(rack->rc_tp, th_ack, &i);
}
}
#endif
@ -12126,7 +12126,7 @@ rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT)
rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
#ifdef TCP_REQUEST_TRK
rack_http_check_for_comp(rack, th->th_ack);
rack_req_check_for_comp(rack, th->th_ack);
#endif
}
/*
@ -12984,7 +12984,7 @@ rack_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so,
rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
#ifdef TCP_REQUEST_TRK
rack_http_check_for_comp(rack, th->th_ack);
rack_req_check_for_comp(rack, th->th_ack);
#endif
}
/*
@ -15571,12 +15571,12 @@ rack_log_input_packet(struct tcpcb *tp, struct tcp_rack *rack, struct tcp_ackent
uint8_t xx = 0;
#ifdef TCP_REQUEST_TRK
struct http_sendfile_track *http_req;
struct tcp_sendfile_track *tcp_req;
if (SEQ_GT(ae->ack, tp->snd_una)) {
http_req = tcp_http_find_req_for_seq(tp, (ae->ack-1));
tcp_req = tcp_req_find_req_for_seq(tp, (ae->ack-1));
} else {
http_req = tcp_http_find_req_for_seq(tp, ae->ack);
tcp_req = tcp_req_find_req_for_seq(tp, ae->ack);
}
#endif
memset(&log.u_bbr, 0, sizeof(log.u_bbr));
@ -15618,29 +15618,29 @@ rack_log_input_packet(struct tcpcb *tp, struct tcp_rack *rack, struct tcp_ackent
/* Log the rcv time */
log.u_bbr.delRate = ae->timestamp;
#ifdef TCP_REQUEST_TRK
log.u_bbr.applimited = tp->t_http_closed;
log.u_bbr.applimited = tp->t_tcpreq_closed;
log.u_bbr.applimited <<= 8;
log.u_bbr.applimited |= tp->t_http_open;
log.u_bbr.applimited |= tp->t_tcpreq_open;
log.u_bbr.applimited <<= 8;
log.u_bbr.applimited |= tp->t_http_req;
if (http_req) {
log.u_bbr.applimited |= tp->t_tcpreq_req;
if (tcp_req) {
/* Copy out any client req info */
/* seconds */
log.u_bbr.pkt_epoch = (http_req->localtime / HPTS_USEC_IN_SEC);
log.u_bbr.pkt_epoch = (tcp_req->localtime / HPTS_USEC_IN_SEC);
/* useconds */
log.u_bbr.delivered = (http_req->localtime % HPTS_USEC_IN_SEC);
log.u_bbr.rttProp = http_req->timestamp;
log.u_bbr.cur_del_rate = http_req->start;
if (http_req->flags & TCP_HTTP_TRACK_FLG_OPEN) {
log.u_bbr.delivered = (tcp_req->localtime % HPTS_USEC_IN_SEC);
log.u_bbr.rttProp = tcp_req->timestamp;
log.u_bbr.cur_del_rate = tcp_req->start;
if (tcp_req->flags & TCP_TRK_TRACK_FLG_OPEN) {
log.u_bbr.flex8 |= 1;
} else {
log.u_bbr.flex8 |= 2;
log.u_bbr.bw_inuse = http_req->end;
log.u_bbr.bw_inuse = tcp_req->end;
}
log.u_bbr.flex6 = http_req->start_seq;
if (http_req->flags & TCP_HTTP_TRACK_FLG_COMP) {
log.u_bbr.flex6 = tcp_req->start_seq;
if (tcp_req->flags & TCP_TRK_TRACK_FLG_COMP) {
log.u_bbr.flex8 |= 4;
log.u_bbr.epoch = http_req->end_seq;
log.u_bbr.epoch = tcp_req->end_seq;
}
}
#endif
@ -16028,7 +16028,7 @@ rack_do_compressed_ack_processing(struct tcpcb *tp, struct socket *so, struct mb
rack_process_to_cumack(tp, rack, ae->ack, cts, to,
tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time));
#ifdef TCP_REQUEST_TRK
rack_http_check_for_comp(rack, high_seq);
rack_req_check_for_comp(rack, high_seq);
#endif
if (rack->rc_dsack_round_seen) {
/* Is the dsack round over? */
@ -16665,12 +16665,12 @@ rack_do_segment_nounlock(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
union tcp_log_stackspecific log;
struct timeval ltv;
#ifdef TCP_REQUEST_TRK
struct http_sendfile_track *http_req;
struct tcp_sendfile_track *tcp_req;
if (SEQ_GT(th->th_ack, tp->snd_una)) {
http_req = tcp_http_find_req_for_seq(tp, (th->th_ack-1));
tcp_req = tcp_req_find_req_for_seq(tp, (th->th_ack-1));
} else {
http_req = tcp_http_find_req_for_seq(tp, th->th_ack);
tcp_req = tcp_req_find_req_for_seq(tp, th->th_ack);
}
#endif
memset(&log.u_bbr, 0, sizeof(log.u_bbr));
@ -16711,29 +16711,29 @@ rack_do_segment_nounlock(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
/* Log the rcv time */
log.u_bbr.delRate = m->m_pkthdr.rcv_tstmp;
#ifdef TCP_REQUEST_TRK
log.u_bbr.applimited = tp->t_http_closed;
log.u_bbr.applimited = tp->t_tcpreq_closed;
log.u_bbr.applimited <<= 8;
log.u_bbr.applimited |= tp->t_http_open;
log.u_bbr.applimited |= tp->t_tcpreq_open;
log.u_bbr.applimited <<= 8;
log.u_bbr.applimited |= tp->t_http_req;
if (http_req) {
log.u_bbr.applimited |= tp->t_tcpreq_req;
if (tcp_req) {
/* Copy out any client req info */
/* seconds */
log.u_bbr.pkt_epoch = (http_req->localtime / HPTS_USEC_IN_SEC);
log.u_bbr.pkt_epoch = (tcp_req->localtime / HPTS_USEC_IN_SEC);
/* useconds */
log.u_bbr.delivered = (http_req->localtime % HPTS_USEC_IN_SEC);
log.u_bbr.rttProp = http_req->timestamp;
log.u_bbr.cur_del_rate = http_req->start;
if (http_req->flags & TCP_HTTP_TRACK_FLG_OPEN) {
log.u_bbr.delivered = (tcp_req->localtime % HPTS_USEC_IN_SEC);
log.u_bbr.rttProp = tcp_req->timestamp;
log.u_bbr.cur_del_rate = tcp_req->start;
if (tcp_req->flags & TCP_TRK_TRACK_FLG_OPEN) {
log.u_bbr.flex8 |= 1;
} else {
log.u_bbr.flex8 |= 2;
log.u_bbr.bw_inuse = http_req->end;
log.u_bbr.bw_inuse = tcp_req->end;
}
log.u_bbr.flex6 = http_req->start_seq;
if (http_req->flags & TCP_HTTP_TRACK_FLG_COMP) {
log.u_bbr.flex6 = tcp_req->start_seq;
if (tcp_req->flags & TCP_TRK_TRACK_FLG_COMP) {
log.u_bbr.flex8 |= 4;
log.u_bbr.epoch = http_req->end_seq;
log.u_bbr.epoch = tcp_req->end_seq;
}
}
#endif
@ -22603,7 +22603,7 @@ static int
process_hybrid_pacing(struct tcp_rack *rack, struct tcp_hybrid_req *hybrid)
{
#ifdef TCP_REQUEST_TRK
struct http_sendfile_track *sft;
struct tcp_sendfile_track *sft;
struct timeval tv;
tcp_seq seq;
int err;
@ -22629,7 +22629,7 @@ process_hybrid_pacing(struct tcp_rack *rack, struct tcp_hybrid_req *hybrid)
rack->r_ctl.rc_fixed_pacing_rate_ca = 0;
rack->r_ctl.rc_fixed_pacing_rate_ss = 0;
/* Now allocate or find our entry that will have these settings */
sft = tcp_http_alloc_req_full(rack->rc_tp, &hybrid->req, tcp_tv_to_lusectick(&tv), 0);
sft = tcp_req_alloc_req_full(rack->rc_tp, &hybrid->req, tcp_tv_to_lusectick(&tv), 0);
if (sft == NULL) {
rack->rc_tp->tcp_hybrid_error++;
/* no space, where would it have gone? */

View File

@ -452,7 +452,7 @@ struct rack_control {
uint64_t lt_bw_time; /* Total time with data outstanding (lt_bw = long term bandwidth) */
uint64_t lt_bw_bytes; /* Total bytes acked */
uint64_t lt_timemark; /* 64 bit timestamp when we started sending */
struct http_sendfile_track *rc_last_sft;
struct tcp_sendfile_track *rc_last_sft;
uint32_t lt_seq; /* Seq at start of lt_bw gauge */
int32_t rc_rtt_diff; /* Timely style rtt diff of our gp_srtt */
uint64_t last_sndbytes;

View File

@ -4304,10 +4304,10 @@ tcp_estimate_tls_overhead(struct socket *so, uint64_t tls_usr_bytes)
extern uint32_t tcp_stale_entry_time;
uint32_t tcp_stale_entry_time = 250000;
SYSCTL_UINT(_net_inet_tcp, OID_AUTO, usrlog_stale, CTLFLAG_RW,
&tcp_stale_entry_time, 250000, "Time that a http entry without a sendfile ages out");
&tcp_stale_entry_time, 250000, "Time that a tcpreq entry without a sendfile ages out");
void
tcp_http_log_req_info(struct tcpcb *tp, struct http_sendfile_track *http,
tcp_req_log_req_info(struct tcpcb *tp, struct tcp_sendfile_track *req,
uint16_t slot, uint8_t val, uint64_t offset, uint64_t nbytes)
{
if (tcp_bblogging_on(tp)) {
@ -4319,61 +4319,61 @@ tcp_http_log_req_info(struct tcpcb *tp, struct http_sendfile_track *http,
log.u_bbr.inhpts = tcp_in_hpts(tp);
#endif
log.u_bbr.flex8 = val;
log.u_bbr.rttProp = http->timestamp;
log.u_bbr.delRate = http->start;
log.u_bbr.cur_del_rate = http->end;
log.u_bbr.flex1 = http->start_seq;
log.u_bbr.flex2 = http->end_seq;
log.u_bbr.flex3 = http->flags;
log.u_bbr.flex4 = ((http->localtime >> 32) & 0x00000000ffffffff);
log.u_bbr.flex5 = (http->localtime & 0x00000000ffffffff);
log.u_bbr.rttProp = req->timestamp;
log.u_bbr.delRate = req->start;
log.u_bbr.cur_del_rate = req->end;
log.u_bbr.flex1 = req->start_seq;
log.u_bbr.flex2 = req->end_seq;
log.u_bbr.flex3 = req->flags;
log.u_bbr.flex4 = ((req->localtime >> 32) & 0x00000000ffffffff);
log.u_bbr.flex5 = (req->localtime & 0x00000000ffffffff);
log.u_bbr.flex7 = slot;
log.u_bbr.bw_inuse = offset;
/* nbytes = flex6 | epoch */
log.u_bbr.flex6 = ((nbytes >> 32) & 0x00000000ffffffff);
log.u_bbr.epoch = (nbytes & 0x00000000ffffffff);
/* cspr = lt_epoch | pkts_out */
log.u_bbr.lt_epoch = ((http->cspr >> 32) & 0x00000000ffffffff);
log.u_bbr.pkts_out |= (http->cspr & 0x00000000ffffffff);
log.u_bbr.applimited = tp->t_http_closed;
log.u_bbr.lt_epoch = ((req->cspr >> 32) & 0x00000000ffffffff);
log.u_bbr.pkts_out |= (req->cspr & 0x00000000ffffffff);
log.u_bbr.applimited = tp->t_tcpreq_closed;
log.u_bbr.applimited <<= 8;
log.u_bbr.applimited |= tp->t_http_open;
log.u_bbr.applimited |= tp->t_tcpreq_open;
log.u_bbr.applimited <<= 8;
log.u_bbr.applimited |= tp->t_http_req;
log.u_bbr.applimited |= tp->t_tcpreq_req;
log.u_bbr.timeStamp = tcp_get_usecs(&tv);
TCP_LOG_EVENTP(tp, NULL,
&tptosocket(tp)->so_rcv,
&tptosocket(tp)->so_snd,
TCP_LOG_HTTP_T, 0,
TCP_LOG_REQ_T, 0,
0, &log, false, &tv);
}
}
void
tcp_http_free_a_slot(struct tcpcb *tp, struct http_sendfile_track *ent)
tcp_req_free_a_slot(struct tcpcb *tp, struct tcp_sendfile_track *ent)
{
if (tp->t_http_req > 0)
tp->t_http_req--;
if (ent->flags & TCP_HTTP_TRACK_FLG_OPEN) {
if (tp->t_http_open > 0)
tp->t_http_open--;
if (tp->t_tcpreq_req > 0)
tp->t_tcpreq_req--;
if (ent->flags & TCP_TRK_TRACK_FLG_OPEN) {
if (tp->t_tcpreq_open > 0)
tp->t_tcpreq_open--;
} else {
if (tp->t_http_closed > 0)
tp->t_http_closed--;
if (tp->t_tcpreq_closed > 0)
tp->t_tcpreq_closed--;
}
ent->flags = TCP_HTTP_TRACK_FLG_EMPTY;
ent->flags = TCP_TRK_TRACK_FLG_EMPTY;
}
static void
tcp_http_check_for_stale_entries(struct tcpcb *tp, uint64_t ts, int rm_oldest)
tcp_req_check_for_stale_entries(struct tcpcb *tp, uint64_t ts, int rm_oldest)
{
struct http_sendfile_track *ent;
struct tcp_sendfile_track *ent;
uint64_t time_delta, oldest_delta;
int i, oldest, oldest_set = 0, cnt_rm = 0;
for(i = 0; i < MAX_TCP_HTTP_REQ; i++) {
ent = &tp->t_http_info[i];
if (ent->flags != TCP_HTTP_TRACK_FLG_USED) {
for(i = 0; i < MAX_TCP_TRK_REQ; i++) {
ent = &tp->t_tcpreq_info[i];
if (ent->flags != TCP_TRK_TRACK_FLG_USED) {
/*
* We only care about closed end ranges
* that are allocated and have no sendfile
@ -4398,43 +4398,43 @@ tcp_http_check_for_stale_entries(struct tcpcb *tp, uint64_t ts, int rm_oldest)
* time to purge it.
*/
cnt_rm++;
tcp_http_log_req_info(tp, &tp->t_http_info[i], i, TCP_HTTP_REQ_LOG_STALE,
tcp_req_log_req_info(tp, &tp->t_tcpreq_info[i], i, TCP_TRK_REQ_LOG_STALE,
time_delta, 0);
tcp_http_free_a_slot(tp, ent);
tcp_req_free_a_slot(tp, ent);
}
}
if ((cnt_rm == 0) && rm_oldest && oldest_set) {
ent = &tp->t_http_info[oldest];
tcp_http_log_req_info(tp, &tp->t_http_info[i], i, TCP_HTTP_REQ_LOG_STALE,
ent = &tp->t_tcpreq_info[oldest];
tcp_req_log_req_info(tp, &tp->t_tcpreq_info[i], i, TCP_TRK_REQ_LOG_STALE,
oldest_delta, 1);
tcp_http_free_a_slot(tp, ent);
tcp_req_free_a_slot(tp, ent);
}
}
int
tcp_http_check_for_comp(struct tcpcb *tp, tcp_seq ack_point)
tcp_req_check_for_comp(struct tcpcb *tp, tcp_seq ack_point)
{
int i, ret=0;
struct http_sendfile_track *ent;
struct tcp_sendfile_track *ent;
/* Clean up any old closed end requests that are now completed */
if (tp->t_http_req == 0)
if (tp->t_tcpreq_req == 0)
return(0);
if (tp->t_http_closed == 0)
if (tp->t_tcpreq_closed == 0)
return(0);
for(i = 0; i < MAX_TCP_HTTP_REQ; i++) {
ent = &tp->t_http_info[i];
for(i = 0; i < MAX_TCP_TRK_REQ; i++) {
ent = &tp->t_tcpreq_info[i];
/* Skip empty ones */
if (ent->flags == TCP_HTTP_TRACK_FLG_EMPTY)
if (ent->flags == TCP_TRK_TRACK_FLG_EMPTY)
continue;
/* Skip open ones */
if (ent->flags & TCP_HTTP_TRACK_FLG_OPEN)
if (ent->flags & TCP_TRK_TRACK_FLG_OPEN)
continue;
if (SEQ_GEQ(ack_point, ent->end_seq)) {
/* We are past it -- free it */
tcp_http_log_req_info(tp, ent,
i, TCP_HTTP_REQ_LOG_FREED, 0, 0);
tcp_http_free_a_slot(tp, ent);
tcp_req_log_req_info(tp, ent,
i, TCP_TRK_REQ_LOG_FREED, 0, 0);
tcp_req_free_a_slot(tp, ent);
ret++;
}
}
@ -4442,13 +4442,13 @@ tcp_http_check_for_comp(struct tcpcb *tp, tcp_seq ack_point)
}
int
tcp_http_is_entry_comp(struct tcpcb *tp, struct http_sendfile_track *ent, tcp_seq ack_point)
tcp_req_is_entry_comp(struct tcpcb *tp, struct tcp_sendfile_track *ent, tcp_seq ack_point)
{
if (tp->t_http_req == 0)
if (tp->t_tcpreq_req == 0)
return(-1);
if (tp->t_http_closed == 0)
if (tp->t_tcpreq_closed == 0)
return(-1);
if (ent->flags == TCP_HTTP_TRACK_FLG_EMPTY)
if (ent->flags == TCP_TRK_TRACK_FLG_EMPTY)
return(-1);
if (SEQ_GEQ(ack_point, ent->end_seq)) {
return (1);
@ -4456,26 +4456,26 @@ tcp_http_is_entry_comp(struct tcpcb *tp, struct http_sendfile_track *ent, tcp_se
return (0);
}
struct http_sendfile_track *
tcp_http_find_a_req_that_is_completed_by(struct tcpcb *tp, tcp_seq th_ack, int *ip)
struct tcp_sendfile_track *
tcp_req_find_a_req_that_is_completed_by(struct tcpcb *tp, tcp_seq th_ack, int *ip)
{
/*
* Given an ack point (th_ack) walk through our entries and
* return the first one found that th_ack goes past the
* end_seq.
*/
struct http_sendfile_track *ent;
struct tcp_sendfile_track *ent;
int i;
if (tp->t_http_req == 0) {
if (tp->t_tcpreq_req == 0) {
/* none open */
return (NULL);
}
for(i = 0; i < MAX_TCP_HTTP_REQ; i++) {
ent = &tp->t_http_info[i];
if (ent->flags == TCP_HTTP_TRACK_FLG_EMPTY)
for(i = 0; i < MAX_TCP_TRK_REQ; i++) {
ent = &tp->t_tcpreq_info[i];
if (ent->flags == TCP_TRK_TRACK_FLG_EMPTY)
continue;
if ((ent->flags & TCP_HTTP_TRACK_FLG_OPEN) == 0) {
if ((ent->flags & TCP_TRK_TRACK_FLG_OPEN) == 0) {
if (SEQ_GEQ(th_ack, ent->end_seq)) {
*ip = i;
return (ent);
@ -4485,24 +4485,24 @@ tcp_http_find_a_req_that_is_completed_by(struct tcpcb *tp, tcp_seq th_ack, int *
return (NULL);
}
struct http_sendfile_track *
tcp_http_find_req_for_seq(struct tcpcb *tp, tcp_seq seq)
struct tcp_sendfile_track *
tcp_req_find_req_for_seq(struct tcpcb *tp, tcp_seq seq)
{
struct http_sendfile_track *ent;
struct tcp_sendfile_track *ent;
int i;
if (tp->t_http_req == 0) {
if (tp->t_tcpreq_req == 0) {
/* none open */
return (NULL);
}
for(i = 0; i < MAX_TCP_HTTP_REQ; i++) {
ent = &tp->t_http_info[i];
tcp_http_log_req_info(tp, ent, i, TCP_HTTP_REQ_LOG_SEARCH,
for(i = 0; i < MAX_TCP_TRK_REQ; i++) {
ent = &tp->t_tcpreq_info[i];
tcp_req_log_req_info(tp, ent, i, TCP_TRK_REQ_LOG_SEARCH,
(uint64_t)seq, 0);
if (ent->flags == TCP_HTTP_TRACK_FLG_EMPTY) {
if (ent->flags == TCP_TRK_TRACK_FLG_EMPTY) {
continue;
}
if (ent->flags & TCP_HTTP_TRACK_FLG_OPEN) {
if (ent->flags & TCP_TRK_TRACK_FLG_OPEN) {
/*
* An open end request only needs to
* match the beginning seq or be
@ -4511,7 +4511,7 @@ tcp_http_find_req_for_seq(struct tcpcb *tp, tcp_seq seq)
* wrap).
*/
if ((SEQ_GEQ(seq, ent->start_seq)) ||
(tp->t_http_closed == 0))
(tp->t_tcpreq_closed == 0))
return (ent);
} else {
/*
@ -4528,28 +4528,28 @@ tcp_http_find_req_for_seq(struct tcpcb *tp, tcp_seq seq)
return (NULL);
}
/* Should this be in its own file tcp_http.c ? */
struct http_sendfile_track *
tcp_http_alloc_req_full(struct tcpcb *tp, struct http_req *req, uint64_t ts, int rec_dups)
/* Should this be in its own file tcp_req.c ? */
struct tcp_sendfile_track *
tcp_req_alloc_req_full(struct tcpcb *tp, struct tcp_snd_req *req, uint64_t ts, int rec_dups)
{
struct http_sendfile_track *fil;
struct tcp_sendfile_track *fil;
int i, allocated;
/* In case the stack does not check for completions do so now */
tcp_http_check_for_comp(tp, tp->snd_una);
tcp_req_check_for_comp(tp, tp->snd_una);
/* Check for stale entries */
if (tp->t_http_req)
tcp_http_check_for_stale_entries(tp, ts,
(tp->t_http_req >= MAX_TCP_HTTP_REQ));
if (tp->t_tcpreq_req)
tcp_req_check_for_stale_entries(tp, ts,
(tp->t_tcpreq_req >= MAX_TCP_TRK_REQ));
/* Check to see if this is a duplicate of one not started */
if (tp->t_http_req) {
for(i = 0, allocated = 0; i < MAX_TCP_HTTP_REQ; i++) {
fil = &tp->t_http_info[i];
if (fil->flags != TCP_HTTP_TRACK_FLG_USED)
if (tp->t_tcpreq_req) {
for(i = 0, allocated = 0; i < MAX_TCP_TRK_REQ; i++) {
fil = &tp->t_tcpreq_info[i];
if (fil->flags != TCP_TRK_TRACK_FLG_USED)
continue;
if ((fil->timestamp == req->timestamp) &&
(fil->start == req->start) &&
((fil->flags & TCP_HTTP_TRACK_FLG_OPEN) ||
((fil->flags & TCP_TRK_TRACK_FLG_OPEN) ||
(fil->end == req->end))) {
/*
* We already have this request
@ -4563,19 +4563,19 @@ tcp_http_alloc_req_full(struct tcpcb *tp, struct http_req *req, uint64_t ts, int
}
}
/* Ok if there is no room at the inn we are in trouble */
if (tp->t_http_req >= MAX_TCP_HTTP_REQ) {
tcp_trace_point(tp, TCP_TP_HTTP_LOG_FAIL);
for(i = 0; i < MAX_TCP_HTTP_REQ; i++) {
tcp_http_log_req_info(tp, &tp->t_http_info[i],
i, TCP_HTTP_REQ_LOG_ALLOCFAIL, 0, 0);
if (tp->t_tcpreq_req >= MAX_TCP_TRK_REQ) {
tcp_trace_point(tp, TCP_TP_REQ_LOG_FAIL);
for(i = 0; i < MAX_TCP_TRK_REQ; i++) {
tcp_req_log_req_info(tp, &tp->t_tcpreq_info[i],
i, TCP_TRK_REQ_LOG_ALLOCFAIL, 0, 0);
}
return (NULL);
}
for(i = 0, allocated = 0; i < MAX_TCP_HTTP_REQ; i++) {
fil = &tp->t_http_info[i];
if (fil->flags == TCP_HTTP_TRACK_FLG_EMPTY) {
for(i = 0, allocated = 0; i < MAX_TCP_TRK_REQ; i++) {
fil = &tp->t_tcpreq_info[i];
if (fil->flags == TCP_TRK_TRACK_FLG_EMPTY) {
allocated = 1;
fil->flags = TCP_HTTP_TRACK_FLG_USED;
fil->flags = TCP_TRK_TRACK_FLG_USED;
fil->timestamp = req->timestamp;
fil->localtime = ts;
fil->start = req->start;
@ -4583,7 +4583,7 @@ tcp_http_alloc_req_full(struct tcpcb *tp, struct http_req *req, uint64_t ts, int
fil->end = req->end;
} else {
fil->end = 0;
fil->flags |= TCP_HTTP_TRACK_FLG_OPEN;
fil->flags |= TCP_TRK_TRACK_FLG_OPEN;
}
/*
* We can set the min boundaries to the TCP Sequence space,
@ -4602,13 +4602,13 @@ tcp_http_alloc_req_full(struct tcpcb *tp, struct http_req *req, uint64_t ts, int
fil->end_seq += tcp_estimate_tls_overhead(
tptosocket(tp), (fil->end - fil->start));
}
tp->t_http_req++;
if (fil->flags & TCP_HTTP_TRACK_FLG_OPEN)
tp->t_http_open++;
tp->t_tcpreq_req++;
if (fil->flags & TCP_TRK_TRACK_FLG_OPEN)
tp->t_tcpreq_open++;
else
tp->t_http_closed++;
tcp_http_log_req_info(tp, fil, i,
TCP_HTTP_REQ_LOG_NEW, 0, 0);
tp->t_tcpreq_closed++;
tcp_req_log_req_info(tp, fil, i,
TCP_TRK_REQ_LOG_NEW, 0, 0);
break;
} else
fil = NULL;
@ -4617,9 +4617,9 @@ tcp_http_alloc_req_full(struct tcpcb *tp, struct http_req *req, uint64_t ts, int
}
void
tcp_http_alloc_req(struct tcpcb *tp, union tcp_log_userdata *user, uint64_t ts)
tcp_req_alloc_req(struct tcpcb *tp, union tcp_log_userdata *user, uint64_t ts)
{
(void)tcp_http_alloc_req_full(tp, &user->http_req, ts, 1);
(void)tcp_req_alloc_req_full(tp, &user->tcp_req, ts, 1);
}
#endif

View File

@ -61,14 +61,14 @@
#define TCP_EI_STATUS_2MSL 0xb
#define TCP_EI_STATUS_MAX_VALUE 0xb
#define TCP_HTTP_REQ_LOG_NEW 0x01
#define TCP_HTTP_REQ_LOG_COMPLETE 0x02
#define TCP_HTTP_REQ_LOG_FREED 0x03
#define TCP_HTTP_REQ_LOG_ALLOCFAIL 0x04
#define TCP_HTTP_REQ_LOG_MOREYET 0x05
#define TCP_HTTP_REQ_LOG_FORCEFREE 0x06
#define TCP_HTTP_REQ_LOG_STALE 0x07
#define TCP_HTTP_REQ_LOG_SEARCH 0x08
#define TCP_TRK_REQ_LOG_NEW 0x01
#define TCP_TRK_REQ_LOG_COMPLETE 0x02
#define TCP_TRK_REQ_LOG_FREED 0x03
#define TCP_TRK_REQ_LOG_ALLOCFAIL 0x04
#define TCP_TRK_REQ_LOG_MOREYET 0x05
#define TCP_TRK_REQ_LOG_FORCEFREE 0x06
#define TCP_TRK_REQ_LOG_STALE 0x07
#define TCP_TRK_REQ_LOG_SEARCH 0x08
/************************************************/
/* Status bits we track to assure no duplicates,
@ -135,16 +135,15 @@ struct sackhint {
STAILQ_HEAD(tcp_log_stailq, tcp_log_mem);
#define TCP_HTTP_TRACK_FLG_EMPTY 0x00 /* Available */
#define TCP_HTTP_TRACK_FLG_USED 0x01 /* In use */
#define TCP_HTTP_TRACK_FLG_OPEN 0x02 /* End is not valid (open range request) */
#define TCP_HTTP_TRACK_FLG_SEQV 0x04 /* We had a sendfile that touched it */
#define TCP_HTTP_TRACK_FLG_COMP 0x08 /* Sendfile as placed the last bits (range req only) */
#define TCP_HTTP_TRACK_FLG_FSND 0x10 /* First send has been done into the seq space */
#define MAX_TCP_HTTP_REQ 5 /* Max we will have at once */
#define TCP_TRK_TRACK_FLG_EMPTY 0x00 /* Available */
#define TCP_TRK_TRACK_FLG_USED 0x01 /* In use */
#define TCP_TRK_TRACK_FLG_OPEN 0x02 /* End is not valid (open range request) */
#define TCP_TRK_TRACK_FLG_SEQV 0x04 /* We had a sendfile that touched it */
#define TCP_TRK_TRACK_FLG_COMP 0x08 /* Sendfile as placed the last bits (range req only) */
#define TCP_TRK_TRACK_FLG_FSND 0x10 /* First send has been done into the seq space */
#define MAX_TCP_TRK_REQ 5 /* Max we will have at once */
#ifdef TCP_REQUEST_TRK
struct http_sendfile_track {
struct tcp_sendfile_track {
uint64_t timestamp; /* User sent timestamp */
uint64_t start; /* Start of sendfile offset */
uint64_t end; /* End if not open-range req */
@ -162,7 +161,6 @@ struct http_sendfile_track {
uint32_t hybrid_flags; /* Hybrid flags on this request */
};
#endif
/*
* Change Query responses for a stack switch we create a structure
@ -490,10 +488,10 @@ struct tcpcb {
uint8_t _t_logpoint; /* Used when a BB log points is enabled */
#ifdef TCP_REQUEST_TRK
/* Response tracking addons. */
uint8_t t_http_req; /* Request count */
uint8_t t_http_open; /* Number of open range requests */
uint8_t t_http_closed; /* Number of closed range requests */
struct http_sendfile_track t_http_info[MAX_TCP_HTTP_REQ];
uint8_t t_tcpreq_req; /* Request count */
uint8_t t_tcpreq_open; /* Number of open range requests */
uint8_t t_tcpreq_closed; /* Number of closed range requests */
struct tcp_sendfile_track t_tcpreq_info[MAX_TCP_TRK_REQ];
#endif
};
#endif /* _KERNEL || _WANT_TCPCB */
@ -1512,27 +1510,27 @@ struct mbuf *
int tcp_stats_init(void);
void tcp_log_end_status(struct tcpcb *tp, uint8_t status);
#ifdef TCP_REQUEST_TRK
void tcp_http_free_a_slot(struct tcpcb *tp, struct http_sendfile_track *ent);
struct http_sendfile_track *
tcp_http_find_a_req_that_is_completed_by(struct tcpcb *tp, tcp_seq th_ack, int *ip);
int tcp_http_check_for_comp(struct tcpcb *tp, tcp_seq ack_point);
void tcp_req_free_a_slot(struct tcpcb *tp, struct tcp_sendfile_track *ent);
struct tcp_sendfile_track *
tcp_req_find_a_req_that_is_completed_by(struct tcpcb *tp, tcp_seq th_ack, int *ip);
int tcp_req_check_for_comp(struct tcpcb *tp, tcp_seq ack_point);
int
tcp_http_is_entry_comp(struct tcpcb *tp, struct http_sendfile_track *ent, tcp_seq ack_point);
struct http_sendfile_track *
tcp_http_find_req_for_seq(struct tcpcb *tp, tcp_seq seq);
tcp_req_is_entry_comp(struct tcpcb *tp, struct tcp_sendfile_track *ent, tcp_seq ack_point);
struct tcp_sendfile_track *
tcp_req_find_req_for_seq(struct tcpcb *tp, tcp_seq seq);
void
tcp_http_log_req_info(struct tcpcb *tp,
struct http_sendfile_track *http, uint16_t slot,
tcp_req_log_req_info(struct tcpcb *tp,
struct tcp_sendfile_track *req, uint16_t slot,
uint8_t val, uint64_t offset, uint64_t nbytes);
uint32_t
tcp_estimate_tls_overhead(struct socket *so, uint64_t tls_usr_bytes);
void
tcp_http_alloc_req(struct tcpcb *tp, union tcp_log_userdata *user,
tcp_req_alloc_req(struct tcpcb *tp, union tcp_log_userdata *user,
uint64_t ts);
struct http_sendfile_track *
tcp_http_alloc_req_full(struct tcpcb *tp, struct http_req *req, uint64_t ts, int rec_dups);
struct tcp_sendfile_track *
tcp_req_alloc_req_full(struct tcpcb *tp, struct tcp_snd_req *req, uint64_t ts, int rec_dups);
#endif