In preparation for merging projects/sendfile, transform bare access to the
sb_cc member of struct sockbuf into a pair of inline functions: sbavail() and sbused(). Right now they are equal, but once the notion of "not ready socket buffer data" is checked in, they will differ. Sponsored by: Netflix Sponsored by: Nginx, Inc.
This commit is contained in:
parent
14548525ed
commit
c0b38b545a
@ -1507,11 +1507,11 @@ process_data(struct iwch_ep *ep)
|
||||
process_mpa_request(ep);
|
||||
break;
|
||||
default:
|
||||
if (ep->com.so->so_rcv.sb_cc)
|
||||
if (sbavail(&ep->com.so->so_rcv))
|
||||
printf("%s Unexpected streaming data."
|
||||
" ep %p state %d so %p so_state %x so_rcv.sb_cc %u so_rcv.sb_mb %p\n",
|
||||
__FUNCTION__, ep, state_read(&ep->com), ep->com.so, ep->com.so->so_state,
|
||||
ep->com.so->so_rcv.sb_cc, ep->com.so->so_rcv.sb_mb);
|
||||
sbavail(&ep->com.so->so_rcv), ep->com.so->so_rcv.sb_mb);
|
||||
break;
|
||||
}
|
||||
return;
|
||||
|
@ -445,8 +445,8 @@ t3_push_frames(struct socket *so, int req_completion)
|
||||
* Autosize the send buffer.
|
||||
*/
|
||||
if (snd->sb_flags & SB_AUTOSIZE && VNET(tcp_do_autosndbuf)) {
|
||||
if (snd->sb_cc >= (snd->sb_hiwat / 8 * 7) &&
|
||||
snd->sb_cc < VNET(tcp_autosndbuf_max)) {
|
||||
if (sbused(snd) >= (snd->sb_hiwat / 8 * 7) &&
|
||||
sbused(snd) < VNET(tcp_autosndbuf_max)) {
|
||||
if (!sbreserve_locked(snd, min(snd->sb_hiwat +
|
||||
VNET(tcp_autosndbuf_inc), VNET(tcp_autosndbuf_max)),
|
||||
so, curthread))
|
||||
@ -597,10 +597,10 @@ t3_rcvd(struct toedev *tod, struct tcpcb *tp)
|
||||
INP_WLOCK_ASSERT(inp);
|
||||
|
||||
SOCKBUF_LOCK(so_rcv);
|
||||
KASSERT(toep->tp_enqueued >= so_rcv->sb_cc,
|
||||
("%s: so_rcv->sb_cc > enqueued", __func__));
|
||||
toep->tp_rx_credits += toep->tp_enqueued - so_rcv->sb_cc;
|
||||
toep->tp_enqueued = so_rcv->sb_cc;
|
||||
KASSERT(toep->tp_enqueued >= sbused(so_rcv),
|
||||
("%s: sbused(so_rcv) > enqueued", __func__));
|
||||
toep->tp_rx_credits += toep->tp_enqueued - sbused(so_rcv);
|
||||
toep->tp_enqueued = sbused(so_rcv);
|
||||
SOCKBUF_UNLOCK(so_rcv);
|
||||
|
||||
must_send = toep->tp_rx_credits + 16384 >= tp->rcv_wnd;
|
||||
@ -1768,7 +1768,7 @@ wr_ack(struct toepcb *toep, struct mbuf *m)
|
||||
so_sowwakeup_locked(so);
|
||||
}
|
||||
|
||||
if (snd->sb_sndptroff < snd->sb_cc)
|
||||
if (snd->sb_sndptroff < sbused(snd))
|
||||
t3_push_frames(so, 0);
|
||||
|
||||
out_free:
|
||||
|
@ -584,8 +584,8 @@ process_data(struct c4iw_ep *ep)
|
||||
{
|
||||
struct sockaddr_in *local, *remote;
|
||||
|
||||
CTR5(KTR_IW_CXGBE, "%s: so %p, ep %p, state %s, sb_cc %d", __func__,
|
||||
ep->com.so, ep, states[ep->com.state], ep->com.so->so_rcv.sb_cc);
|
||||
CTR5(KTR_IW_CXGBE, "%s: so %p, ep %p, state %s, sbused %d", __func__,
|
||||
ep->com.so, ep, states[ep->com.state], sbused(&ep->com.so->so_rcv));
|
||||
|
||||
switch (state_read(&ep->com)) {
|
||||
case MPA_REQ_SENT:
|
||||
@ -601,11 +601,11 @@ process_data(struct c4iw_ep *ep)
|
||||
process_mpa_request(ep);
|
||||
break;
|
||||
default:
|
||||
if (ep->com.so->so_rcv.sb_cc)
|
||||
log(LOG_ERR, "%s: Unexpected streaming data. "
|
||||
"ep %p, state %d, so %p, so_state 0x%x, sb_cc %u\n",
|
||||
if (sbused(&ep->com.so->so_rcv))
|
||||
log(LOG_ERR, "%s: Unexpected streaming data. ep %p, "
|
||||
"state %d, so %p, so_state 0x%x, sbused %u\n",
|
||||
__func__, ep, state_read(&ep->com), ep->com.so,
|
||||
ep->com.so->so_state, ep->com.so->so_rcv.sb_cc);
|
||||
ep->com.so->so_state, sbused(&ep->com.so->so_rcv));
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -365,15 +365,15 @@ t4_rcvd(struct toedev *tod, struct tcpcb *tp)
|
||||
INP_WLOCK_ASSERT(inp);
|
||||
|
||||
SOCKBUF_LOCK(sb);
|
||||
KASSERT(toep->sb_cc >= sb->sb_cc,
|
||||
KASSERT(toep->sb_cc >= sbused(sb),
|
||||
("%s: sb %p has more data (%d) than last time (%d).",
|
||||
__func__, sb, sb->sb_cc, toep->sb_cc));
|
||||
__func__, sb, sbused(sb), toep->sb_cc));
|
||||
if (toep->ulp_mode == ULP_MODE_ISCSI) {
|
||||
toep->rx_credits += toep->sb_cc;
|
||||
toep->sb_cc = 0;
|
||||
} else {
|
||||
toep->rx_credits += toep->sb_cc - sb->sb_cc;
|
||||
toep->sb_cc = sb->sb_cc;
|
||||
toep->rx_credits += toep->sb_cc - sbused(sb);
|
||||
toep->sb_cc = sbused(sb);
|
||||
}
|
||||
credits = toep->rx_credits;
|
||||
SOCKBUF_UNLOCK(sb);
|
||||
@ -1079,15 +1079,15 @@ do_peer_close(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
|
||||
tp->rcv_nxt = be32toh(cpl->rcv_nxt);
|
||||
toep->ddp_flags &= ~(DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE);
|
||||
|
||||
KASSERT(toep->sb_cc >= sb->sb_cc,
|
||||
KASSERT(toep->sb_cc >= sbused(sb),
|
||||
("%s: sb %p has more data (%d) than last time (%d).",
|
||||
__func__, sb, sb->sb_cc, toep->sb_cc));
|
||||
toep->rx_credits += toep->sb_cc - sb->sb_cc;
|
||||
__func__, sb, sbused(sb), toep->sb_cc));
|
||||
toep->rx_credits += toep->sb_cc - sbused(sb);
|
||||
#ifdef USE_DDP_RX_FLOW_CONTROL
|
||||
toep->rx_credits -= m->m_len; /* adjust for F_RX_FC_DDP */
|
||||
#endif
|
||||
sbappendstream_locked(sb, m);
|
||||
toep->sb_cc = sb->sb_cc;
|
||||
toep->sb_cc = sbused(sb);
|
||||
}
|
||||
socantrcvmore_locked(so); /* unlocks the sockbuf */
|
||||
|
||||
@ -1582,12 +1582,12 @@ do_rx_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
|
||||
}
|
||||
}
|
||||
|
||||
KASSERT(toep->sb_cc >= sb->sb_cc,
|
||||
KASSERT(toep->sb_cc >= sbused(sb),
|
||||
("%s: sb %p has more data (%d) than last time (%d).",
|
||||
__func__, sb, sb->sb_cc, toep->sb_cc));
|
||||
toep->rx_credits += toep->sb_cc - sb->sb_cc;
|
||||
__func__, sb, sbused(sb), toep->sb_cc));
|
||||
toep->rx_credits += toep->sb_cc - sbused(sb);
|
||||
sbappendstream_locked(sb, m);
|
||||
toep->sb_cc = sb->sb_cc;
|
||||
toep->sb_cc = sbused(sb);
|
||||
sorwakeup_locked(so);
|
||||
SOCKBUF_UNLOCK_ASSERT(sb);
|
||||
|
||||
|
@ -224,15 +224,15 @@ insert_ddp_data(struct toepcb *toep, uint32_t n)
|
||||
tp->rcv_wnd -= n;
|
||||
#endif
|
||||
|
||||
KASSERT(toep->sb_cc >= sb->sb_cc,
|
||||
KASSERT(toep->sb_cc >= sbused(sb),
|
||||
("%s: sb %p has more data (%d) than last time (%d).",
|
||||
__func__, sb, sb->sb_cc, toep->sb_cc));
|
||||
toep->rx_credits += toep->sb_cc - sb->sb_cc;
|
||||
__func__, sb, sbused(sb), toep->sb_cc));
|
||||
toep->rx_credits += toep->sb_cc - sbused(sb);
|
||||
#ifdef USE_DDP_RX_FLOW_CONTROL
|
||||
toep->rx_credits -= n; /* adjust for F_RX_FC_DDP */
|
||||
#endif
|
||||
sbappendstream_locked(sb, m);
|
||||
toep->sb_cc = sb->sb_cc;
|
||||
toep->sb_cc = sbused(sb);
|
||||
}
|
||||
|
||||
/* SET_TCB_FIELD sent as a ULP command looks like this */
|
||||
@ -459,15 +459,15 @@ handle_ddp_data(struct toepcb *toep, __be32 ddp_report, __be32 rcv_nxt, int len)
|
||||
else
|
||||
discourage_ddp(toep);
|
||||
|
||||
KASSERT(toep->sb_cc >= sb->sb_cc,
|
||||
KASSERT(toep->sb_cc >= sbused(sb),
|
||||
("%s: sb %p has more data (%d) than last time (%d).",
|
||||
__func__, sb, sb->sb_cc, toep->sb_cc));
|
||||
toep->rx_credits += toep->sb_cc - sb->sb_cc;
|
||||
__func__, sb, sbused(sb), toep->sb_cc));
|
||||
toep->rx_credits += toep->sb_cc - sbused(sb);
|
||||
#ifdef USE_DDP_RX_FLOW_CONTROL
|
||||
toep->rx_credits -= len; /* adjust for F_RX_FC_DDP */
|
||||
#endif
|
||||
sbappendstream_locked(sb, m);
|
||||
toep->sb_cc = sb->sb_cc;
|
||||
toep->sb_cc = sbused(sb);
|
||||
wakeup:
|
||||
KASSERT(toep->ddp_flags & db_flag,
|
||||
("%s: DDP buffer not active. toep %p, ddp_flags 0x%x, report 0x%x",
|
||||
@ -908,7 +908,7 @@ handle_ddp(struct socket *so, struct uio *uio, int flags, int error)
|
||||
#endif
|
||||
|
||||
/* XXX: too eager to disable DDP, could handle NBIO better than this. */
|
||||
if (sb->sb_cc >= uio->uio_resid || uio->uio_resid < sc->tt.ddp_thres ||
|
||||
if (sbused(sb) >= uio->uio_resid || uio->uio_resid < sc->tt.ddp_thres ||
|
||||
uio->uio_resid > MAX_DDP_BUFFER_SIZE || uio->uio_iovcnt > 1 ||
|
||||
so->so_state & SS_NBIO || flags & (MSG_DONTWAIT | MSG_NBIO) ||
|
||||
error || so->so_error || sb->sb_state & SBS_CANTRCVMORE)
|
||||
@ -946,7 +946,7 @@ handle_ddp(struct socket *so, struct uio *uio, int flags, int error)
|
||||
* payload.
|
||||
*/
|
||||
ddp_flags = select_ddp_flags(so, flags, db_idx);
|
||||
wr = mk_update_tcb_for_ddp(sc, toep, db_idx, sb->sb_cc, ddp_flags);
|
||||
wr = mk_update_tcb_for_ddp(sc, toep, db_idx, sbused(sb), ddp_flags);
|
||||
if (wr == NULL) {
|
||||
/*
|
||||
* Just unhold the pages. The DDP buffer's software state is
|
||||
@ -1134,8 +1134,8 @@ t4_soreceive_ddp(struct socket *so, struct sockaddr **psa, struct uio *uio,
|
||||
|
||||
/* uio should be just as it was at entry */
|
||||
KASSERT(oresid == uio->uio_resid,
|
||||
("%s: oresid = %d, uio_resid = %zd, sb_cc = %d",
|
||||
__func__, oresid, uio->uio_resid, sb->sb_cc));
|
||||
("%s: oresid = %d, uio_resid = %zd, sbused = %d",
|
||||
__func__, oresid, uio->uio_resid, sbused(sb)));
|
||||
|
||||
error = handle_ddp(so, uio, flags, 0);
|
||||
ddp_handled = 1;
|
||||
@ -1145,7 +1145,7 @@ t4_soreceive_ddp(struct socket *so, struct sockaddr **psa, struct uio *uio,
|
||||
|
||||
/* Abort if socket has reported problems. */
|
||||
if (so->so_error) {
|
||||
if (sb->sb_cc > 0)
|
||||
if (sbused(sb))
|
||||
goto deliver;
|
||||
if (oresid > uio->uio_resid)
|
||||
goto out;
|
||||
@ -1157,32 +1157,32 @@ t4_soreceive_ddp(struct socket *so, struct sockaddr **psa, struct uio *uio,
|
||||
|
||||
/* Door is closed. Deliver what is left, if any. */
|
||||
if (sb->sb_state & SBS_CANTRCVMORE) {
|
||||
if (sb->sb_cc > 0)
|
||||
if (sbused(sb))
|
||||
goto deliver;
|
||||
else
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Socket buffer is empty and we shall not block. */
|
||||
if (sb->sb_cc == 0 &&
|
||||
if (sbused(sb) == 0 &&
|
||||
((so->so_state & SS_NBIO) || (flags & (MSG_DONTWAIT|MSG_NBIO)))) {
|
||||
error = EAGAIN;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Socket buffer got some data that we shall deliver now. */
|
||||
if (sb->sb_cc > 0 && !(flags & MSG_WAITALL) &&
|
||||
if (sbused(sb) && !(flags & MSG_WAITALL) &&
|
||||
((sb->sb_flags & SS_NBIO) ||
|
||||
(flags & (MSG_DONTWAIT|MSG_NBIO)) ||
|
||||
sb->sb_cc >= sb->sb_lowat ||
|
||||
sb->sb_cc >= uio->uio_resid ||
|
||||
sb->sb_cc >= sb->sb_hiwat) ) {
|
||||
sbused(sb) >= sb->sb_lowat ||
|
||||
sbused(sb) >= uio->uio_resid ||
|
||||
sbused(sb) >= sb->sb_hiwat) ) {
|
||||
goto deliver;
|
||||
}
|
||||
|
||||
/* On MSG_WAITALL we must wait until all data or error arrives. */
|
||||
if ((flags & MSG_WAITALL) &&
|
||||
(sb->sb_cc >= uio->uio_resid || sb->sb_cc >= sb->sb_lowat))
|
||||
(sbused(sb) >= uio->uio_resid || sbused(sb) >= sb->sb_lowat))
|
||||
goto deliver;
|
||||
|
||||
/*
|
||||
@ -1201,7 +1201,7 @@ t4_soreceive_ddp(struct socket *so, struct sockaddr **psa, struct uio *uio,
|
||||
|
||||
deliver:
|
||||
SOCKBUF_LOCK_ASSERT(&so->so_rcv);
|
||||
KASSERT(sb->sb_cc > 0, ("%s: sockbuf empty", __func__));
|
||||
KASSERT(sbused(sb) > 0, ("%s: sockbuf empty", __func__));
|
||||
KASSERT(sb->sb_mb != NULL, ("%s: sb_mb == NULL", __func__));
|
||||
|
||||
if (sb->sb_flags & SB_DDP_INDICATE && !ddp_handled)
|
||||
@ -1212,7 +1212,7 @@ t4_soreceive_ddp(struct socket *so, struct sockaddr **psa, struct uio *uio,
|
||||
uio->uio_td->td_ru.ru_msgrcv++;
|
||||
|
||||
/* Fill uio until full or current end of socket buffer is reached. */
|
||||
len = min(uio->uio_resid, sb->sb_cc);
|
||||
len = min(uio->uio_resid, sbused(sb));
|
||||
if (mp0 != NULL) {
|
||||
/* Dequeue as many mbufs as possible. */
|
||||
if (!(flags & MSG_PEEK) && len >= sb->sb_mb->m_len) {
|
||||
|
@ -758,7 +758,7 @@ icl_receive_thread(void *arg)
|
||||
* is enough data received to read the PDU.
|
||||
*/
|
||||
SOCKBUF_LOCK(&so->so_rcv);
|
||||
available = so->so_rcv.sb_cc;
|
||||
available = sbavail(&so->so_rcv);
|
||||
if (available < ic->ic_receive_len) {
|
||||
so->so_rcv.sb_lowat = ic->ic_receive_len;
|
||||
cv_wait(&ic->ic_receive_cv, &so->so_rcv.sb_mtx);
|
||||
|
@ -175,16 +175,17 @@ soo_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *active_cred,
|
||||
|
||||
case FIONREAD:
|
||||
/* Unlocked read. */
|
||||
*(int *)data = so->so_rcv.sb_cc;
|
||||
*(int *)data = sbavail(&so->so_rcv);
|
||||
break;
|
||||
|
||||
case FIONWRITE:
|
||||
/* Unlocked read. */
|
||||
*(int *)data = so->so_snd.sb_cc;
|
||||
*(int *)data = sbavail(&so->so_snd);
|
||||
break;
|
||||
|
||||
case FIONSPACE:
|
||||
if ((so->so_snd.sb_hiwat < so->so_snd.sb_cc) ||
|
||||
/* Unlocked read. */
|
||||
if ((so->so_snd.sb_hiwat < sbused(&so->so_snd)) ||
|
||||
(so->so_snd.sb_mbmax < so->so_snd.sb_mbcnt))
|
||||
*(int *)data = 0;
|
||||
else
|
||||
@ -254,6 +255,7 @@ soo_stat(struct file *fp, struct stat *ub, struct ucred *active_cred,
|
||||
struct thread *td)
|
||||
{
|
||||
struct socket *so = fp->f_data;
|
||||
struct sockbuf *sb;
|
||||
#ifdef MAC
|
||||
int error;
|
||||
#endif
|
||||
@ -269,15 +271,18 @@ soo_stat(struct file *fp, struct stat *ub, struct ucred *active_cred,
|
||||
* If SBS_CANTRCVMORE is set, but there's still data left in the
|
||||
* receive buffer, the socket is still readable.
|
||||
*/
|
||||
SOCKBUF_LOCK(&so->so_rcv);
|
||||
if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0 ||
|
||||
so->so_rcv.sb_cc != 0)
|
||||
sb = &so->so_rcv;
|
||||
SOCKBUF_LOCK(sb);
|
||||
if ((sb->sb_state & SBS_CANTRCVMORE) == 0 || sbavail(sb))
|
||||
ub->st_mode |= S_IRUSR | S_IRGRP | S_IROTH;
|
||||
ub->st_size = so->so_rcv.sb_cc - so->so_rcv.sb_ctl;
|
||||
SOCKBUF_UNLOCK(&so->so_rcv);
|
||||
/* Unlocked read. */
|
||||
if ((so->so_snd.sb_state & SBS_CANTSENDMORE) == 0)
|
||||
ub->st_size = sbavail(sb) - sb->sb_ctl;
|
||||
SOCKBUF_UNLOCK(sb);
|
||||
|
||||
sb = &so->so_snd;
|
||||
SOCKBUF_LOCK(sb);
|
||||
if ((sb->sb_state & SBS_CANTSENDMORE) == 0)
|
||||
ub->st_mode |= S_IWUSR | S_IWGRP | S_IWOTH;
|
||||
SOCKBUF_UNLOCK(sb);
|
||||
ub->st_uid = so->so_cred->cr_uid;
|
||||
ub->st_gid = so->so_cred->cr_gid;
|
||||
return (*so->so_proto->pr_usrreqs->pru_sense)(so, ub);
|
||||
|
@ -1522,12 +1522,12 @@ soreceive_generic(struct socket *so, struct sockaddr **psa, struct uio *uio,
|
||||
* 2. MSG_DONTWAIT is not set
|
||||
*/
|
||||
if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
|
||||
so->so_rcv.sb_cc < uio->uio_resid) &&
|
||||
so->so_rcv.sb_cc < so->so_rcv.sb_lowat &&
|
||||
sbavail(&so->so_rcv) < uio->uio_resid) &&
|
||||
sbavail(&so->so_rcv) < so->so_rcv.sb_lowat &&
|
||||
m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) {
|
||||
KASSERT(m != NULL || !so->so_rcv.sb_cc,
|
||||
("receive: m == %p so->so_rcv.sb_cc == %u",
|
||||
m, so->so_rcv.sb_cc));
|
||||
KASSERT(m != NULL || !sbavail(&so->so_rcv),
|
||||
("receive: m == %p sbavail == %u",
|
||||
m, sbavail(&so->so_rcv)));
|
||||
if (so->so_error) {
|
||||
if (m != NULL)
|
||||
goto dontblock;
|
||||
@ -1976,7 +1976,7 @@ soreceive_stream(struct socket *so, struct sockaddr **psa, struct uio *uio,
|
||||
|
||||
/* Abort if socket has reported problems. */
|
||||
if (so->so_error) {
|
||||
if (sb->sb_cc > 0)
|
||||
if (sbavail(sb) > 0)
|
||||
goto deliver;
|
||||
if (oresid > uio->uio_resid)
|
||||
goto out;
|
||||
@ -1988,32 +1988,32 @@ soreceive_stream(struct socket *so, struct sockaddr **psa, struct uio *uio,
|
||||
|
||||
/* Door is closed. Deliver what is left, if any. */
|
||||
if (sb->sb_state & SBS_CANTRCVMORE) {
|
||||
if (sb->sb_cc > 0)
|
||||
if (sbavail(sb) > 0)
|
||||
goto deliver;
|
||||
else
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Socket buffer is empty and we shall not block. */
|
||||
if (sb->sb_cc == 0 &&
|
||||
if (sbavail(sb) == 0 &&
|
||||
((so->so_state & SS_NBIO) || (flags & (MSG_DONTWAIT|MSG_NBIO)))) {
|
||||
error = EAGAIN;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Socket buffer got some data that we shall deliver now. */
|
||||
if (sb->sb_cc > 0 && !(flags & MSG_WAITALL) &&
|
||||
if (sbavail(sb) > 0 && !(flags & MSG_WAITALL) &&
|
||||
((sb->sb_flags & SS_NBIO) ||
|
||||
(flags & (MSG_DONTWAIT|MSG_NBIO)) ||
|
||||
sb->sb_cc >= sb->sb_lowat ||
|
||||
sb->sb_cc >= uio->uio_resid ||
|
||||
sb->sb_cc >= sb->sb_hiwat) ) {
|
||||
sbavail(sb) >= sb->sb_lowat ||
|
||||
sbavail(sb) >= uio->uio_resid ||
|
||||
sbavail(sb) >= sb->sb_hiwat) ) {
|
||||
goto deliver;
|
||||
}
|
||||
|
||||
/* On MSG_WAITALL we must wait until all data or error arrives. */
|
||||
if ((flags & MSG_WAITALL) &&
|
||||
(sb->sb_cc >= uio->uio_resid || sb->sb_cc >= sb->sb_hiwat))
|
||||
(sbavail(sb) >= uio->uio_resid || sbavail(sb) >= sb->sb_hiwat))
|
||||
goto deliver;
|
||||
|
||||
/*
|
||||
@ -2027,7 +2027,7 @@ soreceive_stream(struct socket *so, struct sockaddr **psa, struct uio *uio,
|
||||
|
||||
deliver:
|
||||
SOCKBUF_LOCK_ASSERT(&so->so_rcv);
|
||||
KASSERT(sb->sb_cc > 0, ("%s: sockbuf empty", __func__));
|
||||
KASSERT(sbavail(sb) > 0, ("%s: sockbuf empty", __func__));
|
||||
KASSERT(sb->sb_mb != NULL, ("%s: sb_mb == NULL", __func__));
|
||||
|
||||
/* Statistics. */
|
||||
@ -2035,7 +2035,7 @@ soreceive_stream(struct socket *so, struct sockaddr **psa, struct uio *uio,
|
||||
uio->uio_td->td_ru.ru_msgrcv++;
|
||||
|
||||
/* Fill uio until full or current end of socket buffer is reached. */
|
||||
len = min(uio->uio_resid, sb->sb_cc);
|
||||
len = min(uio->uio_resid, sbavail(sb));
|
||||
if (mp0 != NULL) {
|
||||
/* Dequeue as many mbufs as possible. */
|
||||
if (!(flags & MSG_PEEK) && len >= sb->sb_mb->m_len) {
|
||||
@ -2170,9 +2170,9 @@ soreceive_dgram(struct socket *so, struct sockaddr **psa, struct uio *uio,
|
||||
*/
|
||||
SOCKBUF_LOCK(&so->so_rcv);
|
||||
while ((m = so->so_rcv.sb_mb) == NULL) {
|
||||
KASSERT(so->so_rcv.sb_cc == 0,
|
||||
("soreceive_dgram: sb_mb NULL but sb_cc %u",
|
||||
so->so_rcv.sb_cc));
|
||||
KASSERT(sbavail(&so->so_rcv) == 0,
|
||||
("soreceive_dgram: sb_mb NULL but sbavail %u",
|
||||
sbavail(&so->so_rcv)));
|
||||
if (so->so_error) {
|
||||
error = so->so_error;
|
||||
so->so_error = 0;
|
||||
@ -3248,7 +3248,7 @@ filt_soread(struct knote *kn, long hint)
|
||||
so = kn->kn_fp->f_data;
|
||||
SOCKBUF_LOCK_ASSERT(&so->so_rcv);
|
||||
|
||||
kn->kn_data = so->so_rcv.sb_cc - so->so_rcv.sb_ctl;
|
||||
kn->kn_data = sbavail(&so->so_rcv) - so->so_rcv.sb_ctl;
|
||||
if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
|
||||
kn->kn_flags |= EV_EOF;
|
||||
kn->kn_fflags = so->so_error;
|
||||
@ -3260,7 +3260,7 @@ filt_soread(struct knote *kn, long hint)
|
||||
if (kn->kn_data >= kn->kn_sdata)
|
||||
return 1;
|
||||
} else {
|
||||
if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat)
|
||||
if (sbavail(&so->so_rcv) >= so->so_rcv.sb_lowat)
|
||||
return 1;
|
||||
}
|
||||
|
||||
@ -3451,7 +3451,7 @@ soisdisconnected(struct socket *so)
|
||||
sorwakeup_locked(so);
|
||||
SOCKBUF_LOCK(&so->so_snd);
|
||||
so->so_snd.sb_state |= SBS_CANTSENDMORE;
|
||||
sbdrop_locked(&so->so_snd, so->so_snd.sb_cc);
|
||||
sbdrop_locked(&so->so_snd, sbused(&so->so_snd));
|
||||
sowwakeup_locked(so);
|
||||
wakeup(&so->so_timeo);
|
||||
}
|
||||
|
@ -1127,9 +1127,8 @@ ng_btsocket_l2cap_process_l2ca_write_rsp(struct ng_mesg *msg,
|
||||
/*
|
||||
* Check if we have more data to send
|
||||
*/
|
||||
|
||||
sbdroprecord(&pcb->so->so_snd);
|
||||
if (pcb->so->so_snd.sb_cc > 0) {
|
||||
if (sbavail(&pcb->so->so_snd) > 0) {
|
||||
if (ng_btsocket_l2cap_send2(pcb) == 0)
|
||||
ng_btsocket_l2cap_timeout(pcb);
|
||||
else
|
||||
@ -2513,7 +2512,7 @@ ng_btsocket_l2cap_send2(ng_btsocket_l2cap_pcb_p pcb)
|
||||
|
||||
mtx_assert(&pcb->pcb_mtx, MA_OWNED);
|
||||
|
||||
if (pcb->so->so_snd.sb_cc == 0)
|
||||
if (sbavail(&pcb->so->so_snd) == 0)
|
||||
return (EINVAL); /* XXX */
|
||||
|
||||
m = m_dup(pcb->so->so_snd.sb_mb, M_NOWAIT);
|
||||
|
@ -3279,7 +3279,7 @@ ng_btsocket_rfcomm_pcb_send(ng_btsocket_rfcomm_pcb_p pcb, int limit)
|
||||
}
|
||||
|
||||
for (error = 0, sent = 0; sent < limit; sent ++) {
|
||||
length = min(pcb->mtu, pcb->so->so_snd.sb_cc);
|
||||
length = min(pcb->mtu, sbavail(&pcb->so->so_snd));
|
||||
if (length == 0)
|
||||
break;
|
||||
|
||||
|
@ -906,7 +906,7 @@ ng_btsocket_sco_default_msg_input(struct ng_mesg *msg, hook_p hook)
|
||||
sbdroprecord(&pcb->so->so_snd);
|
||||
|
||||
/* Send more if we have any */
|
||||
if (pcb->so->so_snd.sb_cc > 0)
|
||||
if (sbavail(&pcb->so->so_snd) > 0)
|
||||
if (ng_btsocket_sco_send2(pcb) == 0)
|
||||
ng_btsocket_sco_timeout(pcb);
|
||||
|
||||
@ -1748,7 +1748,7 @@ ng_btsocket_sco_send2(ng_btsocket_sco_pcb_p pcb)
|
||||
mtx_assert(&pcb->pcb_mtx, MA_OWNED);
|
||||
|
||||
while (pcb->rt->pending < pcb->rt->num_pkts &&
|
||||
pcb->so->so_snd.sb_cc > 0) {
|
||||
sbavail(&pcb->so->so_snd) > 0) {
|
||||
/* Get a copy of the first packet on send queue */
|
||||
m = m_dup(pcb->so->so_snd.sb_mb, M_NOWAIT);
|
||||
if (m == NULL) {
|
||||
|
@ -75,7 +75,7 @@ sohasdns(struct socket *so, void *arg, int waitflag)
|
||||
struct sockbuf *sb = &so->so_rcv;
|
||||
|
||||
/* If the socket is full, we're ready. */
|
||||
if (sb->sb_cc >= sb->sb_hiwat || sb->sb_mbcnt >= sb->sb_mbmax)
|
||||
if (sbused(sb) >= sb->sb_hiwat || sb->sb_mbcnt >= sb->sb_mbmax)
|
||||
goto ready;
|
||||
|
||||
/* Check to see if we have a request. */
|
||||
@ -115,14 +115,14 @@ skippacket(struct sockbuf *sb) {
|
||||
unsigned long packlen;
|
||||
struct packet q, *p = &q;
|
||||
|
||||
if (sb->sb_cc < 2)
|
||||
if (sbavail(sb) < 2)
|
||||
return DNS_WAIT;
|
||||
|
||||
q.m = sb->sb_mb;
|
||||
q.n = q.m->m_nextpkt;
|
||||
q.moff = 0;
|
||||
q.offset = 0;
|
||||
q.len = sb->sb_cc;
|
||||
q.len = sbavail(sb);
|
||||
|
||||
GET16(p, packlen);
|
||||
if (packlen + 2 > q.len)
|
||||
|
@ -92,7 +92,7 @@ sbfull(struct sockbuf *sb)
|
||||
"mbcnt(%ld) >= mbmax(%ld): %d",
|
||||
sb->sb_cc, sb->sb_hiwat, sb->sb_cc >= sb->sb_hiwat,
|
||||
sb->sb_mbcnt, sb->sb_mbmax, sb->sb_mbcnt >= sb->sb_mbmax);
|
||||
return (sb->sb_cc >= sb->sb_hiwat || sb->sb_mbcnt >= sb->sb_mbmax);
|
||||
return (sbused(sb) >= sb->sb_hiwat || sb->sb_mbcnt >= sb->sb_mbmax);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -162,13 +162,14 @@ static int
|
||||
sohashttpget(struct socket *so, void *arg, int waitflag)
|
||||
{
|
||||
|
||||
if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0 && !sbfull(&so->so_rcv)) {
|
||||
if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0 &&
|
||||
!sbfull(&so->so_rcv)) {
|
||||
struct mbuf *m;
|
||||
char *cmp;
|
||||
int cmplen, cc;
|
||||
|
||||
m = so->so_rcv.sb_mb;
|
||||
cc = so->so_rcv.sb_cc - 1;
|
||||
cc = sbavail(&so->so_rcv) - 1;
|
||||
if (cc < 1)
|
||||
return (SU_OK);
|
||||
switch (*mtod(m, char *)) {
|
||||
@ -215,7 +216,7 @@ soparsehttpvers(struct socket *so, void *arg, int waitflag)
|
||||
goto fallout;
|
||||
|
||||
m = so->so_rcv.sb_mb;
|
||||
cc = so->so_rcv.sb_cc;
|
||||
cc = sbavail(&so->so_rcv);
|
||||
inspaces = spaces = 0;
|
||||
for (m = so->so_rcv.sb_mb; m; m = n) {
|
||||
n = m->m_nextpkt;
|
||||
@ -304,7 +305,7 @@ soishttpconnected(struct socket *so, void *arg, int waitflag)
|
||||
* have NCHRS left
|
||||
*/
|
||||
copied = 0;
|
||||
ccleft = so->so_rcv.sb_cc;
|
||||
ccleft = sbavail(&so->so_rcv);
|
||||
if (ccleft < NCHRS)
|
||||
goto readmore;
|
||||
a = b = c = '\0';
|
||||
|
@ -782,9 +782,9 @@ siftr_siftdata(struct pkt_node *pn, struct inpcb *inp, struct tcpcb *tp,
|
||||
pn->flags = tp->t_flags;
|
||||
pn->rxt_length = tp->t_rxtcur;
|
||||
pn->snd_buf_hiwater = inp->inp_socket->so_snd.sb_hiwat;
|
||||
pn->snd_buf_cc = inp->inp_socket->so_snd.sb_cc;
|
||||
pn->snd_buf_cc = sbused(&inp->inp_socket->so_snd);
|
||||
pn->rcv_buf_hiwater = inp->inp_socket->so_rcv.sb_hiwat;
|
||||
pn->rcv_buf_cc = inp->inp_socket->so_rcv.sb_cc;
|
||||
pn->rcv_buf_cc = sbused(&inp->inp_socket->so_rcv);
|
||||
pn->sent_inflight_bytes = tp->snd_max - tp->snd_una;
|
||||
pn->t_segqlen = tp->t_segqlen;
|
||||
|
||||
|
@ -1745,7 +1745,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
|
||||
tcp_timer_activate(tp, TT_REXMT,
|
||||
tp->t_rxtcur);
|
||||
sowwakeup(so);
|
||||
if (so->so_snd.sb_cc)
|
||||
if (sbavail(&so->so_snd))
|
||||
(void) tcp_output(tp);
|
||||
goto check_delack;
|
||||
}
|
||||
@ -2526,7 +2526,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
|
||||
* Otherwise we would send pure ACKs.
|
||||
*/
|
||||
SOCKBUF_LOCK(&so->so_snd);
|
||||
avail = so->so_snd.sb_cc -
|
||||
avail = sbavail(&so->so_snd) -
|
||||
(tp->snd_nxt - tp->snd_una);
|
||||
SOCKBUF_UNLOCK(&so->so_snd);
|
||||
if (avail > 0)
|
||||
@ -2661,10 +2661,10 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
|
||||
cc_ack_received(tp, th, CC_ACK);
|
||||
|
||||
SOCKBUF_LOCK(&so->so_snd);
|
||||
if (acked > so->so_snd.sb_cc) {
|
||||
tp->snd_wnd -= so->so_snd.sb_cc;
|
||||
if (acked > sbavail(&so->so_snd)) {
|
||||
tp->snd_wnd -= sbavail(&so->so_snd);
|
||||
mfree = sbcut_locked(&so->so_snd,
|
||||
(int)so->so_snd.sb_cc);
|
||||
(int)sbavail(&so->so_snd));
|
||||
ourfinisacked = 1;
|
||||
} else {
|
||||
mfree = sbcut_locked(&so->so_snd, acked);
|
||||
@ -2790,7 +2790,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
|
||||
* actually wanting to send this much urgent data.
|
||||
*/
|
||||
SOCKBUF_LOCK(&so->so_rcv);
|
||||
if (th->th_urp + so->so_rcv.sb_cc > sb_max) {
|
||||
if (th->th_urp + sbavail(&so->so_rcv) > sb_max) {
|
||||
th->th_urp = 0; /* XXX */
|
||||
thflags &= ~TH_URG; /* XXX */
|
||||
SOCKBUF_UNLOCK(&so->so_rcv); /* XXX */
|
||||
@ -2812,7 +2812,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
|
||||
*/
|
||||
if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) {
|
||||
tp->rcv_up = th->th_seq + th->th_urp;
|
||||
so->so_oobmark = so->so_rcv.sb_cc +
|
||||
so->so_oobmark = sbavail(&so->so_rcv) +
|
||||
(tp->rcv_up - tp->rcv_nxt) - 1;
|
||||
if (so->so_oobmark == 0)
|
||||
so->so_rcv.sb_state |= SBS_RCVATMARK;
|
||||
|
@ -322,7 +322,7 @@ tcp_output(struct tcpcb *tp)
|
||||
* to send then the probe will be the FIN
|
||||
* itself.
|
||||
*/
|
||||
if (off < so->so_snd.sb_cc)
|
||||
if (off < sbused(&so->so_snd))
|
||||
flags &= ~TH_FIN;
|
||||
sendwin = 1;
|
||||
} else {
|
||||
@ -348,7 +348,8 @@ tcp_output(struct tcpcb *tp)
|
||||
*/
|
||||
if (sack_rxmit == 0) {
|
||||
if (sack_bytes_rxmt == 0)
|
||||
len = ((long)ulmin(so->so_snd.sb_cc, sendwin) - off);
|
||||
len = ((long)ulmin(sbavail(&so->so_snd), sendwin) -
|
||||
off);
|
||||
else {
|
||||
long cwin;
|
||||
|
||||
@ -357,8 +358,8 @@ tcp_output(struct tcpcb *tp)
|
||||
* sending new data, having retransmitted all the
|
||||
* data possible in the scoreboard.
|
||||
*/
|
||||
len = ((long)ulmin(so->so_snd.sb_cc, tp->snd_wnd)
|
||||
- off);
|
||||
len = ((long)ulmin(sbavail(&so->so_snd), tp->snd_wnd) -
|
||||
off);
|
||||
/*
|
||||
* Don't remove this (len > 0) check !
|
||||
* We explicitly check for len > 0 here (although it
|
||||
@ -457,12 +458,15 @@ tcp_output(struct tcpcb *tp)
|
||||
* TODO: Shrink send buffer during idle periods together
|
||||
* with congestion window. Requires another timer. Has to
|
||||
* wait for upcoming tcp timer rewrite.
|
||||
*
|
||||
* XXXGL: should there be used sbused() or sbavail()?
|
||||
*/
|
||||
if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) {
|
||||
if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat &&
|
||||
so->so_snd.sb_cc >= (so->so_snd.sb_hiwat / 8 * 7) &&
|
||||
so->so_snd.sb_cc < V_tcp_autosndbuf_max &&
|
||||
sendwin >= (so->so_snd.sb_cc - (tp->snd_nxt - tp->snd_una))) {
|
||||
sbused(&so->so_snd) >= (so->so_snd.sb_hiwat / 8 * 7) &&
|
||||
sbused(&so->so_snd) < V_tcp_autosndbuf_max &&
|
||||
sendwin >= (sbused(&so->so_snd) -
|
||||
(tp->snd_nxt - tp->snd_una))) {
|
||||
if (!sbreserve_locked(&so->so_snd,
|
||||
min(so->so_snd.sb_hiwat + V_tcp_autosndbuf_inc,
|
||||
V_tcp_autosndbuf_max), so, curthread))
|
||||
@ -499,10 +503,11 @@ tcp_output(struct tcpcb *tp)
|
||||
tso = 1;
|
||||
|
||||
if (sack_rxmit) {
|
||||
if (SEQ_LT(p->rxmit + len, tp->snd_una + so->so_snd.sb_cc))
|
||||
if (SEQ_LT(p->rxmit + len, tp->snd_una + sbused(&so->so_snd)))
|
||||
flags &= ~TH_FIN;
|
||||
} else {
|
||||
if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + so->so_snd.sb_cc))
|
||||
if (SEQ_LT(tp->snd_nxt + len, tp->snd_una +
|
||||
sbused(&so->so_snd)))
|
||||
flags &= ~TH_FIN;
|
||||
}
|
||||
|
||||
@ -532,7 +537,7 @@ tcp_output(struct tcpcb *tp)
|
||||
*/
|
||||
if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */
|
||||
(idle || (tp->t_flags & TF_NODELAY)) &&
|
||||
len + off >= so->so_snd.sb_cc &&
|
||||
len + off >= sbavail(&so->so_snd) &&
|
||||
(tp->t_flags & TF_NOPUSH) == 0) {
|
||||
goto send;
|
||||
}
|
||||
@ -660,7 +665,7 @@ tcp_output(struct tcpcb *tp)
|
||||
* if window is nonzero, transmit what we can,
|
||||
* otherwise force out a byte.
|
||||
*/
|
||||
if (so->so_snd.sb_cc && !tcp_timer_active(tp, TT_REXMT) &&
|
||||
if (sbavail(&so->so_snd) && !tcp_timer_active(tp, TT_REXMT) &&
|
||||
!tcp_timer_active(tp, TT_PERSIST)) {
|
||||
tp->t_rxtshift = 0;
|
||||
tcp_setpersist(tp);
|
||||
@ -863,7 +868,7 @@ tcp_output(struct tcpcb *tp)
|
||||
* emptied:
|
||||
*/
|
||||
max_len = (tp->t_maxopd - optlen);
|
||||
if ((off + len) < so->so_snd.sb_cc) {
|
||||
if ((off + len) < sbavail(&so->so_snd)) {
|
||||
moff = len % max_len;
|
||||
if (moff != 0) {
|
||||
len -= moff;
|
||||
@ -979,7 +984,7 @@ tcp_output(struct tcpcb *tp)
|
||||
* give data to the user when a buffer fills or
|
||||
* a PUSH comes in.)
|
||||
*/
|
||||
if (off + len == so->so_snd.sb_cc)
|
||||
if (off + len == sbused(&so->so_snd))
|
||||
flags |= TH_PUSH;
|
||||
SOCKBUF_UNLOCK(&so->so_snd);
|
||||
} else {
|
||||
|
@ -747,7 +747,7 @@ sdp_start_disconnect(struct sdp_sock *ssk)
|
||||
("sdp_start_disconnect: sdp_drop() returned NULL"));
|
||||
} else {
|
||||
soisdisconnecting(so);
|
||||
unread = so->so_rcv.sb_cc;
|
||||
unread = sbused(&so->so_rcv);
|
||||
sbflush(&so->so_rcv);
|
||||
sdp_usrclosed(ssk);
|
||||
if (!(ssk->flags & SDP_DROPPED)) {
|
||||
@ -1259,7 +1259,7 @@ sdp_sorecv(struct socket *so, struct sockaddr **psa, struct uio *uio,
|
||||
/* We will never ever get anything unless we are connected. */
|
||||
if (!(so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED))) {
|
||||
/* When disconnecting there may be still some data left. */
|
||||
if (sb->sb_cc > 0)
|
||||
if (sbavail(sb))
|
||||
goto deliver;
|
||||
if (!(so->so_state & SS_ISDISCONNECTED))
|
||||
error = ENOTCONN;
|
||||
@ -1267,7 +1267,7 @@ sdp_sorecv(struct socket *so, struct sockaddr **psa, struct uio *uio,
|
||||
}
|
||||
|
||||
/* Socket buffer is empty and we shall not block. */
|
||||
if (sb->sb_cc == 0 &&
|
||||
if (sbavail(sb) == 0 &&
|
||||
((so->so_state & SS_NBIO) || (flags & (MSG_DONTWAIT|MSG_NBIO)))) {
|
||||
error = EAGAIN;
|
||||
goto out;
|
||||
@ -1278,7 +1278,7 @@ sdp_sorecv(struct socket *so, struct sockaddr **psa, struct uio *uio,
|
||||
|
||||
/* Abort if socket has reported problems. */
|
||||
if (so->so_error) {
|
||||
if (sb->sb_cc > 0)
|
||||
if (sbavail(sb))
|
||||
goto deliver;
|
||||
if (oresid > uio->uio_resid)
|
||||
goto out;
|
||||
@ -1290,25 +1290,25 @@ sdp_sorecv(struct socket *so, struct sockaddr **psa, struct uio *uio,
|
||||
|
||||
/* Door is closed. Deliver what is left, if any. */
|
||||
if (sb->sb_state & SBS_CANTRCVMORE) {
|
||||
if (sb->sb_cc > 0)
|
||||
if (sbavail(sb))
|
||||
goto deliver;
|
||||
else
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Socket buffer got some data that we shall deliver now. */
|
||||
if (sb->sb_cc > 0 && !(flags & MSG_WAITALL) &&
|
||||
if (sbavail(sb) && !(flags & MSG_WAITALL) &&
|
||||
((so->so_state & SS_NBIO) ||
|
||||
(flags & (MSG_DONTWAIT|MSG_NBIO)) ||
|
||||
sb->sb_cc >= sb->sb_lowat ||
|
||||
sb->sb_cc >= uio->uio_resid ||
|
||||
sb->sb_cc >= sb->sb_hiwat) ) {
|
||||
sbavail(sb) >= sb->sb_lowat ||
|
||||
sbavail(sb) >= uio->uio_resid ||
|
||||
sbavail(sb) >= sb->sb_hiwat) ) {
|
||||
goto deliver;
|
||||
}
|
||||
|
||||
/* On MSG_WAITALL we must wait until all data or error arrives. */
|
||||
if ((flags & MSG_WAITALL) &&
|
||||
(sb->sb_cc >= uio->uio_resid || sb->sb_cc >= sb->sb_lowat))
|
||||
(sbavail(sb) >= uio->uio_resid || sbavail(sb) >= sb->sb_lowat))
|
||||
goto deliver;
|
||||
|
||||
/*
|
||||
@ -1322,7 +1322,7 @@ sdp_sorecv(struct socket *so, struct sockaddr **psa, struct uio *uio,
|
||||
|
||||
deliver:
|
||||
SOCKBUF_LOCK_ASSERT(&so->so_rcv);
|
||||
KASSERT(sb->sb_cc > 0, ("%s: sockbuf empty", __func__));
|
||||
KASSERT(sbavail(sb), ("%s: sockbuf empty", __func__));
|
||||
KASSERT(sb->sb_mb != NULL, ("%s: sb_mb == NULL", __func__));
|
||||
|
||||
/* Statistics. */
|
||||
@ -1330,7 +1330,7 @@ sdp_sorecv(struct socket *so, struct sockaddr **psa, struct uio *uio,
|
||||
uio->uio_td->td_ru.ru_msgrcv++;
|
||||
|
||||
/* Fill uio until full or current end of socket buffer is reached. */
|
||||
len = min(uio->uio_resid, sb->sb_cc);
|
||||
len = min(uio->uio_resid, sbavail(sb));
|
||||
if (mp0 != NULL) {
|
||||
/* Dequeue as many mbufs as possible. */
|
||||
if (!(flags & MSG_PEEK) && len >= sb->sb_mb->m_len) {
|
||||
@ -1510,7 +1510,7 @@ sdp_urg(struct sdp_sock *ssk, struct mbuf *mb)
|
||||
if (so == NULL)
|
||||
return;
|
||||
|
||||
so->so_oobmark = so->so_rcv.sb_cc + mb->m_pkthdr.len - 1;
|
||||
so->so_oobmark = sbused(&so->so_rcv) + mb->m_pkthdr.len - 1;
|
||||
sohasoutofband(so);
|
||||
ssk->oobflags &= ~(SDP_HAVEOOB | SDP_HADOOB);
|
||||
if (!(so->so_options & SO_OOBINLINE)) {
|
||||
|
@ -183,7 +183,7 @@ sdp_post_recvs_needed(struct sdp_sock *ssk)
|
||||
* Compute bytes in the receive queue and socket buffer.
|
||||
*/
|
||||
bytes_in_process = (posted - SDP_MIN_TX_CREDITS) * buffer_size;
|
||||
bytes_in_process += ssk->socket->so_rcv.sb_cc;
|
||||
bytes_in_process += sbused(&ssk->socket->so_rcv);
|
||||
|
||||
return bytes_in_process < max_bytes;
|
||||
}
|
||||
|
@ -860,7 +860,7 @@ clnt_vc_soupcall(struct socket *so, void *arg, int waitflag)
|
||||
* error condition
|
||||
*/
|
||||
do_read = FALSE;
|
||||
if (so->so_rcv.sb_cc >= sizeof(uint32_t)
|
||||
if (sbavail(&so->so_rcv) >= sizeof(uint32_t)
|
||||
|| (so->so_rcv.sb_state & SBS_CANTRCVMORE)
|
||||
|| so->so_error)
|
||||
do_read = TRUE;
|
||||
@ -913,7 +913,7 @@ clnt_vc_soupcall(struct socket *so, void *arg, int waitflag)
|
||||
* buffered.
|
||||
*/
|
||||
do_read = FALSE;
|
||||
if (so->so_rcv.sb_cc >= ct->ct_record_resid
|
||||
if (sbavail(&so->so_rcv) >= ct->ct_record_resid
|
||||
|| (so->so_rcv.sb_state & SBS_CANTRCVMORE)
|
||||
|| so->so_error)
|
||||
do_read = TRUE;
|
||||
|
@ -546,7 +546,7 @@ svc_vc_ack(SVCXPRT *xprt, uint32_t *ack)
|
||||
{
|
||||
|
||||
*ack = atomic_load_acq_32(&xprt->xp_snt_cnt);
|
||||
*ack -= xprt->xp_socket->so_snd.sb_cc;
|
||||
*ack -= sbused(&xprt->xp_socket->so_snd);
|
||||
return (TRUE);
|
||||
}
|
||||
|
||||
|
@ -165,6 +165,34 @@ int sbwait(struct sockbuf *sb);
|
||||
int sblock(struct sockbuf *sb, int flags);
|
||||
void sbunlock(struct sockbuf *sb);
|
||||
|
||||
/*
 * Return how much data is available to be taken out of the socket
 * buffer right now.
 */
|
||||
static inline u_int
sbavail(struct sockbuf *sb)
{

	/*
	 * Lock assertion deliberately disabled: several callers (e.g. the
	 * FIONREAD/FIONWRITE ioctl paths above, marked "Unlocked read")
	 * read the buffer count without holding the sockbuf lock.
	 */
#if 0
	SOCKBUF_LOCK_ASSERT(sb);
#endif
	/*
	 * Identical to sbused() for now; will diverge once "not ready"
	 * socket buffer data is introduced (see commit message).
	 */
	return (sb->sb_cc);
}
|
||||
|
||||
/*
 * Return how much data sits in the socket buffer; some of it
 * might not yet be ready to be read.
 */
|
||||
static inline u_int
sbused(struct sockbuf *sb)
{

	/*
	 * Lock assertion deliberately disabled: some callers read the
	 * buffer count without holding the sockbuf lock.
	 */
#if 0
	SOCKBUF_LOCK_ASSERT(sb);
#endif
	/*
	 * Identical to sbavail() for now; will diverge once "not ready"
	 * socket buffer data is introduced (see commit message).
	 */
	return (sb->sb_cc);
}
|
||||
|
||||
/*
|
||||
* How much space is there in a socket buffer (so->so_snd or so->so_rcv)?
|
||||
* This is problematical if the fields are unsigned, as the space might
|
||||
|
@ -208,7 +208,7 @@ struct xsocket {
|
||||
|
||||
/* can we read something from so? */
|
||||
#define soreadabledata(so) \
|
||||
((so)->so_rcv.sb_cc >= (so)->so_rcv.sb_lowat || \
|
||||
(sbavail(&(so)->so_rcv) >= (so)->so_rcv.sb_lowat || \
|
||||
!TAILQ_EMPTY(&(so)->so_comp) || (so)->so_error)
|
||||
#define soreadable(so) \
|
||||
(soreadabledata(so) || ((so)->so_rcv.sb_state & SBS_CANTRCVMORE))
|
||||
|
Loading…
Reference in New Issue
Block a user