Lines matching references to size_goal (all hits are in net/ipv4/tcp.c)
700 int size_goal) in tcp_should_autocork() argument
702 return skb->len < size_goal && in tcp_should_autocork()
709 int nonagle, int size_goal) in tcp_push() argument
722 if (tcp_should_autocork(sk, skb, size_goal)) { in tcp_push()
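
The two hits above are the autocorking check: tcp_push() (line 722) asks tcp_should_autocork() whether the tail skb has reached size_goal, and line 702 shows the skb->len < size_goal half of that test. Below is a standalone userspace sketch of that decision, not the kernel function; the remaining conditions (the tcp_autocorking sysctl and data still queued below the socket) are reduced to plain booleans as an assumption for illustration.

    /* Standalone sketch, not kernel code: models the size_goal half of
     * tcp_should_autocork() (line 702). The other conditions are collapsed
     * into two booleans purely for illustration. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool should_autocork(int skb_len, int size_goal,
                                bool autocorking_enabled, bool data_still_queued)
    {
            /* Only an skb that has not yet reached the goal is worth holding back. */
            return skb_len < size_goal && autocorking_enabled && data_still_queued;
    }

    int main(void)
    {
            /* A 3000-byte tail skb against a ~64KB goal gets corked. */
            printf("%d\n", should_autocork(3000, 65160, true, true));
            return 0;
    }

Corking a not-yet-full skb lets later writes coalesce into a single goal-sized segment instead of a burst of small ones.
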
922 u32 new_size_goal, size_goal; in tcp_xmit_size_goal() local
932 size_goal = tp->gso_segs * mss_now; in tcp_xmit_size_goal()
933 if (unlikely(new_size_goal < size_goal || in tcp_xmit_size_goal()
934 new_size_goal >= size_goal + mss_now)) { in tcp_xmit_size_goal()
937 size_goal = tp->gso_segs * mss_now; in tcp_xmit_size_goal()
940 return max(size_goal, mss_now); in tcp_xmit_size_goal()
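
Lines 922-940 are the heart of tcp_xmit_size_goal(): the goal is kept as a whole number of MSS by caching a segment count (tp->gso_segs) and re-dividing only when the freshly computed goal drifts outside [size_goal, size_goal + mss_now). The standalone model below reproduces just that rounding in plain C; the cached count is a file-scope variable standing in for tp->gso_segs, and the extra clamp to sk->sk_gso_max_segs the kernel applies is only noted in a comment.

    /* Standalone model of the rounding at lines 932-940, not kernel code. */
    #include <stdio.h>

    static unsigned int cached_gso_segs;    /* stand-in for tp->gso_segs */

    static unsigned int xmit_size_goal(unsigned int new_size_goal, unsigned int mss_now)
    {
            unsigned int size_goal = cached_gso_segs * mss_now;

            /* Re-divide only when the new goal leaves [size_goal, size_goal + mss_now). */
            if (new_size_goal < size_goal ||
                new_size_goal >= size_goal + mss_now) {
                    cached_gso_segs = new_size_goal / mss_now; /* kernel also caps at sk_gso_max_segs */
                    size_goal = cached_gso_segs * mss_now;
            }
            return size_goal > mss_now ? size_goal : mss_now;  /* max(size_goal, mss_now) */
    }

    int main(void)
    {
            /* A ~64KB GSO budget with a 1448-byte MSS rounds down to 45 segments. */
            printf("%u\n", xmit_size_goal(65483, 1448));   /* prints 65160 */
            return 0;
    }

Keeping the acceptance window one MSS wide means the divide runs only when the GSO budget or the MSS actually changes.
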
943 int tcp_send_mss(struct sock *sk, int *size_goal, int flags) in tcp_send_mss() argument
948 *size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB)); in tcp_send_mss()
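
tcp_send_mss() (lines 943-948) is the helper every sender calls: it returns the current MSS and writes the matching size_goal through its out-parameter, passing !(flags & MSG_OOB) so urgent data is never batched past a single MSS. The following compilable stand-in shows only that call shape; current_mss() and xmit_size_goal() are stubs with illustrative numbers, and MSG_OOB is redefined locally for the sketch (its value is 1 on Linux).

    /* Stand-in for the call pattern at lines 996/1086/1273/1415, not kernel API. */
    #include <stdio.h>

    #define MSG_OOB 0x1   /* defined locally for this sketch */

    static int current_mss(void) { return 1448; }
    static int xmit_size_goal(int mss_now, int large_allowed)
    {
            return large_allowed ? 45 * mss_now : mss_now;
    }

    /* Shape of tcp_send_mss(): return the MSS, hand back the goal by pointer. */
    static int send_mss(int *size_goal, int flags)
    {
            int mss_now = current_mss();
            *size_goal = xmit_size_goal(mss_now, !(flags & MSG_OOB));
            return mss_now;
    }

    int main(void)
    {
            int size_goal;
            int mss_now = send_mss(&size_goal, 0);       /* normal data: big goal */
            printf("mss=%d goal=%d\n", mss_now, size_goal);
            mss_now = send_mss(&size_goal, MSG_OOB);     /* urgent data: one MSS  */
            printf("mss=%d goal=%d\n", mss_now, size_goal);
            return 0;
    }
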
973 int mss_now, size_goal; in do_tcp_sendpages() local
996 mss_now = tcp_send_mss(sk, &size_goal, flags); in do_tcp_sendpages()
1008 if (!skb || (copy = size_goal - skb->len) <= 0 || in do_tcp_sendpages()
1023 copy = size_goal; in do_tcp_sendpages()
1067 if (skb->len < size_goal || (flags & MSG_OOB)) in do_tcp_sendpages()
1080 TCP_NAGLE_PUSH, size_goal); in do_tcp_sendpages()
1086 mss_now = tcp_send_mss(sk, &size_goal, flags); in do_tcp_sendpages()
1093 tcp_push(sk, flags, mss_now, tp->nonagle, size_goal); in do_tcp_sendpages()
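
The do_tcp_sendpages() hits (lines 973-1093) show the usual fill-to-goal loop: copy = size_goal - skb->len sizes each append (1008), a fresh skb is capped at size_goal (1023), nothing is pushed while skb->len < size_goal unless MSG_OOB forces it (1067), and whatever is left gets a final tcp_push() (1093). The standalone sketch below reproduces only that chunking shape, with counters and printf in place of skbs and the transmit path; error, OOB, and wait paths are left out.

    /* Sketch of the fill-to-goal loop at lines 1008-1093, not the kernel loop. */
    #include <stdio.h>
    #include <stddef.h>

    static void fill_and_push(size_t total, int size_goal)
    {
            int tail_len = 0;               /* length of the current tail "skb" */

            while (total > 0) {
                    int copy = size_goal - tail_len;        /* line 1008 */
                    if (copy <= 0) {
                            tail_len = 0;                   /* open a fresh skb */
                            copy = size_goal;               /* line 1023 */
                    }
                    if ((size_t)copy > total)
                            copy = (int)total;

                    tail_len += copy;
                    total -= copy;

                    if (tail_len < size_goal)               /* line 1067: keep filling */
                            continue;
                    printf("push: skb reached the %d-byte goal\n", size_goal);
            }
            if (tail_len > 0 && tail_len < size_goal)       /* line 1093: final push */
                    printf("push: %d leftover bytes\n", tail_len);
    }

    int main(void)
    {
            fill_and_push(200000, 65160);   /* three full pushes plus a 4520-byte tail */
            return 0;
    }
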
1199 int mss_now = 0, size_goal, copied_syn = 0; in tcp_sendmsg_locked() local
1273 mss_now = tcp_send_mss(sk, &size_goal, flags); in tcp_sendmsg_locked()
1284 copy = size_goal - skb->len; in tcp_sendmsg_locked()
1308 copy = size_goal; in tcp_sendmsg_locked()
1395 if (skb->len < size_goal || (flags & MSG_OOB) || unlikely(tp->repair)) in tcp_sendmsg_locked()
1409 TCP_NAGLE_PUSH, size_goal); in tcp_sendmsg_locked()
1415 mss_now = tcp_send_mss(sk, &size_goal, flags); in tcp_sendmsg_locked()
1421 tcp_push(sk, flags, mss_now, tp->nonagle, size_goal); in tcp_sendmsg_locked()
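
tcp_sendmsg_locked() (lines 1199-1421) follows the same fill-to-goal pattern, and lines 1409-1415 (mirrored at 1080-1086 in do_tcp_sendpages()) add the slow path: when send-buffer space runs out, pending data is pushed with TCP_NAGLE_PUSH, the task sleeps, and mss_now plus size_goal are recomputed on wakeup because they may have gone stale. The sketch below is a userspace stand-in for that refresh pattern; struct fake_sock and every helper name are invented for the example.

    /* Userspace stand-in for the slow path at lines 1409-1415, not kernel code. */
    #include <stdbool.h>
    #include <stdio.h>

    struct fake_sock { int sndbuf_left; int mss; int goal; };

    static void push_pending(struct fake_sock *sk)
    {
            (void)sk;
            printf("push pending frames (TCP_NAGLE_PUSH)\n");   /* line 1409 */
    }

    static void wait_for_memory(struct fake_sock *sk)
    {
            sk->sndbuf_left = 64 * 1024;     /* pretend ACKs freed buffer space */
    }

    static void refresh_mss_and_goal(struct fake_sock *sk)
    {
            /* kernel: mss_now = tcp_send_mss(sk, &size_goal, flags);  line 1415 */
            sk->mss = 1448;
            sk->goal = 45 * sk->mss;
    }

    static bool reserve(struct fake_sock *sk, int bytes)
    {
            if (sk->sndbuf_left < bytes) {
                    push_pending(sk);         /* flush before sleeping */
                    wait_for_memory(sk);      /* sk_stream_wait_memory() in the kernel */
                    refresh_mss_and_goal(sk); /* MSS/window may have changed meanwhile */
                    return false;             /* caller retries with the fresh goal */
            }
            sk->sndbuf_left -= bytes;
            return true;
    }

    int main(void)
    {
            struct fake_sock sk = { .sndbuf_left = 1000, .mss = 1448, .goal = 65160 };

            while (!reserve(&sk, 4000))
                    ;                         /* retry once memory is available */
            printf("queued 4000 bytes, size_goal now %d\n", sk.goal);
            return 0;
    }
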