Lines matching refs:icsk in net/ipv4/tcp_output.c
67 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_event_new_data_sent() local
80 if (!prior_packets || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) in tcp_event_new_data_sent()
164 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_event_data_sent() local
175 if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato) in tcp_event_data_sent()
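The check in tcp_event_data_sent() is an interactivity heuristic: sending data within one ACK timeout (ato) of the last received segment suggests a request/response exchange, so the kernel treats the flow as interactive (ping-pong mode) and can delay ACKs to piggyback them. A minimal standalone sketch of just that comparison, with plain counters standing in for jiffies and the icsk fields:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-in for the test in tcp_event_data_sent():
 * sending within 'ato' of the last receive looks like ping-pong traffic. */
static bool looks_interactive(uint32_t now, uint32_t last_rcv, uint32_t ato)
{
        /* Unsigned subtraction tolerates wraparound, as with jiffies. */
        return (uint32_t)(now - last_rcv) < ato;
}

int main(void)
{
        printf("%d\n", looks_interactive(1050, 1000, 200)); /* 1: interactive */
        printf("%d\n", looks_interactive(2000, 1000, 200)); /* 0: bulk-ish    */
        return 0;
}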
1240 const struct inet_connection_sock *icsk = inet_csk(sk); in __tcp_transmit_skb() local
1372 INDIRECT_CALL_INET(icsk->icsk_af_ops->send_check, in __tcp_transmit_skb()
1402 err = INDIRECT_CALL_INET(icsk->icsk_af_ops->queue_xmit, in __tcp_transmit_skb()
1696 const struct inet_connection_sock *icsk = inet_csk(sk); in __tcp_mtu_to_mss() local
1702 mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr); in __tcp_mtu_to_mss()
1705 if (icsk->icsk_af_ops->net_frag_header_len) { in __tcp_mtu_to_mss()
1709 mss_now -= icsk->icsk_af_ops->net_frag_header_len; in __tcp_mtu_to_mss()
1717 mss_now -= icsk->icsk_ext_hdr_len; in __tcp_mtu_to_mss()
1738 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mss_to_mtu() local
1743 icsk->icsk_ext_hdr_len + in tcp_mss_to_mtu()
1744 icsk->icsk_af_ops->net_header_len; in tcp_mss_to_mtu()
1747 if (icsk->icsk_af_ops->net_frag_header_len) { in tcp_mss_to_mtu()
1751 mtu += icsk->icsk_af_ops->net_frag_header_len; in tcp_mss_to_mtu()
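__tcp_mtu_to_mss() and tcp_mss_to_mtu() are inverses: one strips the network and transport headers (plus any extension or fragment headers) from the path MTU, the other adds them back. A hedged standalone sketch of that arithmetic for plain IPv4 with no extension headers; the fixed header sizes below are illustrative, where the kernel reads them from icsk_af_ops and icsk_ext_hdr_len:

#include <stdio.h>

#define NET_HEADER_LEN 20   /* IPv4 header, no options (illustration only) */
#define TCP_HEADER_LEN 20   /* TCP header, no options  (illustration only) */

static int mtu_to_mss(int pmtu, int ext_hdr_len)
{
        return pmtu - NET_HEADER_LEN - TCP_HEADER_LEN - ext_hdr_len;
}

static int mss_to_mtu(int mss, int ext_hdr_len)
{
        return mss + NET_HEADER_LEN + TCP_HEADER_LEN + ext_hdr_len;
}

int main(void)
{
        int pmtu = 1500;
        int mss  = mtu_to_mss(pmtu, 0);

        printf("MSS for PMTU %d: %d\n", pmtu, mss);
        printf("back to MTU:      %d\n", mss_to_mtu(mss, 0));
        return 0;
}

The real helpers do more (IPv6 fragment header handling, clamping against a minimum MSS); the sketch only shows the header arithmetic visible in the hits above.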
1761 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mtup_init() local
1764 icsk->icsk_mtup.enabled = READ_ONCE(net->ipv4.sysctl_tcp_mtu_probing) > 1; in tcp_mtup_init()
1765 icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) + in tcp_mtup_init()
1766 icsk->icsk_af_ops->net_header_len; in tcp_mtup_init()
1767 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, READ_ONCE(net->ipv4.sysctl_tcp_base_mss)); in tcp_mtup_init()
1768 icsk->icsk_mtup.probe_size = 0; in tcp_mtup_init()
1769 if (icsk->icsk_mtup.enabled) in tcp_mtup_init()
1770 icsk->icsk_mtup.probe_timestamp = tcp_jiffies32; in tcp_mtup_init()
1799 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_sync_mss() local
1802 if (icsk->icsk_mtup.search_high > pmtu) in tcp_sync_mss()
1803 icsk->icsk_mtup.search_high = pmtu; in tcp_sync_mss()
1809 icsk->icsk_pmtu_cookie = pmtu; in tcp_sync_mss()
1810 if (icsk->icsk_mtup.enabled) in tcp_sync_mss()
1811 mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low)); in tcp_sync_mss()
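Taken together, tcp_mtup_init() and tcp_sync_mss() maintain the MTU-probing search window: search_high starts from the peer's advertised MSS clamp converted back to an MTU, search_low from tcp_base_mss converted to an MTU, and tcp_sync_mss() never lets search_high exceed the current path MTU while bounding the effective MSS by search_low. A hedged sketch of that bookkeeping, with a toy state struct and a fixed header size in place of the icsk fields and sysctls:

#include <stdio.h>

#define HDRS 40               /* IPv4 + TCP header bytes, illustration only */

struct mtu_probe_state {
        int search_low;       /* lowest MTU still being considered  */
        int search_high;      /* highest MTU still being considered */
};

/* Roughly what tcp_mtup_init() does: derive the window bounds from the
 * peer's mss_clamp and the tcp_base_mss sysctl (values assumed here). */
static void mtup_init(struct mtu_probe_state *s, int mss_clamp, int base_mss)
{
        s->search_high = mss_clamp + HDRS;
        s->search_low  = base_mss + HDRS;
}

/* Roughly what tcp_sync_mss() does with the window: clamp search_high to
 * the path MTU and bound the effective MSS by search_low. */
static int sync_mss(struct mtu_probe_state *s, int pmtu)
{
        int mss_now = pmtu - HDRS;

        if (s->search_high > pmtu)
                s->search_high = pmtu;
        if (mss_now > s->search_low - HDRS)
                mss_now = s->search_low - HDRS;
        return mss_now;
}

int main(void)
{
        struct mtu_probe_state s;
        int mss;

        mtup_init(&s, 1460, 1024);
        mss = sync_mss(&s, 1500);
        printf("window: [%d, %d], mss_now = %d\n",
               s.search_low, s.search_high, mss);
        return 0;
}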
2178 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_tso_should_defer() local
2185 if (icsk->icsk_ca_state >= TCP_CA_Recovery) in tcp_tso_should_defer()
2277 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mtu_check_reprobe() local
2284 delta = tcp_jiffies32 - icsk->icsk_mtup.probe_timestamp; in tcp_mtu_check_reprobe()
2289 icsk->icsk_mtup.probe_size = 0; in tcp_mtu_check_reprobe()
2290 icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + in tcp_mtu_check_reprobe()
2292 icsk->icsk_af_ops->net_header_len; in tcp_mtu_check_reprobe()
2293 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss); in tcp_mtu_check_reprobe()
2296 icsk->icsk_mtup.probe_timestamp = tcp_jiffies32; in tcp_mtu_check_reprobe()
2329 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mtu_probe() local
2344 if (likely(!icsk->icsk_mtup.enabled || in tcp_mtu_probe()
2345 icsk->icsk_mtup.probe_size || in tcp_mtu_probe()
2356 probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high + in tcp_mtu_probe()
2357 icsk->icsk_mtup.search_low) >> 1); in tcp_mtu_probe()
2359 interval = icsk->icsk_mtup.search_high - icsk->icsk_mtup.search_low; in tcp_mtu_probe()
2364 if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) || in tcp_mtu_probe()
2457 icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len); in tcp_mtu_probe()
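tcp_mtu_probe() chooses its probe size by bisecting the current search window and converting the midpoint MTU back to an MSS; when the window has narrowed below the probe threshold it stops probing, and tcp_mtu_check_reprobe() may later reset the window after a reprobe interval. A minimal sketch of the bisection step, with an illustrative header size and threshold in place of the kernel's helpers and sysctls:

#include <stdio.h>

#define HDRS 40   /* IPv4 + TCP header bytes, illustration only */

/* Sketch of the probe-size choice in tcp_mtu_probe(): bisect the MTU
 * search window and convert the midpoint back to an MSS.  Returns 0
 * when the window is too narrow to be worth probing. */
static int pick_probe_size(int search_low, int search_high, int threshold)
{
        int probe_mss = ((search_high + search_low) >> 1) - HDRS;
        int interval  = search_high - search_low;

        if (interval < threshold || probe_mss > search_high - HDRS)
                return 0;
        return probe_mss;
}

int main(void)
{
        int low = 1064, high = 1500;

        printf("probe MSS: %d\n", pick_probe_size(low, high, 8));
        return 0;
}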
2729 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_schedule_loss_probe() local
2746 (icsk->icsk_ca_state != TCP_CA_Open && in tcp_schedule_loss_probe()
2747 icsk->icsk_ca_state != TCP_CA_CWR)) in tcp_schedule_loss_probe()
2941 struct inet_connection_sock *icsk = inet_csk(sk); in __tcp_select_window() local
2949 int mss = icsk->icsk_ack.rcv_mss; in __tcp_select_window()
2965 icsk->icsk_ack.quick = 0; in __tcp_select_window()
3141 struct inet_connection_sock *icsk = inet_csk(sk); in __tcp_retransmit_skb() local
3148 if (icsk->icsk_mtup.probe_size) in __tcp_retransmit_skb()
3149 icsk->icsk_mtup.probe_size = 0; in __tcp_retransmit_skb()
3299 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_xmit_retransmit_queue() local
3340 if (icsk->icsk_ca_state != TCP_CA_Loss) in tcp_xmit_retransmit_queue()
3361 icsk->icsk_pending != ICSK_TIME_REO_TIMEOUT) in tcp_xmit_retransmit_queue()
3634 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ca_dst_init() local
3644 bpf_module_put(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner); in tcp_ca_dst_init()
3645 icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst); in tcp_ca_dst_init()
3646 icsk->icsk_ca_ops = ca; in tcp_ca_dst_init()
3750 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_send_syn_data() local
3766 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); in tcp_send_syn_data()
3768 space = __tcp_mtu_to_mss(sk, icsk->icsk_pmtu_cookie) - in tcp_send_syn_data()
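In tcp_send_syn_data() the cached path-MTU cookie is first synced and then converted to an MSS to budget the Fast Open payload; the listing cuts the expression off mid-line, and the sketch below assumes the remainder reserves the maximum TCP option space, which matches current mainline. Header and option sizes are illustrative constants here:

#include <stdio.h>

#define HDRS                 40  /* IPv4 + TCP headers, illustration only */
#define MAX_TCP_OPTION_SPACE 40  /* upper bound on TCP option bytes       */

/* Sketch: how much Fast Open payload fits in the SYN, given the cached
 * path-MTU cookie.  The kernel additionally caps this by how much data
 * the application actually queued. */
static int syn_data_space(int pmtu_cookie)
{
        int mss = pmtu_cookie - HDRS;       /* ~__tcp_mtu_to_mss()        */

        return mss - MAX_TCP_OPTION_SPACE;  /* leave room for all options */
}

int main(void)
{
        printf("SYN payload budget for PMTU 1500: %d bytes\n",
               syn_data_space(1500));
        return 0;
}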
3899 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_send_delayed_ack() local
3900 int ato = icsk->icsk_ack.ato; in tcp_send_delayed_ack()
3908 (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)) in tcp_send_delayed_ack()
3934 if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) { in tcp_send_delayed_ack()
3936 if (time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) { in tcp_send_delayed_ack()
3941 if (!time_before(timeout, icsk->icsk_ack.timeout)) in tcp_send_delayed_ack()
3942 timeout = icsk->icsk_ack.timeout; in tcp_send_delayed_ack()
3944 icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER; in tcp_send_delayed_ack()
3945 icsk->icsk_ack.timeout = timeout; in tcp_send_delayed_ack()
3946 sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout); in tcp_send_delayed_ack()
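The tail of tcp_send_delayed_ack() shows the scheduling rule: if a delayed-ACK timer is already pending and about to expire (within ato/4), send the ACK immediately; otherwise keep whichever deadline is earlier before re-arming the timer. A standalone sketch of that decision, with plain tick counts in place of jiffies, time_before()/time_before_eq() and sk_reset_timer():

#include <stdio.h>
#include <stdbool.h>

struct delack {
        bool    timer_pending;
        long    timeout;        /* currently armed deadline */
};

/* Sketch of the re-arm decision in tcp_send_delayed_ack().  Returns the
 * deadline that ends up armed; *send_now is set when the ACK should go
 * out immediately instead of waiting. */
static long schedule_delack(struct delack *d, long now, long ato,
                            bool *send_now)
{
        long timeout = now + ato;

        *send_now = false;
        if (d->timer_pending) {
                /* Timer is about to fire: just send the ACK right away. */
                if (d->timeout <= now + (ato >> 2)) {
                        *send_now = true;
                        return d->timeout;
                }
                /* Otherwise never push an already-armed deadline later. */
                if (d->timeout < timeout)
                        timeout = d->timeout;
        }
        d->timer_pending = true;
        d->timeout = timeout;
        return timeout;
}

int main(void)
{
        struct delack d = { .timer_pending = true, .timeout = 130 };
        bool send_now;
        long t = schedule_delack(&d, 100, 40, &send_now);

        printf("send now: %d, armed deadline: %ld\n", send_now, t);
        return 0;
}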
3965 struct inet_connection_sock *icsk = inet_csk(sk); in __tcp_send_ack() local
3968 delay = TCP_DELACK_MAX << icsk->icsk_ack.retry; in __tcp_send_ack()
3970 icsk->icsk_ack.retry++; in __tcp_send_ack()
3972 icsk->icsk_ack.ato = TCP_ATO_MIN; in __tcp_send_ack()
4089 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_send_probe0() local
4099 icsk->icsk_probes_out = 0; in tcp_send_probe0()
4100 icsk->icsk_backoff = 0; in tcp_send_probe0()
4101 icsk->icsk_probes_tstamp = 0; in tcp_send_probe0()
4105 icsk->icsk_probes_out++; in tcp_send_probe0()
4107 if (icsk->icsk_backoff < READ_ONCE(net->ipv4.sysctl_tcp_retries2)) in tcp_send_probe0()
4108 icsk->icsk_backoff++; in tcp_send_probe0()
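tcp_send_probe0() implements the zero-window probe backoff visible above: the counters reset as soon as data is in flight again, otherwise each transmitted probe bumps icsk_probes_out and, up to tcp_retries2, grows the exponential backoff used to arm the next probe timer. A hedged sketch of that backoff; the base interval, cap and retry limit below are stand-ins for the kernel's RTO state and sysctls:

#include <stdio.h>

#define RTO_BASE_MS 200      /* illustrative base probe interval   */
#define RTO_MAX_MS  120000   /* illustrative cap on the timeout    */
#define RETRIES2    15       /* stand-in for sysctl_tcp_retries2   */

struct probe_state {
        int probes_out;      /* probes sent without an answer       */
        int backoff;         /* exponent for the next probe timeout */
};

/* Data was ACKed or the write queue drained: stop probing. */
static void probe_reset(struct probe_state *p)
{
        p->probes_out = 0;
        p->backoff = 0;
}

/* One unanswered probe: count it and grow the backoff, roughly as
 * tcp_send_probe0() does when the probe was transmitted. */
static long probe_sent(struct probe_state *p)
{
        long timeout;

        p->probes_out++;
        if (p->backoff < RETRIES2)
                p->backoff++;
        timeout = (long)RTO_BASE_MS << p->backoff;
        return timeout < RTO_MAX_MS ? timeout : RTO_MAX_MS;
}

int main(void)
{
        struct probe_state p = { 0, 0 };

        for (int i = 0; i < 5; i++) {
                long t = probe_sent(&p);

                printf("probe %d -> next timeout %ld ms\n", p.probes_out, t);
        }
        probe_reset(&p);
        return 0;
}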