Lines matching refs: sock_net

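Every function tagged below ("in ...") is defined in net/ipv4/tcp_input.c, so this is effectively the file's complete use of sock_net(). The hits fall into a few recurring patterns: reading per-netns sysctls via READ_ONCE(sock_net(sk)->ipv4.sysctl_...), bumping per-netns SNMP counters via NET_INC_STATS()/NET_ADD_STATS()/TCP_INC_STATS(), caching the namespace in a local (struct net *net = sock_net(sk)) when it is used repeatedly, and, in inet_reqsk_alloc(), recording the listener's namespace into a request sock with write_pnet(). Minimal sketches of the two dominant patterns follow the listing.
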
216 		dev = dev_get_by_index_rcu(sock_net(sk), skb->skb_iif);  in tcp_gro_dev_warn()
429 min(sndmem, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[2]))); in tcp_sndbuf_expand()
464 int window = tcp_win_from_space(sk, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2])) >> 1; in __tcp_grow_window()
529 int tcp_app_win = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_app_win); in tcp_init_buffer_space()
568 struct net *net = sock_net(sk); in tcp_clamp_window()
720 if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf) && in tcp_rcv_space_adjust()
741 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2])); in tcp_rcv_space_adjust()
906 rate *= sock_net(sk)->ipv4.sysctl_tcp_pacing_ss_ratio; in tcp_update_pacing_rate()
908 rate *= sock_net(sk)->ipv4.sysctl_tcp_pacing_ca_ratio; in tcp_update_pacing_rate()
1038 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_max_reordering)); in tcp_check_sack_reordering()
1043 NET_INC_STATS(sock_net(sk), in tcp_check_sack_reordering()
1083 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT, in tcp_mark_skb_lost()
1243 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKRECV); in tcp_check_dsack()
1250 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKOFORECV); in tcp_check_dsack()
1257 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKIGNOREDDUBIOUS); in tcp_check_dsack()
1261 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDSACKRECVSEGS, dup_segs); in tcp_check_dsack()
1464 NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTED); in tcp_shifted_skb()
1491 NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKMERGED); in tcp_shifted_skb()
1662 NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK); in tcp_shift_skb_data()
1854 NET_INC_STATS(sock_net(sk), mib_idx); in tcp_sacktag_write_queue()
2017 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_max_reordering)); in tcp_check_reno_reordering()
2019 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRENOREORDER); in tcp_check_reno_reordering()
2082 return READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_recovery) & in tcp_is_rack()
2099 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING); in tcp_timeout_mark_lost()
2125 struct net *net = sock_net(sk); in tcp_enter_loss()
2534 NET_INC_STATS(sock_net(sk), mib_idx); in tcp_try_undo_recovery()
2555 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKUNDO); in tcp_try_undo_dsack()
2570 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSUNDO); in tcp_try_undo_loss()
2572 NET_INC_STATS(sock_net(sk), in tcp_try_undo_loss()
2703 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPFAIL); in tcp_mtup_probe_failed()
2726 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS); in tcp_mtup_probe_success()
2781 NET_INC_STATS(sock_net(sk), mib_idx); in tcp_enter_recovery()
2883 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO); in tcp_try_undo_partial()
3043 u32 wlen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_rtt_wlen) * HZ; in tcp_update_rtt_min()
3455 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering)) in tcp_may_raise_cwnd()
3604 struct net *net = sock_net(sk); in tcp_send_challenge_ack()
3675 NET_INC_STATS(sock_net(sk), in tcp_process_tlp_ack()
3716 const struct net *net = sock_net(sk); in tcp_newly_delivered()
3804 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPACKS); in tcp_ack()
3811 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPUREACKS); in tcp_ack()
4407 if (tcp_is_sack(tp) && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_dsack)) { in tcp_dsack_set()
4415 NET_INC_STATS(sock_net(sk), mib_idx); in tcp_dsack_set()
4442 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDUPLICATEDATAREHASH); in tcp_rcv_spurious_retrans()
4451 NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); in tcp_send_dupack()
4454 if (tcp_is_sack(tp) && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_dsack)) { in tcp_send_dupack()
4510 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED, in tcp_sack_compress_send_ack()
4643 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE); in tcp_try_coalesce()
4762 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP); in tcp_data_queue_ofo()
4773 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOQUEUE); in tcp_data_queue_ofo()
4825 NET_INC_STATS(sock_net(sk), in tcp_data_queue_ofo()
4844 NET_INC_STATS(sock_net(sk), in tcp_data_queue_ofo()
4873 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE); in tcp_data_queue_ofo()
4939 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP); in tcp_send_rcvq()
5001 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP); in tcp_data_queue()
5010 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP); in tcp_data_queue()
5046 NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); in tcp_data_queue()
5069 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP); in tcp_data_queue()
5098 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED); in tcp_collapse_one()
5303 NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED); in tcp_prune_ofo_queue()
5343 NET_INC_STATS(sock_net(sk), LINUX_MIB_PRUNECALLED); in tcp_prune_queue()
5376 NET_INC_STATS(sock_net(sk), LINUX_MIB_RCVPRUNED); in tcp_prune_queue()
5480 tp->compressed_ack >= READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_comp_sack_nr)) in __tcp_ack_snd_check()
5502 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_comp_sack_delay_ns), in __tcp_ack_snd_check()
5506 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_comp_sack_slack_ns), in __tcp_ack_snd_check()
5534 if (ptr && !READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_stdurg)) in tcp_check_urg()
5647 if (tcp_fast_parse_options(sock_net(sk), skb, th, tp) && in tcp_validate_incoming()
5651 NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); in tcp_validate_incoming()
5652 if (!tcp_oow_rate_limited(sock_net(sk), skb, in tcp_validate_incoming()
5672 if (!tcp_oow_rate_limited(sock_net(sk), skb, in tcp_validate_incoming()
5735 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); in tcp_validate_incoming()
5736 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE); in tcp_validate_incoming()
5863 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); in tcp_rcv_established()
5887 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITS); in tcp_rcv_established()
5945 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS); in tcp_rcv_established()
5946 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); in tcp_rcv_established()
6025 tcp_parse_options(sock_net(sk), synack, &opt, 0, NULL); in tcp_rcv_fastopen_synack()
6059 NET_INC_STATS(sock_net(sk), in tcp_rcv_fastopen_synack()
6065 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE); in tcp_rcv_fastopen_synack()
6110 tcp_parse_options(sock_net(sk), skb, &tp->rx_opt, 0, &foc); in tcp_rcv_synsent_state_process()
6136 NET_INC_STATS(sock_net(sk), in tcp_rcv_synsent_state_process()
6521 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA); in tcp_rcv_state_process()
6530 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA); in tcp_rcv_state_process()
6591 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA); in tcp_rcv_state_process()
6654 const struct net *net = sock_net(listen_sk); in tcp_ecn_create_request()
6715 write_pnet(&ireq->ireq_net, sock_net(sk_listener)); in inet_reqsk_alloc()
6730 struct net *net = sock_net(sk); in tcp_syn_flood_action()
6740 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES); in tcp_syn_flood_action()
6743 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP); in tcp_syn_flood_action()
6794 if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies) != 2 && in tcp_get_syncookie_mss()
6802 NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); in tcp_get_syncookie_mss()
6822 struct net *net = sock_net(sk); in tcp_conn_request()
6843 NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); in tcp_conn_request()
6861 tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, in tcp_conn_request()
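
Two of these patterns recur often enough to be worth spelling out. First, the sysctl reads: sock_net(sk) resolves the socket's network namespace, and the per-netns knob is then loaded with READ_ONCE() because the sysctl handler can update it concurrently. A minimal sketch in the style of the tcp_sndbuf_expand() hit above (sketch_read_wmem_max is an illustrative name, not a kernel function):

#include <net/sock.h>

static int sketch_read_wmem_max(const struct sock *sk)
{
	/* sock_net() returns the struct net this socket belongs to. */
	const struct net *net = sock_net(sk);

	/*
	 * READ_ONCE() pairs with the WRITE_ONCE() done when the sysctl
	 * is changed, so a torn or stale-cached read is avoided.
	 */
	return READ_ONCE(net->ipv4.sysctl_tcp_wmem[2]);
}

Second, the MIB accounting: NET_INC_STATS() bumps a per-netns LINUX_MIB_* counter by one, NET_ADD_STATS() adds an arbitrary amount, and TCP_INC_STATS() does the same for the TCP_MIB_* group. Passing sock_net(sk) keeps the counters local to the socket's namespace. A sketch modeled on the tcp_check_dsack() hits above (sketch_count_dsack is illustrative):

#include <net/tcp.h>

static void sketch_count_dsack(struct sock *sk, u32 dup_segs)
{
	/* One DSACK event, counted once... */
	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKRECV);

	/* ...plus the duplicate-segment total, added in one shot. */
	NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDSACKRECVSEGS, dup_segs);
}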