Searched refs:inet_csk (Results 1 – 25 of 59) sorted by relevance

/OK3568_Linux_fs/kernel/include/net/
inet_connection_sock.h
153 static inline struct inet_connection_sock *inet_csk(const struct sock *sk) in inet_csk() function
160 return (void *)inet_csk(sk)->icsk_ca_priv; in inet_csk_ca()
183 inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_SCHED; in inet_csk_schedule_ack()
188 return inet_csk(sk)->icsk_ack.pending & ICSK_ACK_SCHED; in inet_csk_ack_scheduled()
193 memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack)); in inet_csk_delack_init()
201 struct inet_connection_sock *icsk = inet_csk(sk); in inet_csk_clear_xmit_timer()
226 struct inet_connection_sock *icsk = inet_csk(sk); in inet_csk_reset_xmit_timer()
279 reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue); in inet_csk_reqsk_queue_added()
284 return reqsk_queue_len(&inet_csk(sk)->icsk_accept_queue); in inet_csk_reqsk_queue_len()
310 return !reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue) ? in inet_csk_listen_poll()
[all …]
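
The group above is the defining header: inet_csk() lives in include/net/inet_connection_sock.h and simply downcasts a struct sock pointer to the containing connection socket, which is only valid because the connection-socket structure embeds the plain socket as its first member. The sketch below is a stand-alone illustration of that accessor pattern with simplified stand-in structs (the field set and layout are placeholders, not the real kernel definitions); the two helper bodies mirror the inet_csk()/inet_csk_ca() helpers listed above.

#include <stdio.h>

/* Simplified stand-ins for the kernel types; the real struct sock and
 * struct inet_connection_sock carry far more state, and the connection
 * socket actually embeds struct inet_sock rather than struct sock directly. */
struct sock {
	int sk_state;
};

struct inet_connection_sock {
	struct sock icsk_inet_sk;     /* must be the first member for the cast to hold */
	unsigned long icsk_rto;
	unsigned long icsk_ca_priv[13];
};

/* Same shape as the kernel helper: reach the connection-oriented fields by
 * casting the embedded socket pointer back to the containing structure. */
static inline struct inet_connection_sock *inet_csk(const struct sock *sk)
{
	return (struct inet_connection_sock *)sk;
}

/* Mirrors the inet_csk_ca() helper shown at line 160 above. */
static inline void *inet_csk_ca(const struct sock *sk)
{
	return (void *)inet_csk(sk)->icsk_ca_priv;
}

int main(void)
{
	struct inet_connection_sock icsk = { .icsk_rto = 200 };
	struct sock *sk = &icsk.icsk_inet_sk;

	/* Typical call-site shape, as in the tcp.h and tcp_timer.c hits below. */
	printf("rto = %lu, ca_priv = %p\n", inet_csk(sk)->icsk_rto, inet_csk_ca(sk));
	return 0;
}
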
tcp.h
355 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_dec_quickack_mode()
670 if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX) in tcp_bound_rto()
671 inet_csk(sk)->icsk_rto = TCP_RTO_MAX; in tcp_bound_rto()
710 u32 rto_min = inet_csk(sk)->icsk_rto_min; in tcp_rto_min()
1137 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ca_needs_ecn()
1144 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_set_ca_state()
1153 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ca_event()
1223 (1 << inet_csk(sk)->icsk_ca_state); in tcp_in_cwnd_reduction()
1327 return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN); in tcp_probe0_base()
1334 u64 when = (u64)tcp_probe0_base(sk) << inet_csk(sk)->icsk_backoff; in tcp_probe0_when()
[all …]
espintcp.h
35 struct inet_connection_sock *icsk = inet_csk(sk); in espintcp_getctx()
/OK3568_Linux_fs/kernel/net/ipv4/
tcp_recovery.c
18 if (inet_csk(sk)->icsk_ca_state >= TCP_CA_Recovery) in tcp_rack_reo_wnd()
114 timeout, inet_csk(sk)->icsk_rto); in tcp_rack_mark_lost()
162 if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) { in tcp_rack_reo_timeout()
164 if (!inet_csk(sk)->icsk_ca_ops->cong_control) in tcp_rack_reo_timeout()
169 if (inet_csk(sk)->icsk_pending != ICSK_TIME_RETRANS) in tcp_rack_reo_timeout()
222 const u8 state = inet_csk(sk)->icsk_ca_state; in tcp_newreno_mark_lost()
tcp_timer.c
28 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_clamp_rto_to_user_timeout()
45 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_clamp_probe0_to_user_timeout()
215 if (!inet_csk(sk)->icsk_retransmits) in retransmits_timed_out()
233 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_write_timeout()
291 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_delack_timer_handler()
357 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_probe_timer()
408 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_fastopen_synack_timer()
452 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_retransmit_timer()
598 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_write_timer_handler()
676 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_keepalive_timer()
inet_connection_sock.c
400 if (!inet_csk(sk)->icsk_bind_hash) in inet_csk_get_port()
402 WARN_ON(inet_csk(sk)->icsk_bind_hash != tb); in inet_csk_get_port()
417 struct inet_connection_sock *icsk = inet_csk(sk); in inet_csk_wait_for_connect()
465 struct inet_connection_sock *icsk = inet_csk(sk); in inet_csk_accept()
554 struct inet_connection_sock *icsk = inet_csk(sk); in inet_csk_init_xmit_timers()
565 struct inet_connection_sock *icsk = inet_csk(sk); in inet_csk_clear_xmit_timers()
713 reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req); in inet_csk_reqsk_queue_drop()
732 struct inet_connection_sock *icsk = inet_csk(sk_listener); in reqsk_timer_handler()
812 struct inet_connection_sock *icsk = inet_csk(newsk); in inet_clone_ulp()
836 struct inet_connection_sock *newicsk = inet_csk(newsk); in inet_csk_clone_lock()
[all …]
tcp_ulp.c
106 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_update_ulp()
114 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_cleanup_ulp()
132 struct inet_connection_sock *icsk = inet_csk(sk); in __tcp_set_ulp()
tcp_output.c
67 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_event_new_data_sent()
153 while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd) in tcp_cwnd_restart()
164 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_event_data_sent()
1112 inet_csk(sk)->icsk_af_ops->mtu_reduced(sk); in tcp_release_cb()
1240 const struct inet_connection_sock *icsk = inet_csk(sk); in __tcp_transmit_skb()
1696 const struct inet_connection_sock *icsk = inet_csk(sk); in __tcp_mtu_to_mss()
1738 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mss_to_mtu()
1761 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mtup_init()
1799 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_sync_mss()
1834 if (mtu != inet_csk(sk)->icsk_pmtu_cookie) in tcp_current_mss()
[all …]
tcp_dctcp.h
29 if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) { in dctcp_ece_ack_update()
33 inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW; in dctcp_ece_ack_update()
inet_hashtables.c
100 inet_csk(sk)->icsk_bind_hash = tb; in inet_bind_hash()
115 tb = inet_csk(sk)->icsk_bind_hash; in __inet_put_port()
117 inet_csk(sk)->icsk_bind_hash = NULL; in __inet_put_port()
142 tb = inet_csk(sk)->icsk_bind_hash; in __inet_inherit_port()
207 hlist_add_tail_rcu(&inet_csk(sk)->icsk_listen_portaddr_node, in inet_hash2()
210 hlist_add_head_rcu(&inet_csk(sk)->icsk_listen_portaddr_node, in inet_hash2()
221 WARN_ON_ONCE(hlist_unhashed(&inet_csk(sk)->icsk_listen_portaddr_node))) in inet_unhash2()
227 hlist_del_init_rcu(&inet_csk(sk)->icsk_listen_portaddr_node); in inet_unhash2()
608 struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash; in inet_reuseport_add_sock()
618 inet_csk(sk2)->icsk_bind_hash == tb && in inet_reuseport_add_sock()
[all …]
tcp_input.c
229 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_measure_rcv_mss()
280 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_incr_quickack()
292 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_enter_quickack_mode()
306 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_in_quickack_mode()
329 inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW; in tcp_ecn_accept_cwr()
403 const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; in tcp_sndbuf_expand()
468 return 2 * inet_csk(sk)->icsk_ack.rcv_mss; in __tcp_grow_window()
519 inet_csk(sk)->icsk_ack.quick |= 1; in tcp_grow_window()
567 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_clamp_window()
601 inet_csk(sk)->icsk_ack.rcv_mss = hint; in tcp_initialize_rcv_mss()
[all …]
tcp_fastopen.c
49 inet_csk(sk)->icsk_accept_queue.fastopenq.ctx, 1); in tcp_fastopen_destroy_cipher()
94 q = &inet_csk(sk)->icsk_accept_queue.fastopenq; in tcp_fastopen_reset_cipher()
260 struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue; in tcp_fastopen_create_child()
264 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL, in tcp_fastopen_create_child()
326 fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq; in tcp_fastopen_queue_check()
596 u32 timeouts = inet_csk(sk)->icsk_retransmits; in tcp_fastopen_active_detect_blackhole()
tcp_cong.c
160 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_assign_congestion_control()
179 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_init_congestion_control()
194 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_reinit_congestion_control()
213 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_cleanup_congestion_control()
355 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_set_congestion_control()
tcp_dctcp.c
98 inet_csk(sk)->icsk_ca_ops = &dctcp_reno; in dctcp_init()
156 new_state != inet_csk(sk)->icsk_ca_state) in dctcp_state()
193 if (inet_csk(sk)->icsk_ca_ops != &dctcp_reno) { in dctcp_get_info()
tcp_minisocks.c
255 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_time_wait()
408 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ca_openreq_child()
470 newicsk = inet_csk(newsk); in tcp_create_openreq_child()
759 if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept && in tcp_check_req()
772 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL, in tcp_check_req()
778 reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req); in tcp_check_req()
tcp_diag.c
115 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_diag_get_aux()
146 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_diag_get_aux_size()
/OK3568_Linux_fs/kernel/net/dccp/
output.c
46 const struct inet_connection_sock *icsk = inet_csk(sk); in dccp_transmit_skb()
161 struct inet_connection_sock *icsk = inet_csk(sk); in dccp_sync_mss()
268 inet_csk(sk)->icsk_rto, in dccp_xmit_packet()
381 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk) != 0) in dccp_retransmit_skb()
385 inet_csk(sk)->icsk_retransmits++; in dccp_retransmit_skb()
512 int err = inet_csk(sk)->icsk_af_ops->rebuild_header(sk); in dccp_send_reset()
537 struct inet_connection_sock *icsk = inet_csk(sk); in dccp_connect()
581 inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN; in dccp_send_ack()
601 struct inet_connection_sock *icsk = inet_csk(sk);
minisocks.c
38 const struct inet_connection_sock *icsk = inet_csk(sk); in dccp_time_wait()
91 struct inet_connection_sock *newicsk = inet_csk(newsk); in dccp_create_openreq_child()
194 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL, in dccp_check_req()
ipv6.c
149 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) in dccp_v6_err()
434 inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped; in dccp_v6_request_recv_sock()
454 dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie); in dccp_v6_request_recv_sock()
526 inet_csk(newsk)->icsk_ext_hdr_len = 0; in dccp_v6_request_recv_sock()
528 inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen + in dccp_v6_request_recv_sock()
812 struct inet_connection_sock *icsk = inet_csk(sk); in dccp_v6_connect()
1009 inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops; in dccp_v6_init_sock()
timer.c
33 const struct inet_connection_sock *icsk = inet_csk(sk); in dccp_write_timeout()
85 struct inet_connection_sock *icsk = inet_csk(sk); in dccp_retransmit_timer()
proto.c
101 if (inet_csk(sk)->icsk_bind_hash != NULL && in dccp_set_state()
186 struct inet_connection_sock *icsk = inet_csk(sk); in dccp_init_sock()
222 if (inet_csk(sk)->icsk_bind_hash != NULL) in dccp_destroy_sock()
260 struct inet_connection_sock *icsk = inet_csk(sk); in dccp_disconnect()
578 return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level, in dccp_setsockopt()
686 return inet_csk(sk)->icsk_af_ops->getsockopt(sk, level, in dccp_getsockopt()
/OK3568_Linux_fs/kernel/tools/testing/selftests/bpf/progs/
bpf_dctcp.c
126 new_state != BPF_CORE_READ_BITFIELD(inet_csk(sk), icsk_ca_state)) in BPF_PROG()
159 if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) { in dctcp_ece_ack_update()
163 inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW; in dctcp_ece_ack_update()
/OK3568_Linux_fs/kernel/net/mptcp/
mptcp_diag.c
24 return inet_sk_diag_fill(sk, inet_csk(sk), skb, cb, req, NLM_F_MULTI, in sk_diag_dump()
53 err = inet_sk_diag_fill(sk, inet_csk(sk), rep, cb, req, 0, in mptcp_diag_dump_one()
/OK3568_Linux_fs/kernel/tools/testing/selftests/bpf/
bpf_tcp_helpers.h
80 static __always_inline struct inet_connection_sock *inet_csk(const struct sock *sk) in inet_csk() function
87 return (void *)inet_csk(sk)->icsk_ca_priv; in inet_csk_ca()
/OK3568_Linux_fs/kernel/net/core/
request_sock.c
97 fastopenq = &inet_csk(lsk)->icsk_accept_queue.fastopenq; in reqsk_fastopen_remove()
