Lines matching references to 'po' (struct packet_sock) in net/packet/af_packet.c

189 static void *packet_previous_frame(struct packet_sock *po,
238 static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
239 static void __fanout_link(struct sock *sk, struct packet_sock *po);
246 static struct net_device *packet_cached_dev_get(struct packet_sock *po) in packet_cached_dev_get() argument
251 dev = rcu_dereference(po->cached_dev); in packet_cached_dev_get()
259 static void packet_cached_dev_assign(struct packet_sock *po, in packet_cached_dev_assign() argument
262 rcu_assign_pointer(po->cached_dev, dev); in packet_cached_dev_assign()
265 static void packet_cached_dev_reset(struct packet_sock *po) in packet_cached_dev_reset() argument
267 RCU_INIT_POINTER(po->cached_dev, NULL); in packet_cached_dev_reset()
270 static bool packet_use_direct_xmit(const struct packet_sock *po) in packet_use_direct_xmit() argument
272 return po->xmit == packet_direct_xmit; in packet_use_direct_xmit()
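
packet_use_direct_xmit() (line 270) reports whether po->xmit was switched from dev_queue_xmit to packet_direct_xmit. User space flips that via the PACKET_QDISC_BYPASS option handled at line 3972; a minimal sketch (the helper name is illustrative):

#include <sys/socket.h>
#include <linux/if_packet.h>

/* Request qdisc bypass so transmits use packet_direct_xmit()
 * instead of dev_queue_xmit(). */
static int enable_qdisc_bypass(int fd)
{
        int one = 1;

        /* After this, packet_use_direct_xmit() returns true. */
        return setsockopt(fd, SOL_PACKET, PACKET_QDISC_BYPASS,
                          &one, sizeof(one));
}
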
302 struct packet_sock *po = pkt_sk(sk); in __register_prot_hook() local
304 if (!po->running) { in __register_prot_hook()
305 if (po->fanout) in __register_prot_hook()
306 __fanout_link(sk, po); in __register_prot_hook()
308 dev_add_pack(&po->prot_hook); in __register_prot_hook()
311 po->running = 1; in __register_prot_hook()
329 struct packet_sock *po = pkt_sk(sk); in __unregister_prot_hook() local
331 lockdep_assert_held_once(&po->bind_lock); in __unregister_prot_hook()
333 po->running = 0; in __unregister_prot_hook()
335 if (po->fanout) in __unregister_prot_hook()
336 __fanout_unlink(sk, po); in __unregister_prot_hook()
338 __dev_remove_pack(&po->prot_hook); in __unregister_prot_hook()
343 spin_unlock(&po->bind_lock); in __unregister_prot_hook()
345 spin_lock(&po->bind_lock); in __unregister_prot_hook()
351 struct packet_sock *po = pkt_sk(sk); in unregister_prot_hook() local
353 if (po->running) in unregister_prot_hook()
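
__register_prot_hook() runs when the socket binds (or is created with a nonzero protocol) and adds po->prot_hook to the device's packet taps. From user space the trigger is socket(AF_PACKET, ...) followed by bind() with a struct sockaddr_ll; a minimal sketch, assuming a valid interface name:

#include <string.h>
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
#include <net/if.h>
#include <arpa/inet.h>

/* Open a raw packet socket and bind it to one interface; the bind()
 * takes the packet_do_bind() -> register_prot_hook() path above. */
static int open_bound_packet_socket(const char *ifname)
{
        struct sockaddr_ll ll;
        int fd;

        fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
        if (fd < 0)
                return -1;

        memset(&ll, 0, sizeof(ll));
        ll.sll_family   = AF_PACKET;
        ll.sll_protocol = htons(ETH_P_ALL);
        ll.sll_ifindex  = if_nametoindex(ifname);

        if (bind(fd, (struct sockaddr *)&ll, sizeof(ll)) < 0)
                return -1;
        return fd;
}
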
364 static void __packet_set_status(struct packet_sock *po, void *frame, int status) in __packet_set_status() argument
369 switch (po->tp_version) { in __packet_set_status()
390 static int __packet_get_status(const struct packet_sock *po, void *frame) in __packet_get_status() argument
397 switch (po->tp_version) { in __packet_get_status()
431 static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame, in __packet_set_timestamp() argument
438 if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp))) in __packet_set_timestamp()
449 switch (po->tp_version) { in __packet_set_timestamp()
474 static void *packet_lookup_frame(const struct packet_sock *po, in packet_lookup_frame() argument
488 if (status != __packet_get_status(po, h.raw)) in packet_lookup_frame()
494 static void *packet_current_frame(struct packet_sock *po, in packet_current_frame() argument
498 return packet_lookup_frame(po, rb, rb->head, status); in packet_current_frame()
506 static void prb_shutdown_retire_blk_timer(struct packet_sock *po, in prb_shutdown_retire_blk_timer() argument
511 pkc = GET_PBDQC_FROM_RB(&po->rx_ring); in prb_shutdown_retire_blk_timer()
520 static void prb_setup_retire_blk_timer(struct packet_sock *po) in prb_setup_retire_blk_timer() argument
524 pkc = GET_PBDQC_FROM_RB(&po->rx_ring); in prb_setup_retire_blk_timer()
530 static int prb_calc_retire_blk_tmo(struct packet_sock *po, in prb_calc_retire_blk_tmo() argument
539 dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex); in prb_calc_retire_blk_tmo()
573 static void init_prb_bdqc(struct packet_sock *po, in init_prb_bdqc() argument
589 p1->hdrlen = po->tp_hdrlen; in init_prb_bdqc()
590 p1->version = po->tp_version; in init_prb_bdqc()
592 po->stats.stats3.tp_freeze_q_cnt = 0; in init_prb_bdqc()
596 p1->retire_blk_tov = prb_calc_retire_blk_tmo(po, in init_prb_bdqc()
604 prb_setup_retire_blk_timer(po); in init_prb_bdqc()
643 struct packet_sock *po = in prb_retire_rx_blk_timer_expired() local
644 from_timer(po, t, rx_ring.prb_bdqc.retire_blk_timer); in prb_retire_rx_blk_timer_expired()
645 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring); in prb_retire_rx_blk_timer_expired()
649 spin_lock(&po->sk.sk_receive_queue.lock); in prb_retire_rx_blk_timer_expired()
678 prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO); in prb_retire_rx_blk_timer_expired()
679 if (!prb_dispatch_next_block(pkc, po)) in prb_retire_rx_blk_timer_expired()
711 spin_unlock(&po->sk.sk_receive_queue.lock); in prb_retire_rx_blk_timer_expired()
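
prb_setup_retire_blk_timer() (line 520) arms the per-ring timer serviced by prb_retire_rx_blk_timer_expired(); the timeout comes from tpacket_req3.tp_retire_blk_tov, with 0 meaning prb_calc_retire_blk_tmo() derives a default from the link speed. A user-space sketch of the corresponding TPACKET_V3 ring setup (the sizes are arbitrary examples):

#include <string.h>
#include <sys/socket.h>
#include <sys/mman.h>
#include <linux/if_packet.h>

/* Configure a TPACKET_V3 RX ring; tp_retire_blk_tov feeds
 * init_prb_bdqc() and the retire-block timer shown above. */
static void *setup_v3_rx_ring(int fd, struct tpacket_req3 *req)
{
        int ver = TPACKET_V3;

        if (setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver)))
                return NULL;

        memset(req, 0, sizeof(*req));
        req->tp_block_size     = 1 << 22;   /* 4 MiB per block */
        req->tp_block_nr       = 64;
        req->tp_frame_size     = 2048;      /* V3 RX packs frames itself, but
                                             * the nr/size arithmetic must match */
        req->tp_frame_nr       = (req->tp_block_size / req->tp_frame_size) *
                                 req->tp_block_nr;
        req->tp_retire_blk_tov = 60;        /* retire a block after 60 ms */

        if (setsockopt(fd, SOL_PACKET, PACKET_RX_RING, req, sizeof(*req)))
                return NULL;

        return mmap(NULL, (size_t)req->tp_block_size * req->tp_block_nr,
                    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
}
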
759 struct packet_sock *po, unsigned int stat) in prb_close_block() argument
765 struct sock *sk = &po->sk; in prb_close_block()
767 if (atomic_read(&po->tp_drops)) in prb_close_block()
872 struct packet_sock *po) in prb_freeze_queue() argument
875 po->stats.stats3.tp_freeze_q_cnt++; in prb_freeze_queue()
887 struct packet_sock *po) in prb_dispatch_next_block() argument
898 prb_freeze_queue(pkc, po); in prb_dispatch_next_block()
912 struct packet_sock *po, unsigned int status) in prb_retire_current_block() argument
932 prb_close_block(pkc, pbd, po, status); in prb_retire_current_block()
1012 static void *__packet_lookup_frame_in_block(struct packet_sock *po, in __packet_lookup_frame_in_block() argument
1021 pkc = GET_PBDQC_FROM_RB(&po->rx_ring); in __packet_lookup_frame_in_block()
1056 prb_retire_current_block(pkc, po, 0); in __packet_lookup_frame_in_block()
1059 curr = (char *)prb_dispatch_next_block(pkc, po); in __packet_lookup_frame_in_block()
1073 static void *packet_current_rx_frame(struct packet_sock *po, in packet_current_rx_frame() argument
1078 switch (po->tp_version) { in packet_current_rx_frame()
1081 curr = packet_lookup_frame(po, &po->rx_ring, in packet_current_rx_frame()
1082 po->rx_ring.head, status); in packet_current_rx_frame()
1085 return __packet_lookup_frame_in_block(po, skb, len); in packet_current_rx_frame()
1093 static void *prb_lookup_block(const struct packet_sock *po, in prb_lookup_block() argument
1117 static void *__prb_previous_block(struct packet_sock *po, in __prb_previous_block() argument
1122 return prb_lookup_block(po, rb, previous, status); in __prb_previous_block()
1125 static void *packet_previous_rx_frame(struct packet_sock *po, in packet_previous_rx_frame() argument
1129 if (po->tp_version <= TPACKET_V2) in packet_previous_rx_frame()
1130 return packet_previous_frame(po, rb, status); in packet_previous_rx_frame()
1132 return __prb_previous_block(po, rb, status); in packet_previous_rx_frame()
1135 static void packet_increment_rx_head(struct packet_sock *po, in packet_increment_rx_head() argument
1138 switch (po->tp_version) { in packet_increment_rx_head()
1150 static void *packet_previous_frame(struct packet_sock *po, in packet_previous_frame() argument
1155 return packet_lookup_frame(po, rb, previous, status); in packet_previous_frame()
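
Once prb_close_block() publishes a block, its block_status carries TP_STATUS_USER; user space walks the tpacket3_hdr chain inside it via tp_next_offset, then hands the block back so prb_dispatch_next_block() can reuse it. A minimal consumer-side sketch:

#include <stdint.h>
#include <stdio.h>
#include <linux/if_packet.h>

/* Drain one retired TPACKET_V3 block, then return it to the kernel.
 * 'block' points into the mmap()ed ring set up earlier. */
static void drain_v3_block(struct tpacket_block_desc *block)
{
        struct tpacket3_hdr *ppd;
        uint32_t i, num;

        if (!(block->hdr.bh1.block_status & TP_STATUS_USER))
                return;                     /* still owned by the kernel */

        num = block->hdr.bh1.num_pkts;
        ppd = (struct tpacket3_hdr *)((uint8_t *)block +
                                      block->hdr.bh1.offset_to_first_pkt);

        for (i = 0; i < num; i++) {
                printf("len=%u snaplen=%u\n", ppd->tp_len, ppd->tp_snaplen);
                ppd = (struct tpacket3_hdr *)((uint8_t *)ppd +
                                              ppd->tp_next_offset);
        }

        /* Flush reads, then give the block back (TP_STATUS_KERNEL). */
        __sync_synchronize();
        block->hdr.bh1.block_status = TP_STATUS_KERNEL;
}
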
1188 static int packet_alloc_pending(struct packet_sock *po) in packet_alloc_pending() argument
1190 po->rx_ring.pending_refcnt = NULL; in packet_alloc_pending()
1192 po->tx_ring.pending_refcnt = alloc_percpu(unsigned int); in packet_alloc_pending()
1193 if (unlikely(po->tx_ring.pending_refcnt == NULL)) in packet_alloc_pending()
1199 static void packet_free_pending(struct packet_sock *po) in packet_free_pending() argument
1201 free_percpu(po->tx_ring.pending_refcnt); in packet_free_pending()
1209 static bool __tpacket_has_room(const struct packet_sock *po, int pow_off) in __tpacket_has_room() argument
1213 len = READ_ONCE(po->rx_ring.frame_max) + 1; in __tpacket_has_room()
1214 idx = READ_ONCE(po->rx_ring.head); in __tpacket_has_room()
1219 return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL); in __tpacket_has_room()
1222 static bool __tpacket_v3_has_room(const struct packet_sock *po, int pow_off) in __tpacket_v3_has_room() argument
1226 len = READ_ONCE(po->rx_ring.prb_bdqc.knum_blocks); in __tpacket_v3_has_room()
1227 idx = READ_ONCE(po->rx_ring.prb_bdqc.kactive_blk_num); in __tpacket_v3_has_room()
1232 return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL); in __tpacket_v3_has_room()
1235 static int __packet_rcv_has_room(const struct packet_sock *po, in __packet_rcv_has_room() argument
1238 const struct sock *sk = &po->sk; in __packet_rcv_has_room()
1241 if (po->prot_hook.func != tpacket_rcv) { in __packet_rcv_has_room()
1254 if (po->tp_version == TPACKET_V3) { in __packet_rcv_has_room()
1255 if (__tpacket_v3_has_room(po, ROOM_POW_OFF)) in __packet_rcv_has_room()
1257 else if (__tpacket_v3_has_room(po, 0)) in __packet_rcv_has_room()
1260 if (__tpacket_has_room(po, ROOM_POW_OFF)) in __packet_rcv_has_room()
1262 else if (__tpacket_has_room(po, 0)) in __packet_rcv_has_room()
1269 static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb) in packet_rcv_has_room() argument
1273 ret = __packet_rcv_has_room(po, skb); in packet_rcv_has_room()
1276 if (READ_ONCE(po->pressure) != pressure) in packet_rcv_has_room()
1277 WRITE_ONCE(po->pressure, pressure); in packet_rcv_has_room()
1282 static void packet_rcv_try_clear_pressure(struct packet_sock *po) in packet_rcv_try_clear_pressure() argument
1284 if (READ_ONCE(po->pressure) && in packet_rcv_try_clear_pressure()
1285 __packet_rcv_has_room(po, NULL) == ROOM_NORMAL) in packet_rcv_try_clear_pressure()
1286 WRITE_ONCE(po->pressure, 0); in packet_rcv_try_clear_pressure()
1304 static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb) in fanout_flow_is_huge() argument
1306 u32 *history = po->rollover->history; in fanout_flow_is_huge()
1359 struct packet_sock *po, *po_next, *po_skip = NULL; in fanout_demux_rollover() local
1362 po = pkt_sk(rcu_dereference(f->arr[idx])); in fanout_demux_rollover()
1365 room = packet_rcv_has_room(po, skb); in fanout_demux_rollover()
1367 (room == ROOM_LOW && !fanout_flow_is_huge(po, skb))) in fanout_demux_rollover()
1369 po_skip = po; in fanout_demux_rollover()
1372 i = j = min_t(int, po->rollover->sock, num - 1); in fanout_demux_rollover()
1378 po->rollover->sock = i; in fanout_demux_rollover()
1379 atomic_long_inc(&po->rollover->num); in fanout_demux_rollover()
1381 atomic_long_inc(&po->rollover->num_huge); in fanout_demux_rollover()
1389 atomic_long_inc(&po->rollover->num_failed); in fanout_demux_rollover()
1427 struct packet_sock *po; in packet_rcv_fanout() local
1469 po = pkt_sk(rcu_dereference(f->arr[idx])); in packet_rcv_fanout()
1470 return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev); in packet_rcv_fanout()
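
fanout_add() (line 1646) and packet_rcv_fanout() (line 1427) implement the PACKET_FANOUT option; the int optval packs the group id in the low 16 bits and the mode plus flags in the high 16, matching the encoding getsockopt() reads back at lines 4065-4068. A minimal sketch joining a hash group with rollover, so fanout_demux_rollover() steps in when the hashed socket is full:

#include <sys/socket.h>
#include <linux/if_packet.h>

/* Join fanout group 'id' in hash mode with the rollover flag set. */
static int join_fanout(int fd, unsigned short id)
{
        unsigned int typeflags = PACKET_FANOUT_HASH |
                                 PACKET_FANOUT_FLAG_ROLLOVER;
        int arg = id | (typeflags << 16);

        return setsockopt(fd, SOL_PACKET, PACKET_FANOUT,
                          &arg, sizeof(arg));
}
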
1478 static void __fanout_link(struct sock *sk, struct packet_sock *po) in __fanout_link() argument
1480 struct packet_fanout *f = po->fanout; in __fanout_link()
1491 static void __fanout_unlink(struct sock *sk, struct packet_sock *po) in __fanout_unlink() argument
1493 struct packet_fanout *f = po->fanout; in __fanout_unlink()
1548 static int fanout_set_data_cbpf(struct packet_sock *po, sockptr_t data, in fanout_set_data_cbpf() argument
1555 if (sock_flag(&po->sk, SOCK_FILTER_LOCKED)) in fanout_set_data_cbpf()
1566 __fanout_set_data_bpf(po->fanout, new); in fanout_set_data_cbpf()
1570 static int fanout_set_data_ebpf(struct packet_sock *po, sockptr_t data, in fanout_set_data_ebpf() argument
1576 if (sock_flag(&po->sk, SOCK_FILTER_LOCKED)) in fanout_set_data_ebpf()
1587 __fanout_set_data_bpf(po->fanout, new); in fanout_set_data_ebpf()
1591 static int fanout_set_data(struct packet_sock *po, sockptr_t data, in fanout_set_data() argument
1594 switch (po->fanout->type) { in fanout_set_data()
1596 return fanout_set_data_cbpf(po, data, len); in fanout_set_data()
1598 return fanout_set_data_ebpf(po, data, len); in fanout_set_data()
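
fanout_set_data_cbpf() (line 1548) installs a classic BPF program through PACKET_FANOUT_DATA once the group's type is PACKET_FANOUT_CBPF; the program's return value, taken modulo the group size, selects the receiving socket. A minimal sketch with a trivial always-socket-0 program:

#include <sys/socket.h>
#include <linux/if_packet.h>
#include <linux/filter.h>

/* Steer everything to socket 0 in a PACKET_FANOUT_CBPF group. */
static int set_fanout_cbpf(int fd)
{
        struct sock_filter code[] = {
                { BPF_LD | BPF_IMM, 0, 0, 0 },  /* A = 0    */
                { BPF_RET | BPF_A,  0, 0, 0 },  /* return A */
        };
        struct sock_fprog prog = {
                .len    = sizeof(code) / sizeof(code[0]),
                .filter = code,
        };

        return setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA,
                          &prog, sizeof(prog));
}
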
1646 struct packet_sock *po = pkt_sk(sk); in fanout_add() local
1673 if (po->fanout) in fanout_add()
1734 match->prot_hook.type = po->prot_hook.type; in fanout_add()
1735 match->prot_hook.dev = po->prot_hook.dev; in fanout_add()
1745 spin_lock(&po->bind_lock); in fanout_add()
1746 if (po->running && in fanout_add()
1748 match->prot_hook.type == po->prot_hook.type && in fanout_add()
1749 match->prot_hook.dev == po->prot_hook.dev) { in fanout_add()
1752 __dev_remove_pack(&po->prot_hook); in fanout_add()
1755 WRITE_ONCE(po->fanout, match); in fanout_add()
1757 po->rollover = rollover; in fanout_add()
1760 __fanout_link(sk, po); in fanout_add()
1764 spin_unlock(&po->bind_lock); in fanout_add()
1784 struct packet_sock *po = pkt_sk(sk); in fanout_release() local
1788 f = po->fanout; in fanout_release()
1790 po->fanout = NULL; in fanout_release()
2076 struct packet_sock *po; in packet_rcv() local
2086 po = pkt_sk(sk); in packet_rcv()
2138 if (unlikely(po->origdev)) in packet_rcv()
2161 po->stats.stats1.tp_packets++; in packet_rcv()
2170 atomic_inc(&po->tp_drops); in packet_rcv()
2190 struct packet_sock *po; in tpacket_rcv() local
2217 po = pkt_sk(sk); in tpacket_rcv()
2238 if (__packet_rcv_has_room(po, skb) == ROOM_NONE) { in tpacket_rcv()
2239 atomic_inc(&po->tp_drops); in tpacket_rcv()
2253 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 + in tpacket_rcv()
2254 po->tp_reserve; in tpacket_rcv()
2257 netoff = TPACKET_ALIGN(po->tp_hdrlen + in tpacket_rcv()
2259 po->tp_reserve; in tpacket_rcv()
2260 if (po->has_vnet_hdr) { in tpacket_rcv()
2267 atomic_inc(&po->tp_drops); in tpacket_rcv()
2270 if (po->tp_version <= TPACKET_V2) { in tpacket_rcv()
2271 if (macoff + snaplen > po->rx_ring.frame_size) { in tpacket_rcv()
2272 if (po->copy_thresh && in tpacket_rcv()
2286 snaplen = po->rx_ring.frame_size - macoff; in tpacket_rcv()
2293 GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) { in tpacket_rcv()
2296 nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff; in tpacket_rcv()
2302 macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len; in tpacket_rcv()
2307 h.raw = packet_current_rx_frame(po, skb, in tpacket_rcv()
2312 if (po->tp_version <= TPACKET_V2) { in tpacket_rcv()
2313 slot_id = po->rx_ring.head; in tpacket_rcv()
2314 if (test_bit(slot_id, po->rx_ring.rx_owner_map)) in tpacket_rcv()
2316 __set_bit(slot_id, po->rx_ring.rx_owner_map); in tpacket_rcv()
2323 if (po->tp_version == TPACKET_V3) in tpacket_rcv()
2324 prb_clear_blk_fill_status(&po->rx_ring); in tpacket_rcv()
2328 if (po->tp_version <= TPACKET_V2) { in tpacket_rcv()
2329 packet_increment_rx_head(po, &po->rx_ring); in tpacket_rcv()
2336 if (atomic_read(&po->tp_drops)) in tpacket_rcv()
2340 po->stats.stats1.tp_packets++; in tpacket_rcv()
2353 po->tp_tstamp | SOF_TIMESTAMPING_SOFTWARE); in tpacket_rcv()
2359 switch (po->tp_version) { in tpacket_rcv()
2411 if (unlikely(po->origdev)) in tpacket_rcv()
2419 if (po->tp_version <= TPACKET_V2) { in tpacket_rcv()
2431 if (po->tp_version <= TPACKET_V2) { in tpacket_rcv()
2433 __packet_set_status(po, h.raw, status); in tpacket_rcv()
2434 __clear_bit(slot_id, po->rx_ring.rx_owner_map); in tpacket_rcv()
2437 } else if (po->tp_version == TPACKET_V3) { in tpacket_rcv()
2438 prb_clear_blk_fill_status(&po->rx_ring); in tpacket_rcv()
2455 atomic_inc(&po->tp_drops); in tpacket_rcv()
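
tpacket_rcv() publishes each filled slot by flipping tp_status from TP_STATUS_KERNEL to TP_STATUS_USER via __packet_set_status() (line 2433); user space reverses the handshake after consuming the frame. A minimal TPACKET_V2 sketch:

#include <poll.h>
#include <stdio.h>
#include <linux/if_packet.h>

/* Consume one TPACKET_V2 frame at ring slot 'hdr', blocking in poll()
 * until tpacket_rcv() has published it with TP_STATUS_USER. */
static void read_one_v2_frame(int fd, struct tpacket2_hdr *hdr)
{
        struct pollfd pfd = { .fd = fd, .events = POLLIN };

        while (!(hdr->tp_status & TP_STATUS_USER))
                poll(&pfd, 1, -1);

        printf("mac off %u, %u bytes captured of %u\n",
               hdr->tp_mac, hdr->tp_snaplen, hdr->tp_len);

        /* Hand the slot back; the kernel may fill it again. */
        __sync_synchronize();
        hdr->tp_status = TP_STATUS_KERNEL;
}
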
2465 struct packet_sock *po = pkt_sk(skb->sk); in tpacket_destruct_skb() local
2467 if (likely(po->tx_ring.pg_vec)) { in tpacket_destruct_skb()
2472 packet_dec_pending(&po->tx_ring); in tpacket_destruct_skb()
2474 ts = __packet_set_timestamp(po, ph, skb); in tpacket_destruct_skb()
2475 __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts); in tpacket_destruct_skb()
2477 if (!packet_read_pending(&po->tx_ring)) in tpacket_destruct_skb()
2478 complete(&po->skb_completion); in tpacket_destruct_skb()
2513 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, in tpacket_fill_skb() argument
2520 struct socket *sock = po->sk.sk_socket; in tpacket_fill_skb()
2528 skb->priority = po->sk.sk_priority; in tpacket_fill_skb()
2529 skb->mark = po->sk.sk_mark; in tpacket_fill_skb()
2566 refcount_add(to_write, &po->sk.sk_wmem_alloc); in tpacket_fill_skb()
2593 static int tpacket_parse_header(struct packet_sock *po, void *frame, in tpacket_parse_header() argument
2601 switch (po->tp_version) { in tpacket_parse_header()
2621 if (unlikely(po->tp_tx_has_off)) { in tpacket_parse_header()
2624 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll); in tpacket_parse_header()
2625 off_max = po->tx_ring.frame_size - tp_len; in tpacket_parse_header()
2626 if (po->sk.sk_type == SOCK_DGRAM) { in tpacket_parse_header()
2627 switch (po->tp_version) { in tpacket_parse_header()
2639 switch (po->tp_version) { in tpacket_parse_header()
2654 off = po->tp_hdrlen - sizeof(struct sockaddr_ll); in tpacket_parse_header()
2661 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) in tpacket_snd() argument
2680 mutex_lock(&po->pg_vec_lock); in tpacket_snd()
2685 if (unlikely(!po->tx_ring.pg_vec)) { in tpacket_snd()
2690 dev = packet_cached_dev_get(po); in tpacket_snd()
2691 proto = READ_ONCE(po->num); in tpacket_snd()
2701 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); in tpacket_snd()
2702 if (po->sk.sk_socket->type == SOCK_DGRAM) { in tpacket_snd()
2717 sockcm_init(&sockc, &po->sk); in tpacket_snd()
2719 err = sock_cmsg_send(&po->sk, msg, &sockc); in tpacket_snd()
2724 if (po->sk.sk_socket->type == SOCK_RAW) in tpacket_snd()
2726 size_max = po->tx_ring.frame_size in tpacket_snd()
2727 - (po->tp_hdrlen - sizeof(struct sockaddr_ll)); in tpacket_snd()
2729 if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr) in tpacket_snd()
2732 reinit_completion(&po->skb_completion); in tpacket_snd()
2735 ph = packet_current_frame(po, &po->tx_ring, in tpacket_snd()
2739 timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT); in tpacket_snd()
2740 timeo = wait_for_completion_interruptible_timeout(&po->skb_completion, timeo); in tpacket_snd()
2751 tp_len = tpacket_parse_header(po, ph, size_max, &data); in tpacket_snd()
2758 if (po->has_vnet_hdr) { in tpacket_snd()
2771 skb = sock_alloc_send_skb(&po->sk, in tpacket_snd()
2782 tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto, in tpacket_snd()
2786 !po->has_vnet_hdr && in tpacket_snd()
2792 if (po->tp_loss) { in tpacket_snd()
2793 __packet_set_status(po, ph, in tpacket_snd()
2795 packet_increment_head(&po->tx_ring); in tpacket_snd()
2805 if (po->has_vnet_hdr) { in tpacket_snd()
2814 __packet_set_status(po, ph, TP_STATUS_SENDING); in tpacket_snd()
2815 packet_inc_pending(&po->tx_ring); in tpacket_snd()
2818 err = po->xmit(skb); in tpacket_snd()
2822 if (err && __packet_get_status(po, ph) == in tpacket_snd()
2834 packet_increment_head(&po->tx_ring); in tpacket_snd()
2843 (need_wait && packet_read_pending(&po->tx_ring)))); in tpacket_snd()
2849 __packet_set_status(po, ph, status); in tpacket_snd()
2854 mutex_unlock(&po->pg_vec_lock); in tpacket_snd()
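
tpacket_snd() scans the TX ring for frames marked TP_STATUS_SEND_REQUEST (line 2735), and tpacket_destruct_skb() returns each slot to TP_STATUS_AVAILABLE on completion (line 2475). A sketch of the user-space side for TPACKET_V2, assuming the default data offset of line 2654 (no PACKET_TX_HAS_OFF):

#include <string.h>
#include <stdint.h>
#include <sys/socket.h>
#include <linux/if_packet.h>

/* Queue one frame on a TPACKET_V2 TX ring slot and kick tpacket_snd().
 * 'slot' points at a frame-sized region of the mmap()ed TX ring. */
static int send_one_ring_frame(int fd, void *slot,
                               const void *pkt, unsigned int len)
{
        struct tpacket2_hdr *hdr = slot;

        if (hdr->tp_status != TP_STATUS_AVAILABLE)
                return -1;                  /* slot still in flight */

        memcpy((uint8_t *)slot + TPACKET2_HDRLEN -
               sizeof(struct sockaddr_ll), pkt, len);
        hdr->tp_len = len;

        __sync_synchronize();
        hdr->tp_status = TP_STATUS_SEND_REQUEST;

        /* Flush the ring; MSG_DONTWAIT returns before completion. */
        return send(fd, NULL, 0, MSG_DONTWAIT);
}
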
2894 struct packet_sock *po = pkt_sk(sk); in packet_snd() local
2904 dev = packet_cached_dev_get(po); in packet_snd()
2905 proto = READ_ONCE(po->num); in packet_snd()
2939 if (po->has_vnet_hdr) { in packet_snd()
3021 err = po->xmit(skb); in packet_snd()
3045 struct packet_sock *po = pkt_sk(sk); in packet_sendmsg() local
3050 if (data_race(po->tx_ring.pg_vec)) in packet_sendmsg()
3051 return tpacket_snd(po, msg); in packet_sendmsg()
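
Without a TX ring, packet_sendmsg() (line 3045) falls through to packet_snd(), i.e. the ordinary sendto() path, with the destination link-layer address carried in the sockaddr_ll. A minimal sketch:

#include <string.h>
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>

/* Non-ring TX: one sendto() per frame takes the packet_snd() path. */
static int send_frame(int fd, int ifindex,
                      const unsigned char dest[ETH_ALEN],
                      const void *frame, size_t len)
{
        struct sockaddr_ll ll;

        memset(&ll, 0, sizeof(ll));
        ll.sll_family  = AF_PACKET;
        ll.sll_ifindex = ifindex;
        ll.sll_halen   = ETH_ALEN;
        memcpy(ll.sll_addr, dest, ETH_ALEN);

        return sendto(fd, frame, len, 0,
                      (struct sockaddr *)&ll, sizeof(ll));
}
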
3064 struct packet_sock *po; in packet_release() local
3073 po = pkt_sk(sk); in packet_release()
3083 spin_lock(&po->bind_lock); in packet_release()
3085 packet_cached_dev_reset(po); in packet_release()
3087 if (po->prot_hook.dev) { in packet_release()
3088 dev_put(po->prot_hook.dev); in packet_release()
3089 po->prot_hook.dev = NULL; in packet_release()
3091 spin_unlock(&po->bind_lock); in packet_release()
3096 if (po->rx_ring.pg_vec) { in packet_release()
3101 if (po->tx_ring.pg_vec) { in packet_release()
3111 kfree(po->rollover); in packet_release()
3125 packet_free_pending(po); in packet_release()
3139 struct packet_sock *po = pkt_sk(sk); in packet_do_bind() local
3148 spin_lock(&po->bind_lock); in packet_do_bind()
3151 if (po->fanout) { in packet_do_bind()
3173 proto_curr = po->prot_hook.type; in packet_do_bind()
3174 dev_curr = po->prot_hook.dev; in packet_do_bind()
3179 if (po->running) { in packet_do_bind()
3184 WRITE_ONCE(po->num, 0); in packet_do_bind()
3187 dev_curr = po->prot_hook.dev; in packet_do_bind()
3193 BUG_ON(po->running); in packet_do_bind()
3194 WRITE_ONCE(po->num, proto); in packet_do_bind()
3195 po->prot_hook.type = proto; in packet_do_bind()
3199 po->prot_hook.dev = NULL; in packet_do_bind()
3200 WRITE_ONCE(po->ifindex, -1); in packet_do_bind()
3201 packet_cached_dev_reset(po); in packet_do_bind()
3203 po->prot_hook.dev = dev; in packet_do_bind()
3204 WRITE_ONCE(po->ifindex, dev ? dev->ifindex : 0); in packet_do_bind()
3205 packet_cached_dev_assign(po, dev); in packet_do_bind()
3224 spin_unlock(&po->bind_lock); in packet_do_bind()
3286 struct packet_sock *po; in packet_create() local
3309 po = pkt_sk(sk); in packet_create()
3310 init_completion(&po->skb_completion); in packet_create()
3312 po->num = proto; in packet_create()
3313 po->xmit = dev_queue_xmit; in packet_create()
3315 err = packet_alloc_pending(po); in packet_create()
3319 packet_cached_dev_reset(po); in packet_create()
3328 spin_lock_init(&po->bind_lock); in packet_create()
3329 mutex_init(&po->pg_vec_lock); in packet_create()
3330 po->rollover = NULL; in packet_create()
3331 po->prot_hook.func = packet_rcv; in packet_create()
3334 po->prot_hook.func = packet_rcv_spkt; in packet_create()
3336 po->prot_hook.af_packet_priv = sk; in packet_create()
3337 po->prot_hook.af_packet_net = sock_net(sk); in packet_create()
3340 po->prot_hook.type = proto; in packet_create()
3537 struct packet_sock *po = pkt_sk(sk); in packet_getname() local
3544 ifindex = READ_ONCE(po->ifindex); in packet_getname()
3547 sll->sll_protocol = READ_ONCE(po->num); in packet_getname()
3611 struct packet_sock *po = pkt_sk(sk); in packet_mc_add() local
3633 for (ml = po->mclist; ml; ml = ml->next) { in packet_mc_add()
3651 i->next = po->mclist; in packet_mc_add()
3652 po->mclist = i; in packet_mc_add()
3655 po->mclist = i->next; in packet_mc_add()
3692 struct packet_sock *po = pkt_sk(sk); in packet_flush_mclist() local
3695 if (!po->mclist) in packet_flush_mclist()
3699 while ((ml = po->mclist) != NULL) { in packet_flush_mclist()
3702 po->mclist = ml->next; in packet_flush_mclist()
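
packet_mc_add() (line 3611) services PACKET_ADD_MEMBERSHIP; besides multicast addresses it accepts PACKET_MR_PROMISC, the usual way to enable promiscuous mode for the lifetime of the socket. A minimal sketch:

#include <string.h>
#include <sys/socket.h>
#include <linux/if_packet.h>

/* Ask packet_mc_add() to bump promiscuous mode on one interface;
 * packet_flush_mclist() drops it again when the socket closes. */
static int enable_promisc(int fd, int ifindex)
{
        struct packet_mreq mreq;

        memset(&mreq, 0, sizeof(mreq));
        mreq.mr_ifindex = ifindex;
        mreq.mr_type    = PACKET_MR_PROMISC;

        return setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
                          &mreq, sizeof(mreq));
}
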
3716 struct packet_sock *po = pkt_sk(sk); in packet_setsockopt() local
3751 switch (po->tp_version) { in packet_setsockopt()
3802 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { in packet_setsockopt()
3805 po->tp_version = val; in packet_setsockopt()
3822 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { in packet_setsockopt()
3825 po->tp_reserve = val; in packet_setsockopt()
3841 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { in packet_setsockopt()
3844 po->tp_loss = !!val; in packet_setsockopt()
3860 po->auxdata = !!val; in packet_setsockopt()
3874 po->origdev = !!val; in packet_setsockopt()
3890 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { in packet_setsockopt()
3893 po->has_vnet_hdr = !!val; in packet_setsockopt()
3908 po->tp_tstamp = val; in packet_setsockopt()
3925 if (!READ_ONCE(po->fanout)) in packet_setsockopt()
3928 return fanout_set_data(po, optval, optlen); in packet_setsockopt()
3941 po->prot_hook.ignore_outgoing = !!val; in packet_setsockopt()
3954 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { in packet_setsockopt()
3957 po->tp_tx_has_off = !!val; in packet_setsockopt()
3972 po->xmit = val ? packet_direct_xmit : dev_queue_xmit; in packet_setsockopt()
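
Among the options above, PACKET_TIMESTAMP (line 3908) stores SOF_TIMESTAMPING flags in po->tp_tstamp, which tpacket_rcv() later ORs with SOF_TIMESTAMPING_SOFTWARE (line 2353) when stamping ring frames. A sketch requesting hardware timestamps:

#include <sys/socket.h>
#include <linux/if_packet.h>
#include <linux/net_tstamp.h>

/* Prefer NIC hardware timestamps for ring frames; tpacket_rcv()
 * still falls back to software stamps when none is available. */
static int request_hw_timestamps(int fd)
{
        int req = SOF_TIMESTAMPING_RAW_HARDWARE;

        return setsockopt(fd, SOL_PACKET, PACKET_TIMESTAMP,
                          &req, sizeof(req));
}
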
3986 struct packet_sock *po = pkt_sk(sk); in packet_getsockopt() local
4004 memcpy(&st, &po->stats, sizeof(st)); in packet_getsockopt()
4005 memset(&po->stats, 0, sizeof(po->stats)); in packet_getsockopt()
4007 drops = atomic_xchg(&po->tp_drops, 0); in packet_getsockopt()
4009 if (po->tp_version == TPACKET_V3) { in packet_getsockopt()
4023 val = po->auxdata; in packet_getsockopt()
4026 val = po->origdev; in packet_getsockopt()
4029 val = po->has_vnet_hdr; in packet_getsockopt()
4032 val = po->tp_version; in packet_getsockopt()
4056 val = po->tp_reserve; in packet_getsockopt()
4059 val = po->tp_loss; in packet_getsockopt()
4062 val = po->tp_tstamp; in packet_getsockopt()
4065 val = (po->fanout ? in packet_getsockopt()
4066 ((u32)po->fanout->id | in packet_getsockopt()
4067 ((u32)po->fanout->type << 16) | in packet_getsockopt()
4068 ((u32)po->fanout->flags << 24)) : in packet_getsockopt()
4072 val = po->prot_hook.ignore_outgoing; in packet_getsockopt()
4075 if (!po->rollover) in packet_getsockopt()
4077 rstats.tp_all = atomic_long_read(&po->rollover->num); in packet_getsockopt()
4078 rstats.tp_huge = atomic_long_read(&po->rollover->num_huge); in packet_getsockopt()
4079 rstats.tp_failed = atomic_long_read(&po->rollover->num_failed); in packet_getsockopt()
4084 val = po->tp_tx_has_off; in packet_getsockopt()
4087 val = packet_use_direct_xmit(po); in packet_getsockopt()
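
The PACKET_STATISTICS branch (lines 4004-4007) copies and then zeroes po->stats and drains tp_drops with atomic_xchg(), so each call returns counters accumulated since the previous one. A minimal sketch:

#include <stdio.h>
#include <sys/socket.h>
#include <linux/if_packet.h>

/* Read-and-reset the socket's packet/drop counters. */
static void dump_packet_stats(int fd)
{
        struct tpacket_stats st;
        socklen_t len = sizeof(st);

        if (getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len) == 0)
                printf("packets=%u drops=%u\n", st.tp_packets, st.tp_drops);
}
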
4111 struct packet_sock *po = pkt_sk(sk); in packet_notifier() local
4115 if (po->mclist) in packet_notifier()
4116 packet_dev_mclist_delete(dev, &po->mclist); in packet_notifier()
4120 if (dev->ifindex == po->ifindex) { in packet_notifier()
4121 spin_lock(&po->bind_lock); in packet_notifier()
4122 if (po->running) { in packet_notifier()
4129 packet_cached_dev_reset(po); in packet_notifier()
4130 WRITE_ONCE(po->ifindex, -1); in packet_notifier()
4131 if (po->prot_hook.dev) in packet_notifier()
4132 dev_put(po->prot_hook.dev); in packet_notifier()
4133 po->prot_hook.dev = NULL; in packet_notifier()
4135 spin_unlock(&po->bind_lock); in packet_notifier()
4139 if (dev->ifindex == po->ifindex) { in packet_notifier()
4140 spin_lock(&po->bind_lock); in packet_notifier()
4141 if (po->num) in packet_notifier()
4143 spin_unlock(&po->bind_lock); in packet_notifier()
4205 struct packet_sock *po = pkt_sk(sk); in packet_poll() local
4209 if (po->rx_ring.pg_vec) { in packet_poll()
4210 if (!packet_previous_rx_frame(po, &po->rx_ring, in packet_poll()
4214 packet_rcv_try_clear_pressure(po); in packet_poll()
4217 if (po->tx_ring.pg_vec) { in packet_poll()
4218 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE)) in packet_poll()
4327 struct packet_sock *po = pkt_sk(sk); in packet_set_ring() local
4337 rb = tx_ring ? &po->tx_ring : &po->rx_ring; in packet_set_ring()
4342 if (atomic_read(&po->mapped)) in packet_set_ring()
4356 switch (po->tp_version) { in packet_set_ring()
4358 po->tp_hdrlen = TPACKET_HDRLEN; in packet_set_ring()
4361 po->tp_hdrlen = TPACKET2_HDRLEN; in packet_set_ring()
4364 po->tp_hdrlen = TPACKET3_HDRLEN; in packet_set_ring()
4373 min_frame_size = po->tp_hdrlen + po->tp_reserve; in packet_set_ring()
4374 if (po->tp_version >= TPACKET_V3 && in packet_set_ring()
4397 switch (po->tp_version) { in packet_set_ring()
4401 init_prb_bdqc(po, rb, pg_vec, req_u); in packet_set_ring()
4432 spin_lock(&po->bind_lock); in packet_set_ring()
4433 was_running = po->running; in packet_set_ring()
4434 num = po->num; in packet_set_ring()
4436 WRITE_ONCE(po->num, 0); in packet_set_ring()
4439 spin_unlock(&po->bind_lock); in packet_set_ring()
4444 mutex_lock(&po->pg_vec_lock); in packet_set_ring()
4445 if (closing || atomic_read(&po->mapped) == 0) { in packet_set_ring()
4449 if (po->tp_version <= TPACKET_V2) in packet_set_ring()
4460 po->prot_hook.func = (po->rx_ring.pg_vec) ? in packet_set_ring()
4463 if (atomic_read(&po->mapped)) in packet_set_ring()
4465 atomic_read(&po->mapped)); in packet_set_ring()
4467 mutex_unlock(&po->pg_vec_lock); in packet_set_ring()
4469 spin_lock(&po->bind_lock); in packet_set_ring()
4471 WRITE_ONCE(po->num, num); in packet_set_ring()
4474 spin_unlock(&po->bind_lock); in packet_set_ring()
4475 if (pg_vec && (po->tp_version > TPACKET_V2)) { in packet_set_ring()
4478 prb_shutdown_retire_blk_timer(po, rb_queue); in packet_set_ring()
4494 struct packet_sock *po = pkt_sk(sk); in packet_mmap() local
4504 mutex_lock(&po->pg_vec_lock); in packet_mmap()
4507 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) { in packet_mmap()
4523 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) { in packet_mmap()
4543 atomic_inc(&po->mapped); in packet_mmap()
4548 mutex_unlock(&po->pg_vec_lock); in packet_mmap()
4633 const struct packet_sock *po = pkt_sk(s); in packet_seq_show() local
4640 ntohs(READ_ONCE(po->num)), in packet_seq_show()
4641 READ_ONCE(po->ifindex), in packet_seq_show()
4642 po->running, in packet_seq_show()