Lines matching refs:skb: cross-reference hits for the sk_buff identifier in the IPv4 input path (net/ipv4/ip_input.c). The leading number on each entry is the source line in that file; the trailing "argument" or "local" marks how skb is declared in the enclosing function.

148 bool ip_call_ra_chain(struct sk_buff *skb)  in ip_call_ra_chain()  argument
151 u8 protocol = ip_hdr(skb)->protocol; in ip_call_ra_chain()
153 struct net_device *dev = skb->dev; in ip_call_ra_chain()
165 if (ip_is_fragment(ip_hdr(skb))) { in ip_call_ra_chain()
166 if (ip_defrag(net, skb, IP_DEFRAG_CALL_RA_CHAIN)) in ip_call_ra_chain()
170 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); in ip_call_ra_chain()
179 raw_rcv(last, skb); in ip_call_ra_chain()
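Lines 148-179 are ip_call_ra_chain(), which delivers packets carrying the IP Router Alert option to every raw socket registered on the RA chain. Fragments are reassembled first (ip_defrag() at line 166), and line 170's skb_clone() implements the usual clone-per-listener pattern: every listener except the last gets a clone, and the original skb goes to the final one (line 179), so exactly one reference is consumed. A minimal sketch of that pattern; struct ra_entry and deliver_to_ra_listeners() are hypothetical stand-ins for the kernel's ip_ra_chain:

#include <linux/list.h>
#include <linux/skbuff.h>
#include <net/raw.h>

/* Hypothetical stand-in for the kernel's ip_ra_chain entries. */
struct ra_entry {
        struct list_head node;
        struct sock *sk;
};

static bool deliver_to_ra_listeners(struct list_head *chain,
                                    struct sk_buff *skb)
{
        struct sock *last = NULL;
        struct ra_entry *ra;

        list_for_each_entry(ra, chain, node) {
                if (last) {
                        struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

                        if (skb2)
                                raw_rcv(last, skb2);
                }
                last = ra->sk;
        }

        if (last) {
                raw_rcv(last, skb);     /* the original goes last */
                return true;            /* skb consumed */
        }
        return false;                   /* caller still owns the skb */
}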
187 void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int protocol) in ip_protocol_deliver_rcu() argument
193 raw = raw_local_deliver(skb, protocol); in ip_protocol_deliver_rcu()
198 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { in ip_protocol_deliver_rcu()
199 kfree_skb(skb); in ip_protocol_deliver_rcu()
202 nf_reset_ct(skb); in ip_protocol_deliver_rcu()
205 skb); in ip_protocol_deliver_rcu()
213 if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { in ip_protocol_deliver_rcu()
215 icmp_send(skb, ICMP_DEST_UNREACH, in ip_protocol_deliver_rcu()
218 kfree_skb(skb); in ip_protocol_deliver_rcu()
221 consume_skb(skb); in ip_protocol_deliver_rcu()
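Lines 187-221 are ip_protocol_deliver_rcu(), the transport demultiplexer: raw sockets get their copy first (raw_local_deliver(), line 193), then the handler registered in inet_protos[] for the packet's protocol is invoked through an indirect call (line 205). If no handler exists and XFRM policy allows (line 213), an ICMP protocol-unreachable goes back to the sender (line 215); a packet that only raw sockets wanted is consumed at line 221. A simplified sketch of the demux step, assuming the real inet_protos[] table from <net/protocol.h>; the raw-socket, xfrm, and resubmit paths are omitted:

#include <linux/icmp.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <net/icmp.h>
#include <net/protocol.h>

/* Caller must hold rcu_read_lock(), as the _rcu suffix implies. */
static void deliver_l4_sketch(struct sk_buff *skb, int protocol)
{
        const struct net_protocol *ipprot;

        ipprot = rcu_dereference(inet_protos[protocol]);
        if (ipprot) {
                /* e.g. tcp_v4_rcv() or udp_rcv(); the handler now
                 * owns the skb. */
                ipprot->handler(skb);
        } else {
                /* Nobody registered this protocol: tell the sender,
                 * then free the packet. */
                icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PROT_UNREACH, 0);
                kfree_skb(skb);
        }
}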
226 static int ip_local_deliver_finish(struct net *net, struct sock *sk, struct sk_buff *skb) in ip_local_deliver_finish() argument
228 __skb_pull(skb, skb_network_header_len(skb)); in ip_local_deliver_finish()
231 ip_protocol_deliver_rcu(net, skb, ip_hdr(skb)->protocol); in ip_local_deliver_finish()
240 int ip_local_deliver(struct sk_buff *skb) in ip_local_deliver() argument
245 struct net *net = dev_net(skb->dev); in ip_local_deliver()
247 if (ip_is_fragment(ip_hdr(skb))) { in ip_local_deliver()
248 if (ip_defrag(net, skb, IP_DEFRAG_LOCAL_DELIVER)) in ip_local_deliver()
253 net, NULL, skb, skb->dev, NULL, in ip_local_deliver()
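Lines 226-253 cover local delivery. ip_local_deliver() queues fragments through ip_defrag(), which returns nonzero until the datagram is complete, then runs the NF_INET_LOCAL_IN netfilter hook with ip_local_deliver_finish() as the continuation; the finish function strips the network header (__skb_pull(), line 228) so skb->data points at the transport header before ip_protocol_deliver_rcu() runs. A condensed sketch of both steps; the _sketch names are hypothetical stand-ins for the file-local pair:

#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <net/ip.h>

static int local_deliver_finish_sketch(struct net *net, struct sock *sk,
                                       struct sk_buff *skb)
{
        /* Point skb->data at the transport header for the L4 code. */
        __skb_pull(skb, skb_network_header_len(skb));

        rcu_read_lock();
        ip_protocol_deliver_rcu(net, skb, ip_hdr(skb)->protocol);
        rcu_read_unlock();
        return 0;
}

static int local_deliver_sketch(struct sk_buff *skb)
{
        struct net *net = dev_net(skb->dev);

        if (ip_is_fragment(ip_hdr(skb)) &&
            ip_defrag(net, skb, IP_DEFRAG_LOCAL_DELIVER))
                return 0;       /* fragment queued; datagram incomplete */

        return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_IN,
                       net, NULL, skb, skb->dev, NULL,
                       local_deliver_finish_sketch);
}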
257 static inline bool ip_rcv_options(struct sk_buff *skb, struct net_device *dev) in ip_rcv_options() argument
269 if (skb_cow(skb, skb_headroom(skb))) { in ip_rcv_options()
274 iph = ip_hdr(skb); in ip_rcv_options()
275 opt = &(IPCB(skb)->opt); in ip_rcv_options()
278 if (ip_options_compile(dev_net(dev), opt, skb)) { in ip_rcv_options()
296 if (ip_options_rcv_srr(skb, dev)) in ip_rcv_options()
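Lines 257-296 are ip_rcv_options(), the slow path for headers with options (ihl > 5). skb_cow() at line 269 unshares the header because option processing may write to it (record-route and timestamp options are filled in place); ip_options_compile() then parses the option bytes into IPCB(skb)->opt, and ip_options_rcv_srr() validates strict/loose source routes. A small sketch of that cow-then-compile-then-srr order; the in_dev sysctl checks around source routing are elided:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/ip.h>

static bool rcv_options_sketch(struct sk_buff *skb, struct net_device *dev)
{
        struct ip_options *opt;

        /* Unshare the header first: option processing writes to it. */
        if (skb_cow(skb, skb_headroom(skb)))
                goto drop;

        opt = &IPCB(skb)->opt;
        opt->optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);

        if (ip_options_compile(dev_net(dev), opt, skb))
                goto drop;              /* malformed options */

        if (opt->srr && ip_options_rcv_srr(skb, dev))
                goto drop;              /* source route rejected */

        return false;                   /* options OK, keep going */
drop:
        return true;                    /* caller drops the skb */
}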
305 static bool ip_can_use_hint(const struct sk_buff *skb, const struct iphdr *iph, in ip_can_use_hint() argument
308 return hint && !skb_dst(skb) && ip_hdr(hint)->daddr == iph->daddr && in ip_can_use_hint()
312 int tcp_v4_early_demux(struct sk_buff *skb);
313 int udp_v4_early_demux(struct sk_buff *skb);
315 struct sk_buff *skb, struct net_device *dev, in ip_rcv_finish_core() argument
318 const struct iphdr *iph = ip_hdr(skb); in ip_rcv_finish_core()
322 if (ip_can_use_hint(skb, iph, hint)) { in ip_rcv_finish_core()
323 err = ip_route_use_hint(skb, iph->daddr, iph->saddr, iph->tos, in ip_rcv_finish_core()
330 !skb_dst(skb) && in ip_rcv_finish_core()
331 !skb->sk && in ip_rcv_finish_core()
336 tcp_v4_early_demux(skb); in ip_rcv_finish_core()
339 iph = ip_hdr(skb); in ip_rcv_finish_core()
344 err = udp_v4_early_demux(skb); in ip_rcv_finish_core()
349 iph = ip_hdr(skb); in ip_rcv_finish_core()
359 if (!skb_valid_dst(skb)) { in ip_rcv_finish_core()
360 err = ip_route_input_noref(skb, iph->daddr, iph->saddr, in ip_rcv_finish_core()
367 if (unlikely(skb_dst(skb)->tclassid)) { in ip_rcv_finish_core()
369 u32 idx = skb_dst(skb)->tclassid; in ip_rcv_finish_core()
371 st[idx&0xFF].o_bytes += skb->len; in ip_rcv_finish_core()
373 st[(idx>>16)&0xFF].i_bytes += skb->len; in ip_rcv_finish_core()
377 if (iph->ihl > 5 && ip_rcv_options(skb, dev)) in ip_rcv_finish_core()
380 rt = skb_rtable(skb); in ip_rcv_finish_core()
382 __IP_UPD_PO_STATS(net, IPSTATS_MIB_INMCAST, skb->len); in ip_rcv_finish_core()
384 __IP_UPD_PO_STATS(net, IPSTATS_MIB_INBCAST, skb->len); in ip_rcv_finish_core()
385 } else if (skb->pkt_type == PACKET_BROADCAST || in ip_rcv_finish_core()
386 skb->pkt_type == PACKET_MULTICAST) { in ip_rcv_finish_core()
412 kfree_skb(skb); in ip_rcv_finish_core()
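Lines 305-412 are the routing stage. ip_can_use_hint() (line 305) lets a batch reuse the previous packet's route when the new packet has no dst yet and the destination addresses match; otherwise ip_rcv_finish_core() tries TCP/UDP early demux (lines 336 and 344), which can attach a socket's cached dst, and falls back to a full FIB lookup via ip_route_input_noref() (line 360). Note that iph is reloaded after each demux call (lines 339 and 349): the handlers may pull header data and move skb->head. Lines 367-373 account routing classids (CONFIG_IP_ROUTE_CLASSID), line 377 branches into option handling, and lines 380-386 update multicast/broadcast counters. A condensed sketch of the route decision, restricted to the UDP case and eliding the early-demux sysctl gates; the hint test inlines the condition shown at line 308:

#include <linux/skbuff.h>
#include <net/dst_metadata.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/udp.h>

static int route_decision_sketch(struct sk_buff *skb,
                                 struct net_device *dev,
                                 const struct sk_buff *hint)
{
        const struct iphdr *iph = ip_hdr(skb);

        if (hint && !skb_dst(skb) && ip_hdr(hint)->daddr == iph->daddr)
                return ip_route_use_hint(skb, iph->daddr, iph->saddr,
                                         iph->tos, dev, hint);

        if (!skb_dst(skb) && !skb->sk && iph->protocol == IPPROTO_UDP) {
                int err = udp_v4_early_demux(skb);

                if (err)
                        return err;
                /* skb->head may have moved: reload the header. */
                iph = ip_hdr(skb);
        }

        if (!skb_valid_dst(skb))
                return ip_route_input_noref(skb, iph->daddr, iph->saddr,
                                            iph->tos, dev);
        return 0;
}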
421 static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) in ip_rcv_finish() argument
423 struct net_device *dev = skb->dev; in ip_rcv_finish()
429 skb = l3mdev_ip_rcv(skb); in ip_rcv_finish()
430 if (!skb) in ip_rcv_finish()
433 ret = ip_rcv_finish_core(net, sk, skb, dev, NULL); in ip_rcv_finish()
435 ret = dst_input(skb); in ip_rcv_finish()
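Lines 421-435 are ip_rcv_finish(), the continuation invoked once the NF_INET_PRE_ROUTING verdict is in: l3mdev_ip_rcv() gives an L3 master device (a VRF, for instance) the chance to take over the skb (a NULL return means it was consumed), then the packet is routed and dst_input() dispatches it to the dst's input handler (ip_local_deliver, ip_forward, ip_mr_input, and so on). A minimal sketch, assuming the file-local ip_rcv_finish_core() from the listing is visible:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/l3mdev.h>

/* Routing core from the listing above (file-local in ip_input.c). */
static int ip_rcv_finish_core(struct net *net, struct sock *sk,
                              struct sk_buff *skb, struct net_device *dev,
                              const struct sk_buff *hint);

static int rcv_finish_sketch(struct net *net, struct sock *sk,
                             struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;
        int ret;

        /* An l3mdev (e.g. a VRF) may consume the skb entirely. */
        skb = l3mdev_ip_rcv(skb);
        if (!skb)
                return NET_RX_SUCCESS;

        ret = ip_rcv_finish_core(net, sk, skb, dev, NULL);
        if (ret != NET_RX_DROP)
                ret = dst_input(skb);   /* ip_local_deliver/ip_forward/... */
        return ret;
}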
442 static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net) in ip_rcv_core() argument
450 if (skb->pkt_type == PACKET_OTHERHOST) in ip_rcv_core()
453 __IP_UPD_PO_STATS(net, IPSTATS_MIB_IN, skb->len); in ip_rcv_core()
455 skb = skb_share_check(skb, GFP_ATOMIC); in ip_rcv_core()
456 if (!skb) { in ip_rcv_core()
461 if (!pskb_may_pull(skb, sizeof(struct iphdr))) in ip_rcv_core()
464 iph = ip_hdr(skb); in ip_rcv_core()
485 max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs)); in ip_rcv_core()
487 if (!pskb_may_pull(skb, iph->ihl*4)) in ip_rcv_core()
490 iph = ip_hdr(skb); in ip_rcv_core()
496 if (skb->len < len) { in ip_rcv_core()
506 if (pskb_trim_rcsum(skb, len)) { in ip_rcv_core()
511 iph = ip_hdr(skb); in ip_rcv_core()
512 skb->transport_header = skb->network_header + iph->ihl*4; in ip_rcv_core()
515 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); in ip_rcv_core()
516 IPCB(skb)->iif = skb->skb_iif; in ip_rcv_core()
519 if (!skb_sk_is_prefetched(skb)) in ip_rcv_core()
520 skb_orphan(skb); in ip_rcv_core()
522 return skb; in ip_rcv_core()
529 kfree_skb(skb); in ip_rcv_core()
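Lines 442-529 are ip_rcv_core(), the sanity pass every inbound IPv4 packet must survive: packets addressed to another host are dropped (line 450), the skb is unshared (skb_share_check(), line 455), the fixed header and then the full ihl*4 header are made linear with pskb_may_pull() (lines 461 and 487, reloading iph after each pull because skb->head can move), checksum and tot_len are verified, trailing L2 padding is trimmed with pskb_trim_rcsum() (line 506), the transport header offset is set, the control buffer is zeroed, and the skb is orphaned unless a socket was prefetched (lines 519-520). A sketch of the header-validation core; statistics, GSO accounting, and drop-reason details are elided:

#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <net/checksum.h>
#include <net/ip.h>

static struct sk_buff *rcv_core_sketch(struct sk_buff *skb)
{
        const struct iphdr *iph;
        u32 len;

        /* The header may span fragments: make the fixed part linear. */
        if (!pskb_may_pull(skb, sizeof(struct iphdr)))
                goto drop;
        iph = ip_hdr(skb);

        if (iph->ihl < 5 || iph->version != 4)
                goto drop;

        /* Pull the options too; skb->head may move, so reload iph. */
        if (!pskb_may_pull(skb, iph->ihl * 4))
                goto drop;
        iph = ip_hdr(skb);

        if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
                goto drop;

        len = ntohs(iph->tot_len);
        if (skb->len < len || len < (u32)iph->ihl * 4)
                goto drop;

        /* Strip L2 padding while keeping checksum state valid. */
        if (pskb_trim_rcsum(skb, len))
                goto drop;
        iph = ip_hdr(skb);

        skb->transport_header = skb->network_header + iph->ihl * 4;
        memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
        return skb;
drop:
        kfree_skb(skb);
        return NULL;
}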
537 int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, in ip_rcv() argument
542 skb = ip_rcv_core(skb, net); in ip_rcv()
543 if (skb == NULL) in ip_rcv()
547 net, NULL, skb, dev, NULL, in ip_rcv()
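Lines 537-547 are ip_rcv() itself: validate through ip_rcv_core(), then enter NF_INET_PRE_ROUTING with ip_rcv_finish() as the okfn. The function reaches the stack through a struct packet_type for ETH_P_IP; the sketch below mirrors the registration net/ipv4/af_inet.c performs and is illustrative only:

#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <net/ip.h>

static struct packet_type ip_packet_type_sketch __read_mostly = {
        .type      = cpu_to_be16(ETH_P_IP),
        .func      = ip_rcv,            /* single-skb path */
        .list_func = ip_list_rcv,       /* batched path, see below */
};

static void register_sketch(void)
{
        dev_add_pack(&ip_packet_type_sketch);
}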
553 struct sk_buff *skb, *next; in ip_sublist_rcv_finish() local
555 list_for_each_entry_safe(skb, next, head, list) { in ip_sublist_rcv_finish()
556 skb_list_del_init(skb); in ip_sublist_rcv_finish()
557 dst_input(skb); in ip_sublist_rcv_finish()
562 struct sk_buff *skb, int rt_type) in ip_extract_route_hint() argument
567 return skb; in ip_extract_route_hint()
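Lines 553-567 hold two small batching helpers. ip_sublist_rcv_finish() flushes a sublist whose members all resolved to the same dst; skb_list_del_init() matters because dst_input() handlers expect a standalone skb, not one still linked into a list. ip_extract_route_hint() decides whether the packet just routed may serve as the hint for the next one; broadcast and multicast routes (and, in the real code, netns with custom FIB rules) are excluded. Sketches of both, with hypothetical _sketch names:

#include <linux/list.h>
#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/route.h>

/* Flush a batch that shares one dst. skb_list_del_init() is
 * essential: dst_input() handlers expect an unlisted skb. */
static void sublist_input_sketch(struct list_head *head)
{
        struct sk_buff *skb, *next;

        list_for_each_entry_safe(skb, next, head, list) {
                skb_list_del_init(skb);
                dst_input(skb);
        }
}

/* Only unicast routes are safe to reuse as a hint; multicast and
 * broadcast dsts must not leak onto the next packet. */
static struct sk_buff *extract_hint_sketch(struct sk_buff *skb, int rt_type)
{
        if (rt_type == RTN_BROADCAST || rt_type == RTN_MULTICAST)
                return NULL;
        return skb;
}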
573 struct sk_buff *skb, *next, *hint = NULL; in ip_list_rcv_finish() local
578 list_for_each_entry_safe(skb, next, head, list) { in ip_list_rcv_finish()
579 struct net_device *dev = skb->dev; in ip_list_rcv_finish()
582 skb_list_del_init(skb); in ip_list_rcv_finish()
586 skb = l3mdev_ip_rcv(skb); in ip_list_rcv_finish()
587 if (!skb) in ip_list_rcv_finish()
589 if (ip_rcv_finish_core(net, sk, skb, dev, hint) == NET_RX_DROP) in ip_list_rcv_finish()
592 dst = skb_dst(skb); in ip_list_rcv_finish()
594 hint = ip_extract_route_hint(net, skb, in ip_list_rcv_finish()
604 list_add_tail(&skb->list, &sublist); in ip_list_rcv_finish()
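Lines 573-604 are ip_list_rcv_finish(), the batched counterpart of ip_rcv_finish(): each packet is routed via ip_rcv_finish_core() with the running hint, and consecutive packets that resolve to the same dst are collected into a sublist so they can be pushed through dst_input() together; when the dst changes, the current sublist is flushed, the hint is refreshed from the packet just routed, and a new sublist begins. The per-skb l3mdev_ip_rcv() step (lines 586-587) is elided below. A sketch of the same-dst batching loop, reusing the helpers sketched above:

#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/route.h>

/* Routing core from the listing above (file-local in ip_input.c). */
static int ip_rcv_finish_core(struct net *net, struct sock *sk,
                              struct sk_buff *skb, struct net_device *dev,
                              const struct sk_buff *hint);

static void list_rcv_finish_sketch(struct net *net, struct sock *sk,
                                   struct list_head *head)
{
        struct sk_buff *skb, *next, *hint = NULL;
        struct dst_entry *curr_dst = NULL;
        LIST_HEAD(sublist);

        list_for_each_entry_safe(skb, next, head, list) {
                struct dst_entry *dst;

                skb_list_del_init(skb);
                if (ip_rcv_finish_core(net, sk, skb, skb->dev, hint) ==
                    NET_RX_DROP)
                        continue;

                dst = skb_dst(skb);
                if (curr_dst != dst) {
                        struct rtable *rt = (struct rtable *)dst;

                        hint = extract_hint_sketch(skb, rt->rt_type);
                        /* dst changed: flush what was batched so far. */
                        if (!list_empty(&sublist))
                                sublist_input_sketch(&sublist);
                        INIT_LIST_HEAD(&sublist);
                        curr_dst = dst;
                }
                list_add_tail(&skb->list, &sublist);
        }
        sublist_input_sketch(&sublist);         /* flush the tail */
}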
624 struct sk_buff *skb, *next; in ip_list_rcv() local
628 list_for_each_entry_safe(skb, next, head, list) { in ip_list_rcv()
629 struct net_device *dev = skb->dev; in ip_list_rcv()
632 skb_list_del_init(skb); in ip_list_rcv()
633 skb = ip_rcv_core(skb, net); in ip_list_rcv()
634 if (skb == NULL) in ip_list_rcv()
646 list_add_tail(&skb->list, &sublist); in ip_list_rcv()
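Lines 624-646 are ip_list_rcv(), the list_func registered alongside ip_rcv() (see the packet_type sketch above). It applies the same sublist pattern as ip_list_rcv_finish(), but keyed on the (net, device) pair rather than the dst: survivors of ip_rcv_core() that share a device and netns are batched so a single NF_HOOK_LIST() traversal covers the whole group. A sketch of that per-sublist dispatch, essentially the shape of the file-local ip_sublist_rcv(), with its okfn assumed visible:

#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>

/* okfn from the listing above (file-local in ip_input.c). */
static int ip_rcv_finish(struct net *net, struct sock *sk,
                         struct sk_buff *skb);

/* One netfilter traversal covers the whole same-(net,dev) batch;
 * survivors are then routed and dispatched together. */
static void sublist_rcv_sketch(struct list_head *head,
                               struct net_device *dev, struct net *net)
{
        NF_HOOK_LIST(NFPROTO_IPV4, NF_INET_PRE_ROUTING, net, NULL,
                     head, dev, NULL, ip_rcv_finish);
}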