/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
 * linux/can/skb.h
 *
 * Definitions for the CAN network socket buffer
 *
 * Copyright (C) 2012 Oliver Hartkopp <socketcan@hartkopp.net>
 *
 */

#ifndef _CAN_SKB_H
#define _CAN_SKB_H

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <net/sock.h>

/*
 * The struct can_skb_priv is used to transport additional information along
 * with the stored struct can(fd)_frame that cannot be contained in existing
 * struct sk_buff elements.
 * N.B. that this information must not be modified in cloned CAN sk_buffs.
 * To modify the CAN frame content or the struct can_skb_priv content
 * skb_copy() needs to be used instead of skb_clone().
 */

/**
 * struct can_skb_priv - private additional data inside CAN sk_buffs
 * @ifindex:	ifindex of the first interface the CAN frame appeared on
 * @skbcnt:	atomic counter to provide a unique id together with the skb pointer
 * @cf:		align to the following CAN frame at skb->data
 */
struct can_skb_priv {
	int ifindex;
	int skbcnt;
	struct can_frame cf[];
};

static inline struct can_skb_priv *can_skb_prv(struct sk_buff *skb)
{
	return (struct can_skb_priv *)(skb->head);
}

static inline void can_skb_reserve(struct sk_buff *skb)
{
	skb_reserve(skb, sizeof(struct can_skb_priv));
}

static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk)
{
	/* If the socket has already been closed by user space, the
	 * refcount may already be 0 (and the socket will be freed
	 * after the last TX skb has been freed). So only increase
	 * socket refcount if the refcount is > 0.
	 */
	if (sk && refcount_inc_not_zero(&sk->sk_refcnt)) {
		skb->destructor = sock_efree;
		skb->sk = sk;
	}
}

/*
 * returns an unshared skb owned by the original sock to be echo'ed back
 */
static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb)
{
	struct sk_buff *nskb;

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (unlikely(!nskb)) {
		kfree_skb(skb);
		return NULL;
	}

	can_skb_set_owner(nskb, skb->sk);
	consume_skb(skb);
	return nskb;
}
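
/*
 * Usage sketch (illustrative only, not part of this header's API):
 *
 * An skb carrying a CAN frame needs sizeof(struct can_skb_priv) bytes of
 * headroom in front of skb->data.  A driver (or the CAN core) therefore
 * allocates the skb with that extra room, reserves it with
 * can_skb_reserve() and fills in the private data via can_skb_prv().
 * In-tree drivers normally use the alloc_can_skb() helper from
 * <linux/can/dev.h>, which roughly does the following (simplified, error
 * handling and skb field setup omitted):
 *
 *	skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
 *			       sizeof(struct can_frame));
 *	can_skb_reserve(skb);
 *	can_skb_prv(skb)->ifindex = dev->ifindex;
 *	can_skb_prv(skb)->skbcnt = 0;
 *	cf = skb_put_zero(skb, sizeof(struct can_frame));
 *
 * On the TX side, can_create_echo_skb() hands back an unshared skb that
 * still belongs to the sending socket, so the driver can keep it (e.g. via
 * can_put_echo_skb() from <linux/can/dev.h>) and echo the frame back to
 * that socket once transmission has completed.
 */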

#endif /* !_CAN_SKB_H */