/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SUNVNETCOMMON_H
#define _SUNVNETCOMMON_H

#include <linux/interrupt.h>

/* Length of time (or less) within which we expect pending descriptors
 * to be marked as VIO_DESC_DONE and their skbs to be ready for freeing.
 */
#define VNET_CLEAN_TIMEOUT		((HZ / 100) + 1)
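/* For illustration: with HZ=1000 the timeout above is 11 jiffies (~11 ms);
 * with HZ=100 it is 2 jiffies (~20 ms).
 */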

#define VNET_MAXPACKET			(65535ULL + ETH_HLEN + VLAN_HLEN)
#define VNET_TX_RING_SIZE		512
#define VNET_TX_WAKEUP_THRESH(dr)	((dr)->pending / 4)

#define VNET_MINTSO			2048	/* VIO protocol's minimum TSO len */
#define VNET_MAXTSO			65535	/* VIO protocol's maximum TSO len */

#define VNET_MAX_MTU			65535

/* VNET packets are sent in buffers with the first 6 bytes skipped
 * so that after the ethernet header the IPv4/IPv6 headers are
 * properly aligned.
 */
#define VNET_PACKET_SKIP		6
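/* e.g. 6 skipped bytes plus a 14-byte Ethernet header places the L3 header
 * at offset 20, a 4-byte boundary (assuming a suitably aligned buffer start).
 */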

#define VNET_MAXCOOKIES	(VNET_MAXPACKET / PAGE_SIZE + 1)
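/* e.g. with 8 KB pages (typical on sparc64) this works out to
 * (65535 + 14 + 4) / 8192 + 1 = 9 LDC cookies per packet.
 */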

#define VNET_MAX_TXQS		16

struct vnet_tx_entry {
	struct sk_buff		*skb;
	unsigned int		ncookies;
	struct ldc_trans_cookie	cookies[VNET_MAXCOOKIES];
};

struct vnet;

struct vnet_port_stats {
	/* keep them all the same size */
	u32 rx_bytes;
	u32 tx_bytes;
	u32 rx_packets;
	u32 tx_packets;
	u32 event_up;
	u32 event_reset;
	u32 q_placeholder;
};

#define NUM_VNET_PORT_STATS (sizeof(struct vnet_port_stats) / sizeof(u32))
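/* With the seven u32 counters above (and no implicit padding, since every
 * member is a u32), NUM_VNET_PORT_STATS evaluates to 7.
 */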

/* Structure to describe a vnet-port or vsw-port in the MD.
 * If the vsw bit is set, this structure represents a vswitch
 * port, and the net_device can be found from ->dev. If the
 * vsw bit is not set, the net_device is available from ->vp->dev.
 * See the VNET_PORT_TO_NET_DEVICE macro below.
 */
struct vnet_port {
	struct vio_driver_state	vio;

	struct vnet_port_stats	stats;

	struct hlist_node	hash;
	u8			raddr[ETH_ALEN];
	unsigned		switch_port:1;
	unsigned		tso:1;
	unsigned		vsw:1;
	unsigned		__pad:13;

	struct vnet		*vp;
	struct net_device	*dev;

	struct vnet_tx_entry	tx_bufs[VNET_TX_RING_SIZE];

	struct list_head	list;

	u32			stop_rx_idx;
	bool			stop_rx;
	bool			start_cons;

	struct timer_list	clean_timer;

	u64			rmtu;
	u16			tsolen;

	struct napi_struct	napi;
	u32			napi_stop_idx;
	bool			napi_resume;
	int			rx_event;
	u16			q_index;
};

static inline struct vnet_port *to_vnet_port(struct vio_driver_state *vio)
{
	return container_of(vio, struct vnet_port, vio);
}

#define VNET_PORT_HASH_SIZE	16
#define VNET_PORT_HASH_MASK	(VNET_PORT_HASH_SIZE - 1)

static inline unsigned int vnet_hashfn(u8 *mac)
{
	unsigned int val = mac[4] ^ mac[5];

	return val & (VNET_PORT_HASH_MASK);
}
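/* Example: a remote MAC ending in ...:ab:cd hashes to
 * (0xab ^ 0xcd) & 0xf = 0x66 & 0xf = bucket 6.
 */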

struct vnet_mcast_entry {
	u8			addr[ETH_ALEN];
	u8			sent;
	u8			hit;
	struct vnet_mcast_entry	*next;
};

struct vnet {
	spinlock_t		lock; /* Protects port_list and port_hash. */
	struct net_device	*dev;
	u32			msg_enable;
	u8			q_used[VNET_MAX_TXQS];
	struct list_head	port_list;
	struct hlist_head	port_hash[VNET_PORT_HASH_SIZE];
	struct vnet_mcast_entry	*mcast_list;
	struct list_head	list;
	u64			local_mac;
	int			nports;
};

/* Def used by common code to get the net_device from the proper location */
#define VNET_PORT_TO_NET_DEVICE(__port) \
	((__port)->vsw ? (__port)->dev : (__port)->vp->dev)
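/* Typical use (illustrative): in code shared between the vnet and vsw
 * front ends, struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port)
 * yields the right device regardless of which kind of port this is.
 */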

/* Common funcs */
void sunvnet_clean_timer_expire_common(struct timer_list *t);
int sunvnet_open_common(struct net_device *dev);
int sunvnet_close_common(struct net_device *dev);
void sunvnet_set_rx_mode_common(struct net_device *dev, struct vnet *vp);
int sunvnet_set_mac_addr_common(struct net_device *dev, void *p);
void sunvnet_tx_timeout_common(struct net_device *dev, unsigned int txqueue);
netdev_tx_t
sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev,
			  struct vnet_port *(*vnet_tx_port)
			  (struct sk_buff *, struct net_device *));
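/* Illustrative only: a front-end driver's ndo_start_xmit would typically
 * wrap this helper with its own port-lookup callback, e.g.
 *
 *	static netdev_tx_t my_start_xmit(struct sk_buff *skb,
 *					 struct net_device *dev)
 *	{
 *		return sunvnet_start_xmit_common(skb, dev, my_tx_port_find);
 *	}
 *
 * where my_start_xmit() and my_tx_port_find() are hypothetical names; the
 * callback maps an outgoing skb to the destination struct vnet_port.
 */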
#ifdef CONFIG_NET_POLL_CONTROLLER
void sunvnet_poll_controller_common(struct net_device *dev, struct vnet *vp);
#endif
void sunvnet_event_common(void *arg, int event);
int sunvnet_send_attr_common(struct vio_driver_state *vio);
int sunvnet_handle_attr_common(struct vio_driver_state *vio, void *arg);
void sunvnet_handshake_complete_common(struct vio_driver_state *vio);
int sunvnet_poll_common(struct napi_struct *napi, int budget);
void sunvnet_port_free_tx_bufs_common(struct vnet_port *port);
void vnet_port_reset(struct vnet_port *port);
bool sunvnet_port_is_up_common(struct vnet_port *vnet);
void sunvnet_port_add_txq_common(struct vnet_port *port);
void sunvnet_port_rm_txq_common(struct vnet_port *port);

#endif /* _SUNVNETCOMMON_H */