/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * IBM Power Virtual Ethernet Device Driver
 *
 * Copyright (C) IBM Corporation, 2003, 2010
 *
 * Authors: Dave Larson <larson1@us.ibm.com>
 *	    Santiago Leon <santil@linux.vnet.ibm.com>
 *	    Brian King <brking@linux.vnet.ibm.com>
 *	    Robert Jennings <rcj@linux.vnet.ibm.com>
 *	    Anton Blanchard <anton@au.ibm.com>
 */
13*4882a593Smuzhiyun
14*4882a593Smuzhiyun #ifndef _IBMVETH_H
15*4882a593Smuzhiyun #define _IBMVETH_H
16*4882a593Smuzhiyun
/* Constants for the H_MULTICAST_CTRL hcall.
 *
 * The "Modify" bit selects which setting the hcall changes; the matching
 * "Enable" bit supplies the new value (set = enable, clear = disable).
 */
#define IbmVethMcastReceptionModifyBit 0x80000UL
#define IbmVethMcastReceptionEnableBit 0x20000UL
#define IbmVethMcastFilterModifyBit 0x40000UL
#define IbmVethMcastFilterEnableBit 0x10000UL

/* Pre-combined command words built from the bits above */
#define IbmVethMcastEnableRecv (IbmVethMcastReceptionModifyBit | IbmVethMcastReceptionEnableBit)
#define IbmVethMcastDisableRecv (IbmVethMcastReceptionModifyBit)
#define IbmVethMcastEnableFiltering (IbmVethMcastFilterModifyBit | IbmVethMcastFilterEnableBit)
#define IbmVethMcastDisableFiltering (IbmVethMcastFilterModifyBit)
/* Multicast filter table sub-commands */
#define IbmVethMcastAddFilter 0x1UL
#define IbmVethMcastRemoveFilter 0x2UL
#define IbmVethMcastClearFilterTable 0x3UL
30*4882a593Smuzhiyun
/* Attribute bits exchanged with the hypervisor via H_ILLAN_ATTRIBUTES
 * (see h_illan_attributes() below). They describe optional adapter
 * capabilities such as large-send and checksum offload.
 */
#define IBMVETH_ILLAN_LRG_SR_ENABLED 0x0000000000010000UL
#define IBMVETH_ILLAN_LRG_SND_SUPPORT 0x0000000000008000UL
#define IBMVETH_ILLAN_PADDED_PKT_CSUM 0x0000000000002000UL
#define IBMVETH_ILLAN_TRUNK_PRI_MASK 0x0000000000000F00UL
#define IBMVETH_ILLAN_IPV6_TCP_CSUM 0x0000000000000004UL
#define IBMVETH_ILLAN_IPV4_TCP_CSUM 0x0000000000000002UL
#define IBMVETH_ILLAN_ACTIVE_TRUNK 0x0000000000000001UL
38*4882a593Smuzhiyun
/* hcall macros: thin wrappers around the pSeries hypervisor call
 * primitives. "ua" is the adapter's unit address in every case.
 */

/* Register the buffer list, receive queue and filter list with firmware */
#define h_register_logical_lan(ua, buflst, rxq, fltlst, mac) \
  plpar_hcall_norets(H_REGISTER_LOGICAL_LAN, ua, buflst, rxq, fltlst, mac)

/* Tear down the logical LAN registration for this adapter */
#define h_free_logical_lan(ua) \
  plpar_hcall_norets(H_FREE_LOGICAL_LAN, ua)

/* Hand one receive buffer (descriptor in "buf") to the hypervisor */
#define h_add_logical_lan_buffer(ua, buf) \
  plpar_hcall_norets(H_ADD_LOGICAL_LAN_BUFFER, ua, buf)
48*4882a593Smuzhiyun
h_send_logical_lan(unsigned long unit_address,unsigned long desc1,unsigned long desc2,unsigned long desc3,unsigned long desc4,unsigned long desc5,unsigned long desc6,unsigned long corellator_in,unsigned long * corellator_out,unsigned long mss,unsigned long large_send_support)49*4882a593Smuzhiyun static inline long h_send_logical_lan(unsigned long unit_address,
50*4882a593Smuzhiyun unsigned long desc1, unsigned long desc2, unsigned long desc3,
51*4882a593Smuzhiyun unsigned long desc4, unsigned long desc5, unsigned long desc6,
52*4882a593Smuzhiyun unsigned long corellator_in, unsigned long *corellator_out,
53*4882a593Smuzhiyun unsigned long mss, unsigned long large_send_support)
54*4882a593Smuzhiyun {
55*4882a593Smuzhiyun long rc;
56*4882a593Smuzhiyun unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
57*4882a593Smuzhiyun
58*4882a593Smuzhiyun if (large_send_support)
59*4882a593Smuzhiyun rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address,
60*4882a593Smuzhiyun desc1, desc2, desc3, desc4, desc5, desc6,
61*4882a593Smuzhiyun corellator_in, mss);
62*4882a593Smuzhiyun else
63*4882a593Smuzhiyun rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address,
64*4882a593Smuzhiyun desc1, desc2, desc3, desc4, desc5, desc6,
65*4882a593Smuzhiyun corellator_in);
66*4882a593Smuzhiyun
67*4882a593Smuzhiyun *corellator_out = retbuf[0];
68*4882a593Smuzhiyun
69*4882a593Smuzhiyun return rc;
70*4882a593Smuzhiyun }
71*4882a593Smuzhiyun
h_illan_attributes(unsigned long unit_address,unsigned long reset_mask,unsigned long set_mask,unsigned long * ret_attributes)72*4882a593Smuzhiyun static inline long h_illan_attributes(unsigned long unit_address,
73*4882a593Smuzhiyun unsigned long reset_mask, unsigned long set_mask,
74*4882a593Smuzhiyun unsigned long *ret_attributes)
75*4882a593Smuzhiyun {
76*4882a593Smuzhiyun long rc;
77*4882a593Smuzhiyun unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
78*4882a593Smuzhiyun
79*4882a593Smuzhiyun rc = plpar_hcall(H_ILLAN_ATTRIBUTES, retbuf, unit_address,
80*4882a593Smuzhiyun reset_mask, set_mask);
81*4882a593Smuzhiyun
82*4882a593Smuzhiyun *ret_attributes = retbuf[0];
83*4882a593Smuzhiyun
84*4882a593Smuzhiyun return rc;
85*4882a593Smuzhiyun }
86*4882a593Smuzhiyun
/* Control multicast reception/filtering; "cmd" is built from the
 * IbmVethMcast* constants above. */
#define h_multicast_ctrl(ua, cmd, mac) \
  plpar_hcall_norets(H_MULTICAST_CTRL, ua, cmd, mac)

/* Change the adapter's MAC address */
#define h_change_logical_lan_mac(ua, mac) \
  plpar_hcall_norets(H_CHANGE_LOGICAL_LAN_MAC, ua, mac)
92*4882a593Smuzhiyun
#define IBMVETH_NUM_BUFF_POOLS 5
#define IBMVETH_IO_ENTITLEMENT_DEFAULT 4243456 /* MTU of 1500 needs 4.2Mb */
#define IBMVETH_BUFF_OH 22 /* Overhead: 14 ethernet header + 8 opaque handle */
#define IBMVETH_MIN_MTU 68
#define IBMVETH_MAX_POOL_COUNT 4096
#define IBMVETH_BUFF_LIST_SIZE 4096
#define IBMVETH_FILT_LIST_SIZE 4096
#define IBMVETH_MAX_BUF_SIZE (1024 * 128)

/* Per-pool defaults, indexed 0..IBMVETH_NUM_BUFF_POOLS-1: buffer size and
 * buffer count for each rx pool. pool_count_cmo presumably holds reduced
 * counts for Cooperative Memory Overcommit configurations — confirm against
 * the driver's probe path. pool_active marks which pools start enabled.
 */
static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
static int pool_count[] = { 256, 512, 256, 256, 256 };
static int pool_count_cmo[] = { 256, 512, 256, 256, 64 };
static int pool_active[] = { 1, 1, 0, 0, 1};

/* Sentinel for an unused slot in a pool's free_map */
#define IBM_VETH_INVALID_MAP ((u16)0xffff)
108*4882a593Smuzhiyun
/* One receive buffer pool: a set of equally-sized DMA-mapped buffers
 * tracked by parallel per-slot arrays (free_map / dma_addr / skbuff).
 */
struct ibmveth_buff_pool {
	u32 size;		/* number of slots in the pool */
	u32 index;		/* pool number within rx_buff_pool[] */
	u32 buff_size;		/* byte size of each buffer */
	u32 threshold;		/* NOTE(review): looks like a replenish low-water mark — confirm in ibmveth.c */
	atomic_t available;	/* buffers currently available/outstanding (exact meaning defined in ibmveth.c) */
	u32 consumer_index;	/* next slot to consume from free_map */
	u32 producer_index;	/* next slot to return to free_map */
	u16 *free_map;		/* per-slot free list; IBM_VETH_INVALID_MAP marks an empty entry */
	dma_addr_t *dma_addr;	/* per-slot DMA handle */
	struct sk_buff **skbuff;	/* per-slot skb pointer */
	int active;		/* non-zero when this pool is enabled */
	struct kobject kobj;	/* sysfs object for per-pool attributes */
};
123*4882a593Smuzhiyun
/* The receive queue shared with the hypervisor: an array of
 * ibmveth_rx_q_entry slots walked with a toggle bit to detect new entries.
 */
struct ibmveth_rx_q {
	u64 index;		/* current position in the queue */
	u64 num_slots;		/* number of entries in queue_addr[] */
	u64 toggle;		/* expected value of the entry toggle bit this lap */
	dma_addr_t queue_dma;	/* DMA address of the queue, given to firmware */
	u32 queue_len;		/* queue size in bytes */
	struct ibmveth_rx_q_entry *queue_addr;	/* kernel virtual address of the queue */
};
132*4882a593Smuzhiyun
/* Per-device driver state for one ibmveth adapter. */
struct ibmveth_adapter {
	struct vio_dev *vdev;		/* underlying VIO bus device */
	struct net_device *netdev;	/* associated network interface */
	struct napi_struct napi;	/* NAPI context for rx polling */
	unsigned int mcastFilterSize;	/* capacity of the firmware multicast filter table */
	/* Buffer list and filter list pages registered with the hypervisor
	 * via h_register_logical_lan(). */
	void * buffer_list_addr;
	void * filter_list_addr;
	dma_addr_t buffer_list_dma;
	dma_addr_t filter_list_dma;
	struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
	struct ibmveth_rx_q rx_queue;
	int pool_config;		/* non-zero while pool layout is being reconfigured — confirm usage in ibmveth.c */
	int rx_csum;			/* rx checksum offload enabled */
	int large_send;			/* large send (TSO) enabled */
	bool is_active_trunk;		/* IBMVETH_ILLAN_ACTIVE_TRUNK reported by firmware */
	/* Bounce buffer for tx frames that cannot be DMA-mapped in place */
	void *bounce_buffer;
	dma_addr_t bounce_buffer_dma;

	/* Capabilities reported by firmware (H_ILLAN_ATTRIBUTES) */
	u64 fw_ipv6_csum_support;
	u64 fw_ipv4_csum_support;
	u64 fw_large_send_support;
	/* adapter specific stats */
	u64 replenish_task_cycles;
	u64 replenish_no_mem;
	u64 replenish_add_buff_failure;
	u64 replenish_add_buff_success;
	u64 rx_invalid_buffer;
	u64 rx_no_buffer;
	u64 tx_map_failed;
	u64 tx_send_failed;
	u64 tx_large_packets;
	u64 rx_large_packets;
	/* Ethtool settings */
	u8 duplex;
	u32 speed;
};
169*4882a593Smuzhiyun
170*4882a593Smuzhiyun /*
171*4882a593Smuzhiyun * We pass struct ibmveth_buf_desc_fields to the hypervisor in registers,
172*4882a593Smuzhiyun * so we don't need to byteswap the two elements. However since we use
173*4882a593Smuzhiyun * a union (ibmveth_buf_desc) to convert from the struct to a u64 we
174*4882a593Smuzhiyun * do end up with endian specific ordering of the elements and that
175*4882a593Smuzhiyun * needs correcting.
176*4882a593Smuzhiyun */
/* Two 32-bit halves of a buffer descriptor; field order depends on host
 * endianness so that the union below yields the layout firmware expects
 * (see the comment block above this struct). */
struct ibmveth_buf_desc_fields {
#ifdef __BIG_ENDIAN
	u32 flags_len;
	u32 address;
#else
	u32 address;
	u32 flags_len;
#endif
/* Flag bits in flags_len; the low 24 bits are the buffer length */
#define IBMVETH_BUF_VALID	0x80000000
#define IBMVETH_BUF_TOGGLE	0x40000000
#define IBMVETH_BUF_LRG_SND	0x04000000
#define IBMVETH_BUF_NO_CSUM	0x02000000
#define IBMVETH_BUF_CSUM_GOOD	0x01000000
#define IBMVETH_BUF_LEN_MASK	0x00FFFFFF
};

/* View of a buffer descriptor as either a single u64 (as passed to the
 * hcalls) or its two 32-bit fields. */
union ibmveth_buf_desc {
	u64 desc;
	struct ibmveth_buf_desc_fields fields;
};
197*4882a593Smuzhiyun
/* One entry in the receive queue written by the hypervisor.
 * flags_off and length are big-endian on the wire (__be32); the
 * correlator is opaque to firmware and used only by the OS.
 */
struct ibmveth_rx_q_entry {
	__be32 flags_off;
/* Flag bits in flags_off; the low 16 bits are the data offset */
#define IBMVETH_RXQ_TOGGLE		0x80000000
#define IBMVETH_RXQ_TOGGLE_SHIFT	31
#define IBMVETH_RXQ_VALID		0x40000000
#define IBMVETH_RXQ_LRG_PKT		0x04000000
#define IBMVETH_RXQ_NO_CSUM		0x02000000
#define IBMVETH_RXQ_CSUM_GOOD		0x01000000
#define IBMVETH_RXQ_OFF_MASK		0x0000FFFF

	__be32 length;
	/* correlator is only used by the OS, no need to byte swap */
	u64 correlator;
};
212*4882a593Smuzhiyun
213*4882a593Smuzhiyun #endif /* _IBMVETH_H */
214