// SPDX-License-Identifier: GPL-2.0
/*
 * Networking over Thunderbolt cable using Apple ThunderboltIP protocol
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Amir Levy <amir.jer.levy@intel.com>
 *          Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/atomic.h>
#include <linux/highmem.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/sizes.h>
#include <linux/thunderbolt.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>

#include <net/ip6_checksum.h>

/* Protocol timeouts in ms */
#define TBNET_LOGIN_DELAY	4500
#define TBNET_LOGIN_TIMEOUT	500
#define TBNET_LOGOUT_TIMEOUT	100

#define TBNET_RING_SIZE		256
#define TBNET_LOCAL_PATH	0xf
#define TBNET_LOGIN_RETRIES	60
#define TBNET_LOGOUT_RETRIES	5
#define TBNET_MATCH_FRAGS_ID	BIT(1)
#define TBNET_MAX_MTU		SZ_64K
#define TBNET_FRAME_SIZE	SZ_4K
#define TBNET_MAX_PAYLOAD_SIZE	\
	(TBNET_FRAME_SIZE - sizeof(struct thunderbolt_ip_frame_header))
/* Rx packets need to hold space for skb_shared_info */
#define TBNET_RX_MAX_SIZE	\
	(TBNET_FRAME_SIZE + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define TBNET_RX_PAGE_ORDER	get_order(TBNET_RX_MAX_SIZE)
#define TBNET_RX_PAGE_SIZE	(PAGE_SIZE << TBNET_RX_PAGE_ORDER)
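/*
 * Note: with 4 KiB pages this works out to order-1 (8 KiB) Rx
 * allocations, since a 4 KiB frame plus the aligned skb_shared_info no
 * longer fits in a single page.
 */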

#define TBNET_L0_PORT_NUM(route) ((route) & GENMASK(5, 0))

/**
 * struct thunderbolt_ip_frame_header - Header for each Thunderbolt frame
 * @frame_size: size of the data in the frame
 * @frame_index: running index of the frames within a packet
 * @frame_id: ID used to match frames to a specific packet
 * @frame_count: how many frames assemble a full packet
 *
 * Each data frame passed to the high-speed DMA ring has this header. If
 * the XDomain network directory announces that %TBNET_MATCH_FRAGS_ID is
 * supported then @frame_id is filled, otherwise it stays %0.
 */
struct thunderbolt_ip_frame_header {
	u32 frame_size;
	u16 frame_index;
	u16 frame_id;
	u32 frame_count;
};
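/*
 * All frame header fields are little-endian on the wire: the driver
 * converts them with le32_to_cpu()/le16_to_cpu() on the Rx path and
 * fills them with cpu_to_le32()/cpu_to_le16() on the Tx path.
 */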

enum thunderbolt_ip_frame_pdf {
	TBIP_PDF_FRAME_START = 1,
	TBIP_PDF_FRAME_END,
};

enum thunderbolt_ip_type {
	TBIP_LOGIN,
	TBIP_LOGIN_RESPONSE,
	TBIP_LOGOUT,
	TBIP_STATUS,
};

struct thunderbolt_ip_header {
	u32 route_hi;
	u32 route_lo;
	u32 length_sn;
	uuid_t uuid;
	uuid_t initiator_uuid;
	uuid_t target_uuid;
	u32 type;
	u32 command_id;
};

#define TBIP_HDR_LENGTH_MASK		GENMASK(5, 0)
#define TBIP_HDR_SN_MASK		GENMASK(28, 27)
#define TBIP_HDR_SN_SHIFT		27
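/*
 * Illustrative example of the length_sn encoding: the length field in
 * the low bits counts dwords after route_hi/route_lo/length_sn, and the
 * 2-bit sequence number lives in bits 28:27. A 92-byte login packet
 * sent with sequence number 2 is thus encoded as
 * (92 - 12) / 4 | (2 << TBIP_HDR_SN_SHIFT).
 */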

struct thunderbolt_ip_login {
	struct thunderbolt_ip_header hdr;
	u32 proto_version;
	u32 transmit_path;
	u32 reserved[4];
};

#define TBIP_LOGIN_PROTO_VERSION	1

struct thunderbolt_ip_login_response {
	struct thunderbolt_ip_header hdr;
	u32 status;
	u32 receiver_mac[2];
	u32 receiver_mac_len;
	u32 reserved[4];
};

struct thunderbolt_ip_logout {
	struct thunderbolt_ip_header hdr;
};

struct thunderbolt_ip_status {
	struct thunderbolt_ip_header hdr;
	u32 status;
};

struct tbnet_stats {
	u64 tx_packets;
	u64 rx_packets;
	u64 tx_bytes;
	u64 rx_bytes;
	u64 rx_errors;
	u64 tx_errors;
	u64 rx_length_errors;
	u64 rx_over_errors;
	u64 rx_crc_errors;
	u64 rx_missed_errors;
};

struct tbnet_frame {
	struct net_device *dev;
	struct page *page;
	struct ring_frame frame;
};

struct tbnet_ring {
	struct tbnet_frame frames[TBNET_RING_SIZE];
	unsigned int cons;
	unsigned int prod;
	struct tb_ring *ring;
};
/**
 * struct tbnet - ThunderboltIP network driver private data
 * @svc: XDomain service the driver is bound to
 * @xd: XDomain the service belongs to
 * @handler: ThunderboltIP configuration protocol handler
 * @dev: Networking device
 * @napi: NAPI structure for Rx polling
 * @stats: Network statistics
 * @skb: Network packet that is currently processed on Rx path
 * @command_id: ID used for next configuration protocol packet
 * @login_sent: ThunderboltIP login message successfully sent
 * @login_received: ThunderboltIP login message received from the remote
 *		    host
 * @transmit_path: HopID the other end needs to use when building the
 *		   opposite side path.
 * @connection_lock: Lock serializing access to @login_sent,
 *		     @login_received and @transmit_path.
 * @login_retries: Number of login retries currently done
 * @login_work: Worker to send ThunderboltIP login packets
 * @connected_work: Worker that finalizes the ThunderboltIP connection
 *		    setup and enables DMA paths for high speed data
 *		    transfers
 * @disconnect_work: Worker that handles tearing down the ThunderboltIP
 *		     connection
 * @rx_hdr: Copy of the currently processed Rx frame header. Used when a
 *	    network packet consists of multiple Thunderbolt frames.
 *	    In host byte order.
 * @rx_ring: Software ring holding Rx frames
 * @frame_id: Frame ID used for next Tx packet
 *            (if %TBNET_MATCH_FRAGS_ID is supported on both ends)
 * @tx_ring: Software ring holding Tx frames
 */
struct tbnet {
	const struct tb_service *svc;
	struct tb_xdomain *xd;
	struct tb_protocol_handler handler;
	struct net_device *dev;
	struct napi_struct napi;
	struct tbnet_stats stats;
	struct sk_buff *skb;
	atomic_t command_id;
	bool login_sent;
	bool login_received;
	u32 transmit_path;
	struct mutex connection_lock;
	int login_retries;
	struct delayed_work login_work;
	struct work_struct connected_work;
	struct work_struct disconnect_work;
	struct thunderbolt_ip_frame_header rx_hdr;
	struct tbnet_ring rx_ring;
	atomic_t frame_id;
	struct tbnet_ring tx_ring;
};
/* Network property directory UUID: c66189ca-1cce-4195-bdb8-49592e5f5a4f */
static const uuid_t tbnet_dir_uuid =
	UUID_INIT(0xc66189ca, 0x1cce, 0x4195,
		  0xbd, 0xb8, 0x49, 0x59, 0x2e, 0x5f, 0x5a, 0x4f);

/* ThunderboltIP protocol UUID: 798f589e-3616-8a47-97c6-5664a920c8dd */
static const uuid_t tbnet_svc_uuid =
	UUID_INIT(0x798f589e, 0x3616, 0x8a47,
		  0x97, 0xc6, 0x56, 0x64, 0xa9, 0x20, 0xc8, 0xdd);

static struct tb_property_dir *tbnet_dir;

static void tbnet_fill_header(struct thunderbolt_ip_header *hdr, u64 route,
	u8 sequence, const uuid_t *initiator_uuid, const uuid_t *target_uuid,
	enum thunderbolt_ip_type type, size_t size, u32 command_id)
{
	u32 length_sn;

	/* Length does not include route_hi/lo and length_sn fields */
	length_sn = (size - 3 * 4) / 4;
	length_sn |= (sequence << TBIP_HDR_SN_SHIFT) & TBIP_HDR_SN_MASK;

	hdr->route_hi = upper_32_bits(route);
	hdr->route_lo = lower_32_bits(route);
	hdr->length_sn = length_sn;
	uuid_copy(&hdr->uuid, &tbnet_svc_uuid);
	uuid_copy(&hdr->initiator_uuid, initiator_uuid);
	uuid_copy(&hdr->target_uuid, target_uuid);
	hdr->type = type;
	hdr->command_id = command_id;
}

static int tbnet_login_response(struct tbnet *net, u64 route, u8 sequence,
				u32 command_id)
{
	struct thunderbolt_ip_login_response reply;
	struct tb_xdomain *xd = net->xd;

	memset(&reply, 0, sizeof(reply));
	tbnet_fill_header(&reply.hdr, route, sequence, xd->local_uuid,
			  xd->remote_uuid, TBIP_LOGIN_RESPONSE, sizeof(reply),
			  command_id);
	memcpy(reply.receiver_mac, net->dev->dev_addr, ETH_ALEN);
	reply.receiver_mac_len = ETH_ALEN;

	return tb_xdomain_response(xd, &reply, sizeof(reply),
				   TB_CFG_PKG_XDOMAIN_RESP);
}

static int tbnet_login_request(struct tbnet *net, u8 sequence)
{
	struct thunderbolt_ip_login_response reply;
	struct thunderbolt_ip_login request;
	struct tb_xdomain *xd = net->xd;

	memset(&request, 0, sizeof(request));
	tbnet_fill_header(&request.hdr, xd->route, sequence, xd->local_uuid,
			  xd->remote_uuid, TBIP_LOGIN, sizeof(request),
			  atomic_inc_return(&net->command_id));

	request.proto_version = TBIP_LOGIN_PROTO_VERSION;
	request.transmit_path = TBNET_LOCAL_PATH;

	return tb_xdomain_request(xd, &request, sizeof(request),
				  TB_CFG_PKG_XDOMAIN_RESP, &reply,
				  sizeof(reply), TB_CFG_PKG_XDOMAIN_RESP,
				  TBNET_LOGIN_TIMEOUT);
}

static int tbnet_logout_response(struct tbnet *net, u64 route, u8 sequence,
				 u32 command_id)
{
	struct thunderbolt_ip_status reply;
	struct tb_xdomain *xd = net->xd;

	memset(&reply, 0, sizeof(reply));
	tbnet_fill_header(&reply.hdr, route, sequence, xd->local_uuid,
			  xd->remote_uuid, TBIP_STATUS, sizeof(reply),
			  atomic_inc_return(&net->command_id));
	return tb_xdomain_response(xd, &reply, sizeof(reply),
				   TB_CFG_PKG_XDOMAIN_RESP);
}

static int tbnet_logout_request(struct tbnet *net)
{
	struct thunderbolt_ip_logout request;
	struct thunderbolt_ip_status reply;
	struct tb_xdomain *xd = net->xd;

	memset(&request, 0, sizeof(request));
	tbnet_fill_header(&request.hdr, xd->route, 0, xd->local_uuid,
			  xd->remote_uuid, TBIP_LOGOUT, sizeof(request),
			  atomic_inc_return(&net->command_id));

	return tb_xdomain_request(xd, &request, sizeof(request),
				  TB_CFG_PKG_XDOMAIN_RESP, &reply,
				  sizeof(reply), TB_CFG_PKG_XDOMAIN_RESP,
				  TBNET_LOGOUT_TIMEOUT);
}

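/*
 * Connection handshake: both hosts keep sending TBIP_LOGIN until their
 * own request has been acknowledged (login_sent) and a request from the
 * peer has been answered (login_received). Only when both are true does
 * connected_work enable the high-speed DMA paths.
 */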
static void start_login(struct tbnet *net)
{
	mutex_lock(&net->connection_lock);
	net->login_sent = false;
	net->login_received = false;
	mutex_unlock(&net->connection_lock);

	queue_delayed_work(system_long_wq, &net->login_work,
			   msecs_to_jiffies(1000));
}

static void stop_login(struct tbnet *net)
{
	cancel_delayed_work_sync(&net->login_work);
	cancel_work_sync(&net->connected_work);
}

static inline unsigned int tbnet_frame_size(const struct tbnet_frame *tf)
{
	return tf->frame.size ? : TBNET_FRAME_SIZE;
}

static void tbnet_free_buffers(struct tbnet_ring *ring)
{
	unsigned int i;

	for (i = 0; i < TBNET_RING_SIZE; i++) {
		struct device *dma_dev = tb_ring_dma_device(ring->ring);
		struct tbnet_frame *tf = &ring->frames[i];
		enum dma_data_direction dir;
		unsigned int order;
		size_t size;

		if (!tf->page)
			continue;

		if (ring->ring->is_tx) {
			dir = DMA_TO_DEVICE;
			order = 0;
			size = TBNET_FRAME_SIZE;
		} else {
			dir = DMA_FROM_DEVICE;
			order = TBNET_RX_PAGE_ORDER;
			size = TBNET_RX_PAGE_SIZE;
		}

		if (tf->frame.buffer_phy)
			dma_unmap_page(dma_dev, tf->frame.buffer_phy, size,
				       dir);

		__free_pages(tf->page, order);
		tf->page = NULL;
	}

	ring->cons = 0;
	ring->prod = 0;
}

static void tbnet_tear_down(struct tbnet *net, bool send_logout)
{
	netif_carrier_off(net->dev);
	netif_stop_queue(net->dev);

	stop_login(net);

	mutex_lock(&net->connection_lock);

	if (net->login_sent && net->login_received) {
		int retries = TBNET_LOGOUT_RETRIES;

		while (send_logout && retries-- > 0) {
			int ret = tbnet_logout_request(net);
			if (ret != -ETIMEDOUT)
				break;
		}

		tb_ring_stop(net->rx_ring.ring);
		tb_ring_stop(net->tx_ring.ring);
		tbnet_free_buffers(&net->rx_ring);
		tbnet_free_buffers(&net->tx_ring);

		if (tb_xdomain_disable_paths(net->xd))
			netdev_warn(net->dev, "failed to disable DMA paths\n");
	}

	net->login_retries = 0;
	net->login_sent = false;
	net->login_received = false;

	mutex_unlock(&net->connection_lock);
}

static int tbnet_handle_packet(const void *buf, size_t size, void *data)
{
	const struct thunderbolt_ip_login *pkg = buf;
	struct tbnet *net = data;
	u32 command_id;
	int ret = 0;
	u32 sequence;
	u64 route;

	/* Make sure the packet is for us */
	if (size < sizeof(struct thunderbolt_ip_header))
		return 0;
	if (!uuid_equal(&pkg->hdr.initiator_uuid, net->xd->remote_uuid))
		return 0;
	if (!uuid_equal(&pkg->hdr.target_uuid, net->xd->local_uuid))
		return 0;

	route = ((u64)pkg->hdr.route_hi << 32) | pkg->hdr.route_lo;
	route &= ~BIT_ULL(63);
	if (route != net->xd->route)
		return 0;

	sequence = pkg->hdr.length_sn & TBIP_HDR_SN_MASK;
	sequence >>= TBIP_HDR_SN_SHIFT;
	command_id = pkg->hdr.command_id;

	switch (pkg->hdr.type) {
	case TBIP_LOGIN:
		if (!netif_running(net->dev))
			break;

		ret = tbnet_login_response(net, route, sequence,
					   pkg->hdr.command_id);
		if (!ret) {
			mutex_lock(&net->connection_lock);
			net->login_received = true;
			net->transmit_path = pkg->transmit_path;

			/* If we have exhausted the login retries, or the
			 * previous connection was logged out, schedule
			 * another round of login retries
			 */
			if (net->login_retries >= TBNET_LOGIN_RETRIES ||
			    !net->login_sent) {
				net->login_retries = 0;
				queue_delayed_work(system_long_wq,
						   &net->login_work, 0);
			}
			mutex_unlock(&net->connection_lock);

			queue_work(system_long_wq, &net->connected_work);
		}
		break;

	case TBIP_LOGOUT:
		ret = tbnet_logout_response(net, route, sequence, command_id);
		if (!ret)
			queue_work(system_long_wq, &net->disconnect_work);
		break;

	default:
		return 0;
	}

	if (ret)
		netdev_warn(net->dev, "failed to send ThunderboltIP response\n");

	return 1;
}

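/*
 * prod and cons are free-running unsigned counters, so their difference
 * is the number of buffers currently owned by the ring even across
 * wraparound. Actual indices into frames[] are always masked with
 * (TBNET_RING_SIZE - 1), which requires the ring size to be a power of
 * two.
 */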
static unsigned int tbnet_available_buffers(const struct tbnet_ring *ring)
{
	return ring->prod - ring->cons;
}

static int tbnet_alloc_rx_buffers(struct tbnet *net, unsigned int nbuffers)
{
	struct tbnet_ring *ring = &net->rx_ring;
	int ret;

	while (nbuffers--) {
		struct device *dma_dev = tb_ring_dma_device(ring->ring);
		unsigned int index = ring->prod & (TBNET_RING_SIZE - 1);
		struct tbnet_frame *tf = &ring->frames[index];
		dma_addr_t dma_addr;

		if (tf->page)
			break;

		/* Allocate page (order > 0) so that it can hold maximum
		 * ThunderboltIP frame (4kB) and the additional room for
		 * SKB shared info required by build_skb().
		 */
		tf->page = dev_alloc_pages(TBNET_RX_PAGE_ORDER);
		if (!tf->page) {
			ret = -ENOMEM;
			goto err_free;
		}

		dma_addr = dma_map_page(dma_dev, tf->page, 0,
					TBNET_RX_PAGE_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(dma_dev, dma_addr)) {
			ret = -ENOMEM;
			goto err_free;
		}

		tf->frame.buffer_phy = dma_addr;
		tf->dev = net->dev;

		tb_ring_rx(ring->ring, &tf->frame);

		ring->prod++;
	}

	return 0;

err_free:
	tbnet_free_buffers(ring);
	return ret;
}

static struct tbnet_frame *tbnet_get_tx_buffer(struct tbnet *net)
{
	struct tbnet_ring *ring = &net->tx_ring;
	struct device *dma_dev = tb_ring_dma_device(ring->ring);
	struct tbnet_frame *tf;
	unsigned int index;

	if (!tbnet_available_buffers(ring))
		return NULL;

	index = ring->cons++ & (TBNET_RING_SIZE - 1);

	tf = &ring->frames[index];
	tf->frame.size = 0;

	dma_sync_single_for_cpu(dma_dev, tf->frame.buffer_phy,
				tbnet_frame_size(tf), DMA_TO_DEVICE);

	return tf;
}

static void tbnet_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
			      bool canceled)
{
	struct tbnet_frame *tf = container_of(frame, typeof(*tf), frame);
	struct tbnet *net = netdev_priv(tf->dev);

	/* Return buffer to the ring */
	net->tx_ring.prod++;

	if (tbnet_available_buffers(&net->tx_ring) >= TBNET_RING_SIZE / 2)
		netif_wake_queue(net->dev);
}

static int tbnet_alloc_tx_buffers(struct tbnet *net)
{
	struct tbnet_ring *ring = &net->tx_ring;
	struct device *dma_dev = tb_ring_dma_device(ring->ring);
	unsigned int i;

	for (i = 0; i < TBNET_RING_SIZE; i++) {
		struct tbnet_frame *tf = &ring->frames[i];
		dma_addr_t dma_addr;

		tf->page = alloc_page(GFP_KERNEL);
		if (!tf->page) {
			tbnet_free_buffers(ring);
			return -ENOMEM;
		}

		dma_addr = dma_map_page(dma_dev, tf->page, 0, TBNET_FRAME_SIZE,
					DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, dma_addr)) {
			__free_page(tf->page);
			tf->page = NULL;
			tbnet_free_buffers(ring);
			return -ENOMEM;
		}

		tf->dev = net->dev;
		tf->frame.buffer_phy = dma_addr;
		tf->frame.callback = tbnet_tx_callback;
		tf->frame.sof = TBIP_PDF_FRAME_START;
		tf->frame.eof = TBIP_PDF_FRAME_END;
	}

	ring->cons = 0;
	ring->prod = TBNET_RING_SIZE - 1;

	return 0;
}

static void tbnet_connected_work(struct work_struct *work)
{
	struct tbnet *net = container_of(work, typeof(*net), connected_work);
	bool connected;
	int ret;

	if (netif_carrier_ok(net->dev))
		return;

	mutex_lock(&net->connection_lock);
	connected = net->login_sent && net->login_received;
	mutex_unlock(&net->connection_lock);

	if (!connected)
		return;

	/* Both logins successful so enable the high-speed DMA paths and
	 * start the network device queue.
	 */
	ret = tb_xdomain_enable_paths(net->xd, TBNET_LOCAL_PATH,
				      net->rx_ring.ring->hop,
				      net->transmit_path,
				      net->tx_ring.ring->hop);
	if (ret) {
		netdev_err(net->dev, "failed to enable DMA paths\n");
		return;
	}

	tb_ring_start(net->tx_ring.ring);
	tb_ring_start(net->rx_ring.ring);

	ret = tbnet_alloc_rx_buffers(net, TBNET_RING_SIZE);
	if (ret)
		goto err_stop_rings;

	ret = tbnet_alloc_tx_buffers(net);
	if (ret)
		goto err_free_rx_buffers;

	netif_carrier_on(net->dev);
	netif_start_queue(net->dev);
	return;

err_free_rx_buffers:
	tbnet_free_buffers(&net->rx_ring);
err_stop_rings:
	tb_ring_stop(net->rx_ring.ring);
	tb_ring_stop(net->tx_ring.ring);
}

static void tbnet_login_work(struct work_struct *work)
{
	struct tbnet *net = container_of(work, typeof(*net), login_work.work);
	unsigned long delay = msecs_to_jiffies(TBNET_LOGIN_DELAY);
	int ret;

	if (netif_carrier_ok(net->dev))
		return;

	ret = tbnet_login_request(net, net->login_retries % 4);
	if (ret) {
		if (net->login_retries++ < TBNET_LOGIN_RETRIES) {
			queue_delayed_work(system_long_wq, &net->login_work,
					   delay);
		} else {
			netdev_info(net->dev, "ThunderboltIP login timed out\n");
		}
	} else {
		net->login_retries = 0;

		mutex_lock(&net->connection_lock);
		net->login_sent = true;
		mutex_unlock(&net->connection_lock);

		queue_work(system_long_wq, &net->connected_work);
	}
}

static void tbnet_disconnect_work(struct work_struct *work)
{
	struct tbnet *net = container_of(work, typeof(*net), disconnect_work);

	tbnet_tear_down(net, false);
}

static bool tbnet_check_frame(struct tbnet *net, const struct tbnet_frame *tf,
			      const struct thunderbolt_ip_frame_header *hdr)
{
	u32 frame_id, frame_count, frame_size, frame_index;
	unsigned int size;

	if (tf->frame.flags & RING_DESC_CRC_ERROR) {
		net->stats.rx_crc_errors++;
		return false;
	} else if (tf->frame.flags & RING_DESC_BUFFER_OVERRUN) {
		net->stats.rx_over_errors++;
		return false;
	}

	/* Size should be greater than just the header, i.e. the frame
	 * contains data.
	 */
	size = tbnet_frame_size(tf);
	if (size <= sizeof(*hdr)) {
		net->stats.rx_length_errors++;
		return false;
	}

	frame_count = le32_to_cpu(hdr->frame_count);
	frame_size = le32_to_cpu(hdr->frame_size);
	frame_index = le16_to_cpu(hdr->frame_index);
	frame_id = le16_to_cpu(hdr->frame_id);

	if ((frame_size > size - sizeof(*hdr)) || !frame_size) {
		net->stats.rx_length_errors++;
		return false;
	}

	/* In case we're in the middle of a packet, validate the frame
	 * header based on the first fragment of the packet.
	 */
	if (net->skb && net->rx_hdr.frame_count) {
		/* Check that the frame count matches the count field */
		if (frame_count != net->rx_hdr.frame_count) {
			net->stats.rx_length_errors++;
			return false;
		}

		/* Check that the frame index is incremented correctly
		 * and the frame ID matches.
		 */
		if (frame_index != net->rx_hdr.frame_index + 1 ||
		    frame_id != net->rx_hdr.frame_id) {
			net->stats.rx_missed_errors++;
			return false;
		}

		if (net->skb->len + frame_size > TBNET_MAX_MTU) {
			net->stats.rx_length_errors++;
			return false;
		}

		return true;
	}

	/* Start of packet, validate the frame header */
	if (frame_count == 0 || frame_count > TBNET_RING_SIZE / 4) {
		net->stats.rx_length_errors++;
		return false;
	}
	if (frame_index != 0) {
		net->stats.rx_missed_errors++;
		return false;
	}

	return true;
}

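/*
 * NAPI poll: the first Thunderbolt frame of a packet becomes the skb
 * head via build_skb() (the page is handed over, not copied), and the
 * remaining frames of the same packet are attached as page fragments
 * until the last frame completes the packet.
 */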
static int tbnet_poll(struct napi_struct *napi, int budget)
{
	struct tbnet *net = container_of(napi, struct tbnet, napi);
	unsigned int cleaned_count = tbnet_available_buffers(&net->rx_ring);
	struct device *dma_dev = tb_ring_dma_device(net->rx_ring.ring);
	unsigned int rx_packets = 0;

	while (rx_packets < budget) {
		const struct thunderbolt_ip_frame_header *hdr;
		unsigned int hdr_size = sizeof(*hdr);
		struct sk_buff *skb = NULL;
		struct ring_frame *frame;
		struct tbnet_frame *tf;
		struct page *page;
		bool last = true;
		u32 frame_size;

		/* Return some buffers to hardware, one at a time is too
		 * slow so allocate MAX_SKB_FRAGS buffers at the same
		 * time.
		 */
		if (cleaned_count >= MAX_SKB_FRAGS) {
			tbnet_alloc_rx_buffers(net, cleaned_count);
			cleaned_count = 0;
		}

		frame = tb_ring_poll(net->rx_ring.ring);
		if (!frame)
			break;

		dma_unmap_page(dma_dev, frame->buffer_phy,
			       TBNET_RX_PAGE_SIZE, DMA_FROM_DEVICE);

		tf = container_of(frame, typeof(*tf), frame);

		page = tf->page;
		tf->page = NULL;
		net->rx_ring.cons++;
		cleaned_count++;

		hdr = page_address(page);
		if (!tbnet_check_frame(net, tf, hdr)) {
			__free_pages(page, TBNET_RX_PAGE_ORDER);
			dev_kfree_skb_any(net->skb);
			net->skb = NULL;
			continue;
		}

		frame_size = le32_to_cpu(hdr->frame_size);

		skb = net->skb;
		if (!skb) {
			skb = build_skb(page_address(page),
					TBNET_RX_PAGE_SIZE);
			if (!skb) {
				__free_pages(page, TBNET_RX_PAGE_ORDER);
				net->stats.rx_errors++;
				break;
			}

			skb_reserve(skb, hdr_size);
			skb_put(skb, frame_size);

			net->skb = skb;
		} else {
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
					page, hdr_size, frame_size,
					TBNET_RX_PAGE_SIZE - hdr_size);
		}

		net->rx_hdr.frame_size = frame_size;
		net->rx_hdr.frame_count = le32_to_cpu(hdr->frame_count);
		net->rx_hdr.frame_index = le16_to_cpu(hdr->frame_index);
		net->rx_hdr.frame_id = le16_to_cpu(hdr->frame_id);
		last = net->rx_hdr.frame_index == net->rx_hdr.frame_count - 1;

		rx_packets++;
		net->stats.rx_bytes += frame_size;

		if (last) {
			skb->protocol = eth_type_trans(skb, net->dev);
			napi_gro_receive(&net->napi, skb);
			net->skb = NULL;
		}
	}

	net->stats.rx_packets += rx_packets;

	if (cleaned_count)
		tbnet_alloc_rx_buffers(net, cleaned_count);

	if (rx_packets >= budget)
		return budget;

	napi_complete_done(napi, rx_packets);
	/* Re-enable the ring interrupt */
	tb_ring_poll_complete(net->rx_ring.ring);

	return rx_packets;
}

static void tbnet_start_poll(void *data)
{
	struct tbnet *net = data;

	napi_schedule(&net->napi);
}

static int tbnet_open(struct net_device *dev)
{
	struct tbnet *net = netdev_priv(dev);
	struct tb_xdomain *xd = net->xd;
	u16 sof_mask, eof_mask;
	struct tb_ring *ring;

	netif_carrier_off(dev);

	ring = tb_ring_alloc_tx(xd->tb->nhi, -1, TBNET_RING_SIZE,
				RING_FLAG_FRAME);
	if (!ring) {
		netdev_err(dev, "failed to allocate Tx ring\n");
		return -ENOMEM;
	}
	net->tx_ring.ring = ring;

	sof_mask = BIT(TBIP_PDF_FRAME_START);
	eof_mask = BIT(TBIP_PDF_FRAME_END);

	ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE,
				RING_FLAG_FRAME, sof_mask, eof_mask,
				tbnet_start_poll, net);
	if (!ring) {
		netdev_err(dev, "failed to allocate Rx ring\n");
		tb_ring_free(net->tx_ring.ring);
		net->tx_ring.ring = NULL;
		return -ENOMEM;
	}
	net->rx_ring.ring = ring;

	napi_enable(&net->napi);
	start_login(net);

	return 0;
}

static int tbnet_stop(struct net_device *dev)
{
	struct tbnet *net = netdev_priv(dev);

	napi_disable(&net->napi);

	cancel_work_sync(&net->disconnect_work);
	tbnet_tear_down(net, true);

	tb_ring_free(net->rx_ring.ring);
	net->rx_ring.ring = NULL;
	tb_ring_free(net->tx_ring.ring);
	net->tx_ring.ring = NULL;

	return 0;
}

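/*
 * Software checksum offload for CHECKSUM_PARTIAL skbs: the checksum
 * field in the copied headers is seeded with the complement of the
 * pseudo-header sum, the running sum is seeded with the transport
 * payload length, and csum_partial() is then folded over every frame to
 * produce the final TCP/UDP checksum without touching the original skb.
 */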
static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
	struct tbnet_frame **frames, u32 frame_count)
{
	struct thunderbolt_ip_frame_header *hdr = page_address(frames[0]->page);
	struct device *dma_dev = tb_ring_dma_device(net->tx_ring.ring);
	__wsum wsum = htonl(skb->len - skb_transport_offset(skb));
	unsigned int i, len, offset = skb_transport_offset(skb);
	__be16 protocol = skb->protocol;
	void *data = skb->data;
	void *dest = hdr + 1;
	__sum16 *tucso;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		/* No need to calculate checksum so we just update the
		 * total frame count and sync the frames for DMA.
		 */
		for (i = 0; i < frame_count; i++) {
			hdr = page_address(frames[i]->page);
			hdr->frame_count = cpu_to_le32(frame_count);
			dma_sync_single_for_device(dma_dev,
				frames[i]->frame.buffer_phy,
				tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
		}

		return true;
	}

	if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vhdr, vh;

		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(vh), &vh);
		if (!vhdr)
			return false;

		protocol = vhdr->h_vlan_encapsulated_proto;
	}

	/* data points to the beginning of the packet. Compute the
	 * absolute offset of the checksum field within the packet:
	 * ipcso updates the IP checksum, tucso updates the TCP/UDP
	 * checksum.
	 */
	if (protocol == htons(ETH_P_IP)) {
		__sum16 *ipcso = dest + ((void *)&(ip_hdr(skb)->check) - data);

		*ipcso = 0;
		*ipcso = ip_fast_csum(dest + skb_network_offset(skb),
				      ip_hdr(skb)->ihl);

		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			tucso = dest + ((void *)&(udp_hdr(skb)->check) - data);
		else
			return false;

		*tucso = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					    ip_hdr(skb)->daddr, 0,
					    ip_hdr(skb)->protocol, 0);
	} else if (skb_is_gso_v6(skb)) {
		tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data);
		*tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					  &ipv6_hdr(skb)->daddr, 0,
					  IPPROTO_TCP, 0);
		return false;
	} else if (protocol == htons(ETH_P_IPV6)) {
		tucso = dest + skb_checksum_start_offset(skb) + skb->csum_offset;
		*tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					  &ipv6_hdr(skb)->daddr, 0,
					  ipv6_hdr(skb)->nexthdr, 0);
	} else {
		return false;
	}

	/* First frame was headers, rest of the frames contain data.
	 * Calculate checksum over each frame.
	 */
	for (i = 0; i < frame_count; i++) {
		hdr = page_address(frames[i]->page);
		dest = (void *)(hdr + 1) + offset;
		len = le32_to_cpu(hdr->frame_size) - offset;
		wsum = csum_partial(dest, len, wsum);
		hdr->frame_count = cpu_to_le32(frame_count);

		offset = 0;
	}

	*tucso = csum_fold(wsum);

	/* Checksum is finally calculated and we don't touch the memory
	 * anymore, so DMA sync the frames now.
	 */
	for (i = 0; i < frame_count; i++) {
		dma_sync_single_for_device(dma_dev, frames[i]->frame.buffer_phy,
			tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
	}

	return true;
}

static void *tbnet_kmap_frag(struct sk_buff *skb, unsigned int frag_num,
			     unsigned int *len)
{
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num];

	*len = skb_frag_size(frag);
	return kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag);
}

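/*
 * Tx path: the skb (linear part plus page fragments) is copied into
 * pre-allocated DMA-mapped Tx pages, TBNET_MAX_PAYLOAD_SIZE bytes of
 * payload per Thunderbolt frame, each frame prefixed with its own
 * frame header.
 */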
static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct tbnet *net = netdev_priv(dev);
	struct tbnet_frame *frames[MAX_SKB_FRAGS];
	u16 frame_id = atomic_read(&net->frame_id);
	struct thunderbolt_ip_frame_header *hdr;
	unsigned int len = skb_headlen(skb);
	unsigned int data_len = skb->len;
	unsigned int nframes, i;
	unsigned int frag = 0;
	void *src = skb->data;
	u32 frame_index = 0;
	bool unmap = false;
	void *dest;

	nframes = DIV_ROUND_UP(data_len, TBNET_MAX_PAYLOAD_SIZE);
	if (tbnet_available_buffers(&net->tx_ring) < nframes) {
		netif_stop_queue(net->dev);
		return NETDEV_TX_BUSY;
	}

	frames[frame_index] = tbnet_get_tx_buffer(net);
	if (!frames[frame_index])
		goto err_drop;

	hdr = page_address(frames[frame_index]->page);
	dest = hdr + 1;

	/* If the overall packet is bigger than the frame data size */
	while (data_len > TBNET_MAX_PAYLOAD_SIZE) {
		unsigned int size_left = TBNET_MAX_PAYLOAD_SIZE;

		hdr->frame_size = cpu_to_le32(TBNET_MAX_PAYLOAD_SIZE);
		hdr->frame_index = cpu_to_le16(frame_index);
		hdr->frame_id = cpu_to_le16(frame_id);

		do {
			if (len > size_left) {
				/* Copy data into the Tx buffer up to the
				 * full frame size, then break and move on
				 * to the next frame.
				 */
				memcpy(dest, src, size_left);
				len -= size_left;
				dest += size_left;
				src += size_left;
				break;
			}

			memcpy(dest, src, len);
			size_left -= len;
			dest += len;

			if (unmap) {
				kunmap_atomic(src);
				unmap = false;
			}

			/* Advance to the next fragment, if any remain */
			if (frag < skb_shinfo(skb)->nr_frags) {
				/* Map and then unmap quickly */
				src = tbnet_kmap_frag(skb, frag++, &len);
				unmap = true;
			} else if (unlikely(size_left > 0)) {
				goto err_drop;
			}
		} while (size_left > 0);

		data_len -= TBNET_MAX_PAYLOAD_SIZE;
		frame_index++;

		frames[frame_index] = tbnet_get_tx_buffer(net);
		if (!frames[frame_index])
			goto err_drop;

		hdr = page_address(frames[frame_index]->page);
		dest = hdr + 1;
	}

	hdr->frame_size = cpu_to_le32(data_len);
	hdr->frame_index = cpu_to_le16(frame_index);
	hdr->frame_id = cpu_to_le16(frame_id);

	frames[frame_index]->frame.size = data_len + sizeof(*hdr);

	/* In case the remaining data_len is smaller than a frame */
	while (len < data_len) {
		memcpy(dest, src, len);
		data_len -= len;
		dest += len;

		if (unmap) {
			kunmap_atomic(src);
			unmap = false;
		}

		if (frag < skb_shinfo(skb)->nr_frags) {
			src = tbnet_kmap_frag(skb, frag++, &len);
			unmap = true;
		} else if (unlikely(data_len > 0)) {
			goto err_drop;
		}
	}

	memcpy(dest, src, data_len);

	if (unmap)
		kunmap_atomic(src);

	if (!tbnet_xmit_csum_and_map(net, skb, frames, frame_index + 1))
		goto err_drop;

	for (i = 0; i < frame_index + 1; i++)
		tb_ring_tx(net->tx_ring.ring, &frames[i]->frame);

	if (net->svc->prtcstns & TBNET_MATCH_FRAGS_ID)
		atomic_inc(&net->frame_id);

	net->stats.tx_packets++;
	net->stats.tx_bytes += skb->len;

	dev_consume_skb_any(skb);

	return NETDEV_TX_OK;

err_drop:
	/* We can re-use the buffers */
	net->tx_ring.cons -= frame_index;

	dev_kfree_skb_any(skb);
	net->stats.tx_errors++;

	return NETDEV_TX_OK;
}

tbnet_get_stats64(struct net_device * dev,struct rtnl_link_stats64 * stats)1147*4882a593Smuzhiyun static void tbnet_get_stats64(struct net_device *dev,
1148*4882a593Smuzhiyun 			      struct rtnl_link_stats64 *stats)
1149*4882a593Smuzhiyun {
1150*4882a593Smuzhiyun 	struct tbnet *net = netdev_priv(dev);
1151*4882a593Smuzhiyun 
1152*4882a593Smuzhiyun 	stats->tx_packets = net->stats.tx_packets;
1153*4882a593Smuzhiyun 	stats->rx_packets = net->stats.rx_packets;
1154*4882a593Smuzhiyun 	stats->tx_bytes = net->stats.tx_bytes;
1155*4882a593Smuzhiyun 	stats->rx_bytes = net->stats.rx_bytes;
1156*4882a593Smuzhiyun 	stats->rx_errors = net->stats.rx_errors + net->stats.rx_length_errors +
1157*4882a593Smuzhiyun 		net->stats.rx_over_errors + net->stats.rx_crc_errors +
1158*4882a593Smuzhiyun 		net->stats.rx_missed_errors;
1159*4882a593Smuzhiyun 	stats->tx_errors = net->stats.tx_errors;
1160*4882a593Smuzhiyun 	stats->rx_length_errors = net->stats.rx_length_errors;
1161*4882a593Smuzhiyun 	stats->rx_over_errors = net->stats.rx_over_errors;
1162*4882a593Smuzhiyun 	stats->rx_crc_errors = net->stats.rx_crc_errors;
1163*4882a593Smuzhiyun 	stats->rx_missed_errors = net->stats.rx_missed_errors;
1164*4882a593Smuzhiyun }
1165*4882a593Smuzhiyun 
1166*4882a593Smuzhiyun static const struct net_device_ops tbnet_netdev_ops = {
1167*4882a593Smuzhiyun 	.ndo_open = tbnet_open,
1168*4882a593Smuzhiyun 	.ndo_stop = tbnet_stop,
1169*4882a593Smuzhiyun 	.ndo_start_xmit = tbnet_start_xmit,
1170*4882a593Smuzhiyun 	.ndo_get_stats64 = tbnet_get_stats64,
1171*4882a593Smuzhiyun };
1172*4882a593Smuzhiyun 
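/* Derive a stable, locally administered unicast MAC address: the high
 * nibble of byte 0 encodes the physical Thunderbolt port, bytes 1-4
 * come from a jhash of the local XDomain UUID, and byte 5 from a
 * second hash round seeded with the first.
 */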
1173*4882a593Smuzhiyun static void tbnet_generate_mac(struct net_device *dev)
1174*4882a593Smuzhiyun {
1175*4882a593Smuzhiyun 	const struct tbnet *net = netdev_priv(dev);
1176*4882a593Smuzhiyun 	const struct tb_xdomain *xd = net->xd;
1177*4882a593Smuzhiyun 	u8 phy_port;
1178*4882a593Smuzhiyun 	u32 hash;
1179*4882a593Smuzhiyun 
1180*4882a593Smuzhiyun 	phy_port = tb_phy_port_from_link(TBNET_L0_PORT_NUM(xd->route));
1181*4882a593Smuzhiyun 
1182*4882a593Smuzhiyun 	/* Unicast and locally administered MAC */
1183*4882a593Smuzhiyun 	dev->dev_addr[0] = phy_port << 4 | 0x02;
1184*4882a593Smuzhiyun 	hash = jhash2((u32 *)xd->local_uuid, 4, 0);
1185*4882a593Smuzhiyun 	memcpy(dev->dev_addr + 1, &hash, sizeof(hash));
1186*4882a593Smuzhiyun 	hash = jhash2((u32 *)xd->local_uuid, 4, hash);
1187*4882a593Smuzhiyun 	dev->dev_addr[5] = hash & 0xff;
1188*4882a593Smuzhiyun }
1189*4882a593Smuzhiyun 
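/* Service probe: allocate the Ethernet device, derive its MAC address
 * from the XDomain link, announce checksum/TSO offloads (the driver
 * checksums whole packets itself, see the comment below), then
 * register the ThunderboltIP packet handler and the netdev.
 */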
1190*4882a593Smuzhiyun static int tbnet_probe(struct tb_service *svc, const struct tb_service_id *id)
1191*4882a593Smuzhiyun {
1192*4882a593Smuzhiyun 	struct tb_xdomain *xd = tb_service_parent(svc);
1193*4882a593Smuzhiyun 	struct net_device *dev;
1194*4882a593Smuzhiyun 	struct tbnet *net;
1195*4882a593Smuzhiyun 	int ret;
1196*4882a593Smuzhiyun 
1197*4882a593Smuzhiyun 	dev = alloc_etherdev(sizeof(*net));
1198*4882a593Smuzhiyun 	if (!dev)
1199*4882a593Smuzhiyun 		return -ENOMEM;
1200*4882a593Smuzhiyun 
1201*4882a593Smuzhiyun 	SET_NETDEV_DEV(dev, &svc->dev);
1202*4882a593Smuzhiyun 
1203*4882a593Smuzhiyun 	net = netdev_priv(dev);
1204*4882a593Smuzhiyun 	INIT_DELAYED_WORK(&net->login_work, tbnet_login_work);
1205*4882a593Smuzhiyun 	INIT_WORK(&net->connected_work, tbnet_connected_work);
1206*4882a593Smuzhiyun 	INIT_WORK(&net->disconnect_work, tbnet_disconnect_work);
1207*4882a593Smuzhiyun 	mutex_init(&net->connection_lock);
1208*4882a593Smuzhiyun 	atomic_set(&net->command_id, 0);
1209*4882a593Smuzhiyun 	atomic_set(&net->frame_id, 0);
1210*4882a593Smuzhiyun 	net->svc = svc;
1211*4882a593Smuzhiyun 	net->dev = dev;
1212*4882a593Smuzhiyun 	net->xd = xd;
1213*4882a593Smuzhiyun 
1214*4882a593Smuzhiyun 	tbnet_generate_mac(dev);
1215*4882a593Smuzhiyun 
1216*4882a593Smuzhiyun 	strcpy(dev->name, "thunderbolt%d");
1217*4882a593Smuzhiyun 	dev->netdev_ops = &tbnet_netdev_ops;
1218*4882a593Smuzhiyun 
1219*4882a593Smuzhiyun 	/* ThunderboltIP takes advantage of TSO packets but instead of
1220*4882a593Smuzhiyun 	 * segmenting them we just split the packet into Thunderbolt
1221*4882a593Smuzhiyun 	 * frames (maximum payload size of each frame is 4084 bytes) and
1222*4882a593Smuzhiyun 	 * calculate checksum over the whole packet here.
1223*4882a593Smuzhiyun 	 *
1224*4882a593Smuzhiyun 	 * The receiving side does the opposite if the host OS supports
1225*4882a593Smuzhiyun 	 * LRO, otherwise it needs to split the large packet into MTU
1226*4882a593Smuzhiyun 	 * sized smaller packets.
1227*4882a593Smuzhiyun 	 *
1228*4882a593Smuzhiyun 	 * In order to receive large packets from the networking stack,
1229*4882a593Smuzhiyun 	 * we need to announce support for most of the offloading
1230*4882a593Smuzhiyun 	 * features here.
1231*4882a593Smuzhiyun 	 */
1232*4882a593Smuzhiyun 	dev->hw_features = NETIF_F_SG | NETIF_F_ALL_TSO | NETIF_F_GRO |
1233*4882a593Smuzhiyun 			   NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1234*4882a593Smuzhiyun 	dev->features = dev->hw_features | NETIF_F_HIGHDMA;
1235*4882a593Smuzhiyun 	dev->hard_header_len += sizeof(struct thunderbolt_ip_frame_header);
1236*4882a593Smuzhiyun 
1237*4882a593Smuzhiyun 	netif_napi_add(dev, &net->napi, tbnet_poll, NAPI_POLL_WEIGHT);
1238*4882a593Smuzhiyun 
1239*4882a593Smuzhiyun 	/* MTU range: 68 - 65522 */
1240*4882a593Smuzhiyun 	dev->min_mtu = ETH_MIN_MTU;
1241*4882a593Smuzhiyun 	dev->max_mtu = TBNET_MAX_MTU - ETH_HLEN;
1242*4882a593Smuzhiyun 
1243*4882a593Smuzhiyun 	net->handler.uuid = &tbnet_svc_uuid;
1244*4882a593Smuzhiyun 	net->handler.callback = tbnet_handle_packet;
1245*4882a593Smuzhiyun 	net->handler.data = net;
1246*4882a593Smuzhiyun 	tb_register_protocol_handler(&net->handler);
1247*4882a593Smuzhiyun 
1248*4882a593Smuzhiyun 	tb_service_set_drvdata(svc, net);
1249*4882a593Smuzhiyun 
1250*4882a593Smuzhiyun 	ret = register_netdev(dev);
1251*4882a593Smuzhiyun 	if (ret) {
1252*4882a593Smuzhiyun 		tb_unregister_protocol_handler(&net->handler);
1253*4882a593Smuzhiyun 		free_netdev(dev);
1254*4882a593Smuzhiyun 		return ret;
1255*4882a593Smuzhiyun 	}
1256*4882a593Smuzhiyun 
1257*4882a593Smuzhiyun 	return 0;
1258*4882a593Smuzhiyun }
1259*4882a593Smuzhiyun 
1260*4882a593Smuzhiyun static void tbnet_remove(struct tb_service *svc)
1261*4882a593Smuzhiyun {
1262*4882a593Smuzhiyun 	struct tbnet *net = tb_service_get_drvdata(svc);
1263*4882a593Smuzhiyun 
1264*4882a593Smuzhiyun 	unregister_netdev(net->dev);
1265*4882a593Smuzhiyun 	tb_unregister_protocol_handler(&net->handler);
1266*4882a593Smuzhiyun 	free_netdev(net->dev);
1267*4882a593Smuzhiyun }
1268*4882a593Smuzhiyun 
1269*4882a593Smuzhiyun static void tbnet_shutdown(struct tb_service *svc)
1270*4882a593Smuzhiyun {
1271*4882a593Smuzhiyun 	tbnet_tear_down(tb_service_get_drvdata(svc), true);
1272*4882a593Smuzhiyun }
1273*4882a593Smuzhiyun 
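/* System sleep: stop any pending login work, detach the interface and
 * tear down the connection; the packet handler is unregistered here
 * and registered again in tbnet_resume().
 */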
1274*4882a593Smuzhiyun static int __maybe_unused tbnet_suspend(struct device *dev)
1275*4882a593Smuzhiyun {
1276*4882a593Smuzhiyun 	struct tb_service *svc = tb_to_service(dev);
1277*4882a593Smuzhiyun 	struct tbnet *net = tb_service_get_drvdata(svc);
1278*4882a593Smuzhiyun 
1279*4882a593Smuzhiyun 	stop_login(net);
1280*4882a593Smuzhiyun 	if (netif_running(net->dev)) {
1281*4882a593Smuzhiyun 		netif_device_detach(net->dev);
1282*4882a593Smuzhiyun 		tbnet_tear_down(net, true);
1283*4882a593Smuzhiyun 	}
1284*4882a593Smuzhiyun 
1285*4882a593Smuzhiyun 	tb_unregister_protocol_handler(&net->handler);
1286*4882a593Smuzhiyun 	return 0;
1287*4882a593Smuzhiyun }
1288*4882a593Smuzhiyun 
1289*4882a593Smuzhiyun static int __maybe_unused tbnet_resume(struct device *dev)
1290*4882a593Smuzhiyun {
1291*4882a593Smuzhiyun 	struct tb_service *svc = tb_to_service(dev);
1292*4882a593Smuzhiyun 	struct tbnet *net = tb_service_get_drvdata(svc);
1293*4882a593Smuzhiyun 
1294*4882a593Smuzhiyun 	tb_register_protocol_handler(&net->handler);
1295*4882a593Smuzhiyun 
1296*4882a593Smuzhiyun 	netif_carrier_off(net->dev);
1297*4882a593Smuzhiyun 	if (netif_running(net->dev)) {
1298*4882a593Smuzhiyun 		netif_device_attach(net->dev);
1299*4882a593Smuzhiyun 		start_login(net);
1300*4882a593Smuzhiyun 	}
1301*4882a593Smuzhiyun 
1302*4882a593Smuzhiyun 	return 0;
1303*4882a593Smuzhiyun }
1304*4882a593Smuzhiyun 
1305*4882a593Smuzhiyun static const struct dev_pm_ops tbnet_pm_ops = {
1306*4882a593Smuzhiyun 	SET_SYSTEM_SLEEP_PM_OPS(tbnet_suspend, tbnet_resume)
1307*4882a593Smuzhiyun };
1308*4882a593Smuzhiyun 
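/* Match the XDomain "network" service with protocol ID 1; this mirrors
 * the prtcid property the driver itself announces in tbnet_init().
 */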
1309*4882a593Smuzhiyun static const struct tb_service_id tbnet_ids[] = {
1310*4882a593Smuzhiyun 	{ TB_SERVICE("network", 1) },
1311*4882a593Smuzhiyun 	{ },
1312*4882a593Smuzhiyun };
1313*4882a593Smuzhiyun MODULE_DEVICE_TABLE(tbsvc, tbnet_ids);
1314*4882a593Smuzhiyun 
1315*4882a593Smuzhiyun static struct tb_service_driver tbnet_driver = {
1316*4882a593Smuzhiyun 	.driver = {
1317*4882a593Smuzhiyun 		.owner = THIS_MODULE,
1318*4882a593Smuzhiyun 		.name = "thunderbolt-net",
1319*4882a593Smuzhiyun 		.pm = &tbnet_pm_ops,
1320*4882a593Smuzhiyun 	},
1321*4882a593Smuzhiyun 	.probe = tbnet_probe,
1322*4882a593Smuzhiyun 	.remove = tbnet_remove,
1323*4882a593Smuzhiyun 	.shutdown = tbnet_shutdown,
1324*4882a593Smuzhiyun 	.id_table = tbnet_ids,
1325*4882a593Smuzhiyun };
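/* Module init: publish the XDomain property directory that advertises
 * the network service (protocol ID/version 1 and the supported flags)
 * before registering the service driver, so that remote hosts can
 * discover it.
 */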
1326*4882a593Smuzhiyun 
1327*4882a593Smuzhiyun static int __init tbnet_init(void)
1328*4882a593Smuzhiyun {
1329*4882a593Smuzhiyun 	int ret;
1330*4882a593Smuzhiyun 
1331*4882a593Smuzhiyun 	tbnet_dir = tb_property_create_dir(&tbnet_dir_uuid);
1332*4882a593Smuzhiyun 	if (!tbnet_dir)
1333*4882a593Smuzhiyun 		return -ENOMEM;
1334*4882a593Smuzhiyun 
1335*4882a593Smuzhiyun 	tb_property_add_immediate(tbnet_dir, "prtcid", 1);
1336*4882a593Smuzhiyun 	tb_property_add_immediate(tbnet_dir, "prtcvers", 1);
1337*4882a593Smuzhiyun 	tb_property_add_immediate(tbnet_dir, "prtcrevs", 1);
1338*4882a593Smuzhiyun 	/* Currently only announce support for match frags ID (bit 1). Bit 0
1339*4882a593Smuzhiyun 	 * is reserved for full E2E flow control which we do not support at
1340*4882a593Smuzhiyun 	 * the moment.
1341*4882a593Smuzhiyun 	 */
1342*4882a593Smuzhiyun 	tb_property_add_immediate(tbnet_dir, "prtcstns",
1343*4882a593Smuzhiyun 				  TBNET_MATCH_FRAGS_ID);
1344*4882a593Smuzhiyun 
1345*4882a593Smuzhiyun 	ret = tb_register_property_dir("network", tbnet_dir);
1346*4882a593Smuzhiyun 	if (ret)
1347*4882a593Smuzhiyun 		goto err_free_dir;
1348*4882a593Smuzhiyun 
1349*4882a593Smuzhiyun 	ret = tb_register_service_driver(&tbnet_driver);
1350*4882a593Smuzhiyun 	if (ret)
1351*4882a593Smuzhiyun 		goto err_unregister;
1352*4882a593Smuzhiyun 
1353*4882a593Smuzhiyun 	return 0;
1354*4882a593Smuzhiyun 
1355*4882a593Smuzhiyun err_unregister:
1356*4882a593Smuzhiyun 	tb_unregister_property_dir("network", tbnet_dir);
1357*4882a593Smuzhiyun err_free_dir:
1358*4882a593Smuzhiyun 	tb_property_free_dir(tbnet_dir);
1359*4882a593Smuzhiyun 
1360*4882a593Smuzhiyun 	return ret;
1361*4882a593Smuzhiyun }
1362*4882a593Smuzhiyun module_init(tbnet_init);
1363*4882a593Smuzhiyun 
1364*4882a593Smuzhiyun static void __exit tbnet_exit(void)
1365*4882a593Smuzhiyun {
1366*4882a593Smuzhiyun 	tb_unregister_service_driver(&tbnet_driver);
1367*4882a593Smuzhiyun 	tb_unregister_property_dir("network", tbnet_dir);
1368*4882a593Smuzhiyun 	tb_property_free_dir(tbnet_dir);
1369*4882a593Smuzhiyun }
1370*4882a593Smuzhiyun module_exit(tbnet_exit);
1371*4882a593Smuzhiyun 
1372*4882a593Smuzhiyun MODULE_AUTHOR("Amir Levy <amir.jer.levy@intel.com>");
1373*4882a593Smuzhiyun MODULE_AUTHOR("Michael Jamet <michael.jamet@intel.com>");
1374*4882a593Smuzhiyun MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
1375*4882a593Smuzhiyun MODULE_DESCRIPTION("Thunderbolt network driver");
1376*4882a593Smuzhiyun MODULE_LICENSE("GPL v2");