xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies.

#include "en_accel/ktls_txrx.h"
#include "en_accel/ktls_utils.h"

struct mlx5e_dump_wqe {
	struct mlx5_wqe_ctrl_seg ctrl;
	struct mlx5_wqe_data_seg data;
};

#define MLX5E_KTLS_DUMP_WQEBBS \
	(DIV_ROUND_UP(sizeof(struct mlx5e_dump_wqe), MLX5_SEND_WQE_BB))

static u8
mlx5e_ktls_dumps_num_wqes(struct mlx5e_txqsq *sq, unsigned int nfrags,
			  unsigned int sync_len)
{
	/* Given the MTU and sync_len, calculates an upper bound for the
	 * number of DUMP WQEs needed for the TX resync of a record.
	 */
	return nfrags + DIV_ROUND_UP(sync_len, sq->hw_mtu);
}

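/* Upper bound on the WQEBBs a single kTLS skb may require on this SQ:
 * one SET_STATIC_PARAMS WQE, one SET_PROGRESS_PARAMS WQE, and the worst-case
 * number of DUMP WQEs for a resync. Reserved as stop room so the SQ is
 * stopped while such a burst can still fit.
 */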
u16 mlx5e_ktls_get_stop_room(struct mlx5e_txqsq *sq)
{
	u16 num_dumps, stop_room = 0;

	num_dumps = mlx5e_ktls_dumps_num_wqes(sq, MAX_SKB_FRAGS, TLS_MAX_PAYLOAD_SIZE);

	stop_room += mlx5e_stop_room_for_wqe(MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS);
	stop_room += mlx5e_stop_room_for_wqe(MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS);
	stop_room += num_dumps * mlx5e_stop_room_for_wqe(MLX5E_KTLS_DUMP_WQEBBS);

	return stop_room;
}

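/* Create a TIS with tls_en set; its number (tisn) is carried in this
 * connection's WQEs so the device can associate them with the connection's
 * TLS state.
 */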
static int mlx5e_ktls_create_tis(struct mlx5_core_dev *mdev, u32 *tisn)
{
	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
	void *tisc;

	tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

	MLX5_SET(tisc, tisc, tls_en, 1);

	return mlx5e_create_tis(mdev, in, tisn);
}

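/* Per-connection TX offload state, stored in the tls_context driver area
 * (see mlx5e_set_ktls_tx_priv_ctx()): the HW objects (TIS number, key id),
 * a copy of the crypto info, and the next TCP sequence expected on xmit.
 */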
struct mlx5e_ktls_offload_context_tx {
	struct tls_offload_context_tx *tx_ctx;
	struct tls12_crypto_info_aes_gcm_128 crypto_info;
	u32 expected_seq;
	u32 tisn;
	u32 key_id;
	bool ctx_post_pending;
};

static void
mlx5e_set_ktls_tx_priv_ctx(struct tls_context *tls_ctx,
			   struct mlx5e_ktls_offload_context_tx *priv_tx)
{
	struct mlx5e_ktls_offload_context_tx **ctx =
		__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX);

	BUILD_BUG_ON(sizeof(priv_tx) > TLS_DRIVER_STATE_SIZE_TX);

	*ctx = priv_tx;
}

static struct mlx5e_ktls_offload_context_tx *
mlx5e_get_ktls_tx_priv_ctx(struct tls_context *tls_ctx)
{
	struct mlx5e_ktls_offload_context_tx **ctx =
		__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX);

	return *ctx;
}

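/* Called on the tls_dev_add path for the TX direction: allocate the
 * per-connection context, program the crypto key, create the TIS, and mark
 * the params WQEs as pending so they are posted on the first transmission.
 */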
int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
		      struct tls_crypto_info *crypto_info, u32 start_offload_tcp_sn)
{
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	struct tls_context *tls_ctx;
	struct mlx5_core_dev *mdev;
	struct mlx5e_priv *priv;
	int err;

	tls_ctx = tls_get_ctx(sk);
	priv = netdev_priv(netdev);
	mdev = priv->mdev;

	priv_tx = kzalloc(sizeof(*priv_tx), GFP_KERNEL);
	if (!priv_tx)
		return -ENOMEM;

	err = mlx5_ktls_create_key(mdev, crypto_info, &priv_tx->key_id);
	if (err)
		goto err_create_key;

	priv_tx->expected_seq = start_offload_tcp_sn;
	priv_tx->crypto_info  =
		*(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
	priv_tx->tx_ctx = tls_offload_ctx_tx(tls_ctx);

	mlx5e_set_ktls_tx_priv_ctx(tls_ctx, priv_tx);

	err = mlx5e_ktls_create_tis(mdev, &priv_tx->tisn);
	if (err)
		goto err_create_tis;

	priv_tx->ctx_post_pending = true;

	return 0;

err_create_tis:
	mlx5_ktls_destroy_key(mdev, priv_tx->key_id);
err_create_key:
	kfree(priv_tx);
	return err;
}

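/* Teardown counterpart of mlx5e_ktls_add_tx(): destroy the TIS and the HW
 * key, then free the per-connection context.
 */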
void mlx5e_ktls_del_tx(struct net_device *netdev, struct tls_context *tls_ctx)
{
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	struct mlx5_core_dev *mdev;
	struct mlx5e_priv *priv;

	priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
	priv = netdev_priv(netdev);
	mdev = priv->mdev;

	mlx5e_destroy_tis(mdev, priv_tx->tisn);
	mlx5_ktls_destroy_key(mdev, priv_tx->key_id);
	kfree(priv_tx);
}

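/* Record bookkeeping for a WQE posted directly by this file (params, DUMP,
 * NOP), so completion handling can advance the SQ and, for DUMP WQEs,
 * release the frag page and account the dumped bytes.
 */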
static void tx_fill_wi(struct mlx5e_txqsq *sq,
		       u16 pi, u8 num_wqebbs, u32 num_bytes,
		       struct page *page)
{
	struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];

	*wi = (struct mlx5e_tx_wqe_info) {
		.num_wqebbs = num_wqebbs,
		.num_bytes  = num_bytes,
		.resync_dump_frag_page = page,
	};
}

static bool
mlx5e_ktls_tx_offload_test_and_clear_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
{
	bool ret = priv_tx->ctx_post_pending;

	priv_tx->ctx_post_pending = false;

	return ret;
}

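/* Post a SET_STATIC_PARAMS WQE carrying the connection's crypto info, TIS
 * and key id. When @fence is set, the WQE is fenced against previously
 * posted WQEs on this SQ.
 */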
static void
post_static_params(struct mlx5e_txqsq *sq,
		   struct mlx5e_ktls_offload_context_tx *priv_tx,
		   bool fence)
{
	struct mlx5e_set_tls_static_params_wqe *wqe;
	u16 pi, num_wqebbs;

	num_wqebbs = MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS;
	pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
	wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi);
	mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_tx->crypto_info,
				       priv_tx->tisn, priv_tx->key_id, 0, fence,
				       TLS_OFFLOAD_CTX_DIR_TX);
	tx_fill_wi(sq, pi, num_wqebbs, 0, NULL);
	sq->pc += num_wqebbs;
}

static void
post_progress_params(struct mlx5e_txqsq *sq,
		     struct mlx5e_ktls_offload_context_tx *priv_tx,
		     bool fence)
{
	struct mlx5e_set_tls_progress_params_wqe *wqe;
	u16 pi, num_wqebbs;

	num_wqebbs = MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS;
	pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
	wqe = MLX5E_TLS_FETCH_SET_PROGRESS_PARAMS_WQE(sq, pi);
	mlx5e_ktls_build_progress_params(wqe, sq->pc, sq->sqn, priv_tx->tisn, fence, 0,
					 TLS_OFFLOAD_CTX_DIR_TX);
	tx_fill_wi(sq, pi, num_wqebbs, 0, NULL);
	sq->pc += num_wqebbs;
}

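/* Post the static and progress params WQEs for this connection.
 * @skip_static_post: the record number held by HW is already correct, only
 * the progress params are re-posted. The progress params WQE is fenced
 * unless the static params WQE was posted and already carried the fence
 * requested via @fence_first_post.
 */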
static void
mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
			      struct mlx5e_ktls_offload_context_tx *priv_tx,
			      bool skip_static_post, bool fence_first_post)
{
	bool progress_fence = skip_static_post || !fence_first_post;

	if (!skip_static_post)
		post_static_params(sq, priv_tx, fence_first_post);

	post_progress_params(sq, priv_tx, progress_fence);
}

struct tx_sync_info {
	u64 rcd_sn;
	u32 sync_len;
	int nr_frags;
	skb_frag_t frags[MAX_SKB_FRAGS];
};

enum mlx5e_ktls_sync_retval {
	MLX5E_KTLS_SYNC_DONE,
	MLX5E_KTLS_SYNC_FAIL,
	MLX5E_KTLS_SYNC_SKIP_NO_DATA,
};

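/* Find the TLS record containing @tcp_seq and collect the frags covering
 * the bytes from the record start up to @tcp_seq; these are later replayed
 * to HW as DUMP WQEs during resync. A page reference is taken on every
 * collected frag, and on success the caller owns these references.
 */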
static enum mlx5e_ktls_sync_retval
tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
		 u32 tcp_seq, int datalen, struct tx_sync_info *info)
{
	struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
	enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE;
	struct tls_record_info *record;
	int remaining, i = 0;
	unsigned long flags;
	bool ends_before;

	spin_lock_irqsave(&tx_ctx->lock, flags);
	record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);

	if (unlikely(!record)) {
		ret = MLX5E_KTLS_SYNC_FAIL;
		goto out;
	}

	/* There are the following cases:
	 * 1. packet ends before start marker: bypass offload.
	 * 2. packet starts before start marker and ends after it: drop,
	 *    not supported, breaks contract with kernel.
	 * 3. packet ends before tls record info starts: drop,
	 *    this packet was already acknowledged and its record info
	 *    was released.
	 */
	ends_before = before(tcp_seq + datalen - 1, tls_record_start_seq(record));

	if (unlikely(tls_record_is_start_marker(record))) {
		ret = ends_before ? MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
		goto out;
	} else if (ends_before) {
		ret = MLX5E_KTLS_SYNC_FAIL;
		goto out;
	}

	info->sync_len = tcp_seq - tls_record_start_seq(record);
	remaining = info->sync_len;
	while (remaining > 0) {
		skb_frag_t *frag = &record->frags[i];

		get_page(skb_frag_page(frag));
		remaining -= skb_frag_size(frag);
		info->frags[i++] = *frag;
	}
	/* reduce the part which will be sent with the original SKB */
	if (remaining < 0)
		skb_frag_size_add(&info->frags[i - 1], remaining);
	info->nr_frags = i;
out:
	spin_unlock_irqrestore(&tx_ctx->lock, flags);
	return ret;
}

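/* Re-post the params WQEs with the record number of the record being
 * resynced. When the record number already matches the one in crypto_info,
 * the static params WQE is skipped and only the progress params are posted.
 */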
static void
tx_post_resync_params(struct mlx5e_txqsq *sq,
		      struct mlx5e_ktls_offload_context_tx *priv_tx,
		      u64 rcd_sn)
{
	struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info;
	__be64 rn_be = cpu_to_be64(rcd_sn);
	bool skip_static_post;
	u16 rec_seq_sz;
	char *rec_seq;

	rec_seq = info->rec_seq;
	rec_seq_sz = sizeof(info->rec_seq);

	skip_static_post = !memcmp(rec_seq, &rn_be, rec_seq_sz);
	if (!skip_static_post)
		memcpy(rec_seq, &rn_be, rec_seq_sz);

	mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true);
}

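/* Post a single DUMP WQE for one frag of the record being resynced, directed
 * at the connection's TIS so the device can rebuild the record's crypto
 * state. The DMA mapping and page reference are released on completion, see
 * mlx5e_ktls_tx_handle_resync_dump_comp().
 */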
static int
tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool first)
{
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_dump_wqe *wqe;
	dma_addr_t dma_addr = 0;
	u16 ds_cnt;
	int fsz;
	u16 pi;

	BUILD_BUG_ON(MLX5E_KTLS_DUMP_WQEBBS != 1);
	pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
	wqe = MLX5E_TLS_FETCH_DUMP_WQE(sq, pi);

	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;

	cseg = &wqe->ctrl;
	dseg = &wqe->data;

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8)  | MLX5_OPCODE_DUMP);
	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	cseg->tis_tir_num      = cpu_to_be32(tisn << 8);
	cseg->fm_ce_se         = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;

	fsz = skb_frag_size(frag);
	dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
				    DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
		return -ENOMEM;

	dseg->addr       = cpu_to_be64(dma_addr);
	dseg->lkey       = sq->mkey_be;
	dseg->byte_count = cpu_to_be32(fsz);
	mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);

	tx_fill_wi(sq, pi, MLX5E_KTLS_DUMP_WQEBBS, fsz, skb_frag_page(frag));
	sq->pc += MLX5E_KTLS_DUMP_WQEBBS;

	return 0;
}

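/* Completion handler for a DUMP WQE: unmap its DMA address, drop the page
 * reference taken for the dump, and update the dump statistics.
 */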
void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
					   struct mlx5e_tx_wqe_info *wi,
					   u32 *dma_fifo_cc)
{
	struct mlx5e_sq_stats *stats;
	struct mlx5e_sq_dma *dma;

	dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++);
	stats = sq->stats;

	mlx5e_tx_dma_unmap(sq->pdev, dma);
	put_page(wi->resync_dump_frag_page);
	stats->tls_dump_packets++;
	stats->tls_dump_bytes += wi->num_bytes;
}

static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);

	tx_fill_wi(sq, pi, 1, 0, NULL);

	mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
}

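/* Handle an skb whose TCP sequence differs from the expected one
 * (retransmission or out-of-order xmit): re-post the params with the right
 * record number and replay the already-sent part of the record as DUMP WQEs,
 * splitting each frag to the SQ's HW MTU. The return value tells the caller
 * whether to send the skb with offload, bypass offload (data precedes the
 * offload start marker), or drop it.
 */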
static enum mlx5e_ktls_sync_retval
mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
			 struct mlx5e_txqsq *sq,
			 int datalen,
			 u32 seq)
{
	struct mlx5e_sq_stats *stats = sq->stats;
	enum mlx5e_ktls_sync_retval ret;
	struct tx_sync_info info = {};
	int i = 0;

	ret = tx_sync_info_get(priv_tx, seq, datalen, &info);
	if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) {
		if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) {
			stats->tls_skip_no_sync_data++;
			return MLX5E_KTLS_SYNC_SKIP_NO_DATA;
		}
		/* We might get here if a retransmission reaches the driver
		 * after the relevant record is acked.
		 * It should be safe to drop the packet in this case
		 */
		stats->tls_drop_no_sync_data++;
		goto err_out;
	}

	stats->tls_ooo++;

	tx_post_resync_params(sq, priv_tx, info.rcd_sn);

	/* If no dump WQE was sent, we need to have a fence NOP WQE before the
	 * actual data xmit.
	 */
	if (!info.nr_frags) {
		tx_post_fence_nop(sq);
		return MLX5E_KTLS_SYNC_DONE;
	}

	for (; i < info.nr_frags; i++) {
		unsigned int orig_fsz, frag_offset = 0, n = 0;
		skb_frag_t *f = &info.frags[i];

		orig_fsz = skb_frag_size(f);

		do {
			bool fence = !(i || frag_offset);
			unsigned int fsz;

			n++;
			fsz = min_t(unsigned int, sq->hw_mtu, orig_fsz - frag_offset);
			skb_frag_size_set(f, fsz);
			if (tx_post_resync_dump(sq, f, priv_tx->tisn, fence)) {
				page_ref_add(skb_frag_page(f), n - 1);
				goto err_out;
			}

			skb_frag_off_add(f, fsz);
			frag_offset += fsz;
		} while (frag_offset < orig_fsz);

		page_ref_add(skb_frag_page(f), n - 1);
	}

	return MLX5E_KTLS_SYNC_DONE;

err_out:
	for (; i < info.nr_frags; i++)
		/* The put_page() here undoes the page ref obtained in tx_sync_info_get().
		 * Page refs obtained for the DUMP WQEs above (by page_ref_add) will be
		 * released only upon their completions (or in mlx5e_free_txqsq_descs,
		 * if channel closes).
		 */
		put_page(skb_frag_page(&info.frags[i]));

	return MLX5E_KTLS_SYNC_FAIL;
}

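/* Datapath hook for a kTLS skb. Posts the pending param WQEs on the
 * connection's first transmission, runs the resync flow when the TCP
 * sequence is not the expected one, and passes the TIS number back to the
 * caller for the data WQE. Returns true if the caller should go on and
 * transmit the skb, false if it was dropped here.
 */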
bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq,
			      struct sk_buff *skb, int datalen,
			      struct mlx5e_accel_tx_tls_state *state)
{
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	struct mlx5e_sq_stats *stats = sq->stats;
	u32 seq;

	priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);

	if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx))) {
		mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, false, false);
		stats->tls_ctx++;
	}

	seq = ntohl(tcp_hdr(skb)->seq);
	if (unlikely(priv_tx->expected_seq != seq)) {
		enum mlx5e_ktls_sync_retval ret =
			mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);

		switch (ret) {
		case MLX5E_KTLS_SYNC_DONE:
			break;
		case MLX5E_KTLS_SYNC_SKIP_NO_DATA:
			if (likely(!skb->decrypted))
				goto out;
			WARN_ON_ONCE(1);
			fallthrough;
		case MLX5E_KTLS_SYNC_FAIL:
			goto err_out;
		}
	}

	priv_tx->expected_seq = seq + datalen;

	state->tls_tisn = priv_tx->tisn;

	stats->tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
	stats->tls_encrypted_bytes   += datalen;

out:
	return true;

err_out:
	dev_kfree_skb_any(skb);
	return false;
}