/* Copyright (c) 2018, Mellanox Technologies All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tls.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <net/ip6_checksum.h>

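/* Describe the unconsumed remainder of the walk's current scatterlist
 * entry in @sg and chain the rest of the source list after it, so the
 * AEAD can continue from where the walk stopped.
 */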
static void chain_to_walk(struct scatterlist *sg, struct scatter_walk *walk)
{
	struct scatterlist *src = walk->sg;
	int diff = walk->offset - src->offset;

	sg_set_page(sg, sg_page(src),
		    src->length - diff, walk->offset);

	scatterwalk_crypto_chain(sg, sg_next(src), 2);
}

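/* Re-encrypt one TLS record in software. The record header and explicit
 * IV are copied from @in straight through to @out, the AAD and nonce are
 * rebuilt from the header and @rcd_sn, and the payload is encrypted from
 * @in into @out. *in_len is reduced by the bytes consumed; a partial
 * record leaves *in_len at zero and produces a garbage tag (see below).
 */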
static int tls_enc_record(struct aead_request *aead_req,
			  struct crypto_aead *aead, char *aad,
			  char *iv, __be64 rcd_sn,
			  struct scatter_walk *in,
			  struct scatter_walk *out, int *in_len)
{
	unsigned char buf[TLS_HEADER_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE];
	struct scatterlist sg_in[3];
	struct scatterlist sg_out[3];
	u16 len;
	int rc;

	len = min_t(int, *in_len, ARRAY_SIZE(buf));

	scatterwalk_copychunks(buf, in, len, 0);
	scatterwalk_copychunks(buf, out, len, 1);

	*in_len -= len;
	if (!*in_len)
		return 0;

	scatterwalk_pagedone(in, 0, 1);
	scatterwalk_pagedone(out, 1, 1);

	len = buf[4] | (buf[3] << 8);
	len -= TLS_CIPHER_AES_GCM_128_IV_SIZE;

	tls_make_aad(aad, len - TLS_CIPHER_AES_GCM_128_TAG_SIZE,
		     (char *)&rcd_sn, sizeof(rcd_sn), buf[0],
		     TLS_1_2_VERSION);

	memcpy(iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, buf + TLS_HEADER_SIZE,
	       TLS_CIPHER_AES_GCM_128_IV_SIZE);

	sg_init_table(sg_in, ARRAY_SIZE(sg_in));
	sg_init_table(sg_out, ARRAY_SIZE(sg_out));
	sg_set_buf(sg_in, aad, TLS_AAD_SPACE_SIZE);
	sg_set_buf(sg_out, aad, TLS_AAD_SPACE_SIZE);
	chain_to_walk(sg_in + 1, in);
	chain_to_walk(sg_out + 1, out);

	*in_len -= len;
	if (*in_len < 0) {
		*in_len += TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		/* The input buffer doesn't contain the entire record.
		 * Trim len accordingly. The resulting authentication tag
		 * will contain garbage, but we don't care, so we won't
		 * include any of it in the output skb.
		 * Note that we assume the output buffer length
		 * is larger than input buffer length + tag size.
		 */
		if (*in_len < 0)
			len += *in_len;

		*in_len = 0;
	}

	if (*in_len) {
		scatterwalk_copychunks(NULL, in, len, 2);
		scatterwalk_pagedone(in, 0, 1);
		scatterwalk_copychunks(NULL, out, len, 2);
		scatterwalk_pagedone(out, 1, 1);
	}

	len -= TLS_CIPHER_AES_GCM_128_TAG_SIZE;
	aead_request_set_crypt(aead_req, sg_in, sg_out, len, iv);

	rc = crypto_aead_encrypt(aead_req);

	return rc;
}

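/* Attach the transform and the fixed AAD length to a freshly allocated
 * request.
 */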
static void tls_init_aead_request(struct aead_request *aead_req,
				  struct crypto_aead *aead)
{
	aead_request_set_tfm(aead_req, aead);
	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
}

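/* Allocate an aead_request large enough for @aead's private context and
 * initialize it; returns NULL on allocation failure.
 */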
static struct aead_request *tls_alloc_aead_request(struct crypto_aead *aead,
						   gfp_t flags)
{
	unsigned int req_size = sizeof(struct aead_request) +
		crypto_aead_reqsize(aead);
	struct aead_request *aead_req;

	aead_req = kzalloc(req_size, flags);
	if (aead_req)
		tls_init_aead_request(aead_req, aead);
	return aead_req;
}

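/* Encrypt all TLS records described by @sg_in into @sg_out, one
 * tls_enc_record() call per record, incrementing the record sequence
 * number as we go. Stops on the first error.
 */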
static int tls_enc_records(struct aead_request *aead_req,
			   struct crypto_aead *aead, struct scatterlist *sg_in,
			   struct scatterlist *sg_out, char *aad, char *iv,
			   u64 rcd_sn, int len)
{
	struct scatter_walk out, in;
	int rc;

	scatterwalk_start(&in, sg_in);
	scatterwalk_start(&out, sg_out);

	do {
		rc = tls_enc_record(aead_req, aead, aad, iv,
				    cpu_to_be64(rcd_sn), &in, &out, &len);
		rcd_sn++;

	} while (rc == 0 && len);

	scatterwalk_done(&in, 0, 0);
	scatterwalk_done(&out, 1, 0);

	return rc;
}

/* Can't use icsk->icsk_af_ops->send_check here because the ip addresses
 * might have been changed by NAT.
 */
static void update_chksum(struct sk_buff *skb, int headln)
{
	struct tcphdr *th = tcp_hdr(skb);
	int datalen = skb->len - headln;
	const struct ipv6hdr *ipv6h;
	const struct iphdr *iph;

	/* We only changed the payload so if we are using partial we don't
	 * need to update anything.
	 */
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
		return;

	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);

	if (skb->sk->sk_family == AF_INET6) {
		ipv6h = ipv6_hdr(skb);
		th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
					     datalen, IPPROTO_TCP, 0);
	} else {
		iph = ip_hdr(skb);
		th->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, datalen,
					       IPPROTO_TCP, 0);
	}
}

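/* Finish the re-encrypted skb: copy header state and protocol headers
 * from the original, move socket ownership across, recompute the TCP
 * pseudo-header checksum, and charge any truesize delta to the socket's
 * write-memory accounting.
 */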
static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln)
{
	struct sock *sk = skb->sk;
	int delta;

	skb_copy_header(nskb, skb);

	skb_put(nskb, skb->len);
	memcpy(nskb->data, skb->data, headln);

	nskb->destructor = skb->destructor;
	nskb->sk = sk;
	skb->destructor = NULL;
	skb->sk = NULL;

	update_chksum(nskb, headln);

	/* sock_efree means skb must have gone through skb_orphan_partial() */
	if (nskb->destructor == sock_efree)
		return;

	delta = nskb->truesize - skb->truesize;
	if (likely(delta < 0))
		WARN_ON_ONCE(refcount_sub_and_test(-delta, &sk->sk_wmem_alloc));
	else if (delta)
		refcount_add(delta, &sk->sk_wmem_alloc);
}

/* This function may be called after the user socket is already
 * closed, so make sure we don't use anything freed during
 * tls_sk_proto_close here.
 */

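/* Build @sg_in to cover the already-transmitted part of the TLS record
 * that precedes this skb (taken from the offload record queue) followed
 * by the skb's own TCP payload. On success, *rcd_sn holds the record
 * sequence number, *sync_size the number of record bytes preceding this
 * packet's payload, and *resync_sgs the number of record-frag entries
 * used (their pages are referenced and must be released by the caller).
 */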
static int fill_sg_in(struct scatterlist *sg_in,
		      struct sk_buff *skb,
		      struct tls_offload_context_tx *ctx,
		      u64 *rcd_sn,
		      s32 *sync_size,
		      int *resync_sgs)
{
	int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int payload_len = skb->len - tcp_payload_offset;
	u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
	struct tls_record_info *record;
	unsigned long flags;
	int remaining;
	int i;

	spin_lock_irqsave(&ctx->lock, flags);
	record = tls_get_record(ctx, tcp_seq, rcd_sn);
	if (!record) {
		spin_unlock_irqrestore(&ctx->lock, flags);
		return -EINVAL;
	}

	*sync_size = tcp_seq - tls_record_start_seq(record);
	if (*sync_size < 0) {
		int is_start_marker = tls_record_is_start_marker(record);

		spin_unlock_irqrestore(&ctx->lock, flags);
		/* This should only occur if the relevant record was
		 * already acked. In that case it should be ok
		 * to drop the packet and avoid retransmission.
		 *
		 * There is a corner case where the packet contains
		 * both an acked and a non-acked record.
		 * We currently don't handle that case and rely
		 * on TCP to retransmit a packet that doesn't contain
		 * already acked payload.
		 */
		if (!is_start_marker)
			*sync_size = 0;
		return -EINVAL;
	}

	remaining = *sync_size;
	for (i = 0; remaining > 0; i++) {
		skb_frag_t *frag = &record->frags[i];

		__skb_frag_ref(frag);
		sg_set_page(sg_in + i, skb_frag_page(frag),
			    skb_frag_size(frag), skb_frag_off(frag));

		remaining -= skb_frag_size(frag);

		if (remaining < 0)
			sg_in[i].length += remaining;
	}
	*resync_sgs = i;

	spin_unlock_irqrestore(&ctx->lock, flags);
	if (skb_to_sgvec(skb, &sg_in[i], tcp_payload_offset, payload_len) < 0)
		return -EINVAL;

	return 0;
}

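/* Lay out the encryption destination: the record prefix is re-encrypted
 * into scratch space (@dummy_buf) and discarded, the skb payload lands
 * in the new skb, and the trailing entry absorbs the authentication tag.
 */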
static void fill_sg_out(struct scatterlist sg_out[3], void *buf,
			struct tls_context *tls_ctx,
			struct sk_buff *nskb,
			int tcp_payload_offset,
			int payload_len,
			int sync_size,
			void *dummy_buf)
{
	sg_set_buf(&sg_out[0], dummy_buf, sync_size);
	sg_set_buf(&sg_out[1], nskb->data + tcp_payload_offset, payload_len);
	/* Add room for authentication tag produced by crypto */
	dummy_buf += sync_size;
	sg_set_buf(&sg_out[2], dummy_buf, TLS_CIPHER_AES_GCM_128_TAG_SIZE);
}

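/* Allocate the replacement skb plus a scratch buffer holding the salt,
 * IV, AAD and a dummy area for the re-encrypted record prefix, then run
 * the software encryption over @sg_in. Returns the completed skb, or
 * NULL on failure.
 */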
static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
				   struct scatterlist sg_out[3],
				   struct scatterlist *sg_in,
				   struct sk_buff *skb,
				   s32 sync_size, u64 rcd_sn)
{
	int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
	int payload_len = skb->len - tcp_payload_offset;
	void *buf, *iv, *aad, *dummy_buf;
	struct aead_request *aead_req;
	struct sk_buff *nskb = NULL;
	int buf_len;

	aead_req = tls_alloc_aead_request(ctx->aead_send, GFP_ATOMIC);
	if (!aead_req)
		return NULL;

	buf_len = TLS_CIPHER_AES_GCM_128_SALT_SIZE +
		  TLS_CIPHER_AES_GCM_128_IV_SIZE +
		  TLS_AAD_SPACE_SIZE +
		  sync_size +
		  TLS_CIPHER_AES_GCM_128_TAG_SIZE;
	buf = kmalloc(buf_len, GFP_ATOMIC);
	if (!buf)
		goto free_req;

	iv = buf;
	memcpy(iv, tls_ctx->crypto_send.aes_gcm_128.salt,
	       TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	aad = buf + TLS_CIPHER_AES_GCM_128_SALT_SIZE +
	      TLS_CIPHER_AES_GCM_128_IV_SIZE;
	dummy_buf = aad + TLS_AAD_SPACE_SIZE;

	nskb = alloc_skb(skb_headroom(skb) + skb->len, GFP_ATOMIC);
	if (!nskb)
		goto free_buf;

	skb_reserve(nskb, skb_headroom(skb));

	fill_sg_out(sg_out, buf, tls_ctx, nskb, tcp_payload_offset,
		    payload_len, sync_size, dummy_buf);

	if (tls_enc_records(aead_req, ctx->aead_send, sg_in, sg_out, aad, iv,
			    rcd_sn, sync_size + payload_len) < 0)
		goto free_nskb;

	complete_skb(nskb, skb, tcp_payload_offset);

	/* validate_xmit_skb_list assumes that if the skb wasn't segmented
	 * nskb->prev will point to the skb itself
	 */
	nskb->prev = nskb;

free_buf:
	kfree(buf);
free_req:
	kfree(aead_req);
	return nskb;
free_nskb:
	kfree_skb(nskb);
	nskb = NULL;
	goto free_buf;
}

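/* Software fallback: re-encrypt the TLS payload of @skb with the kernel
 * crypto AEAD and return a replacement skb (the original is released).
 * skbs with no TCP payload are passed through untouched.
 */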
static struct sk_buff *tls_sw_fallback(struct sock *sk, struct sk_buff *skb)
{
	int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
	int payload_len = skb->len - tcp_payload_offset;
	struct scatterlist *sg_in, sg_out[3];
	struct sk_buff *nskb = NULL;
	int sg_in_max_elements;
	int resync_sgs = 0;
	s32 sync_size = 0;
	u64 rcd_sn;

	/* worst case is:
	 * MAX_SKB_FRAGS in tls_record_info
	 * MAX_SKB_FRAGS + 1 in SKB head and frags.
	 */
	sg_in_max_elements = 2 * MAX_SKB_FRAGS + 1;

	if (!payload_len)
		return skb;

	sg_in = kmalloc_array(sg_in_max_elements, sizeof(*sg_in), GFP_ATOMIC);
	if (!sg_in)
		goto free_orig;

	sg_init_table(sg_in, sg_in_max_elements);
	sg_init_table(sg_out, ARRAY_SIZE(sg_out));

	if (fill_sg_in(sg_in, skb, ctx, &rcd_sn, &sync_size, &resync_sgs)) {
		/* bypass packets before kernel TLS socket option was set */
		if (sync_size < 0 && payload_len <= -sync_size)
			nskb = skb_get(skb);
		goto put_sg;
	}

	nskb = tls_enc_skb(tls_ctx, sg_out, sg_in, skb, sync_size, rcd_sn);

put_sg:
	while (resync_sgs)
		put_page(sg_page(&sg_in[--resync_sgs]));
	kfree(sg_in);
free_orig:
	if (nskb)
		consume_skb(skb);
	else
		kfree_skb(skb);
	return nskb;
}

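/* Transmit-time validation hook: traffic for the device that holds the
 * TLS offload state passes through unchanged; anything routed elsewhere
 * is encrypted in software first.
 */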
struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
				      struct net_device *dev,
				      struct sk_buff *skb)
{
	if (dev == tls_get_ctx(sk)->netdev)
		return skb;

	return tls_sw_fallback(sk, skb);
}
EXPORT_SYMBOL_GPL(tls_validate_xmit_skb);

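/* Variant that unconditionally takes the software fallback path. */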
struct sk_buff *tls_validate_xmit_skb_sw(struct sock *sk,
					 struct net_device *dev,
					 struct sk_buff *skb)
{
	return tls_sw_fallback(sk, skb);
}

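/* Force software encryption of @skb regardless of which device it is
 * headed to; exported for drivers that invoke the fallback directly.
 */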
struct sk_buff *tls_encrypt_skb(struct sk_buff *skb)
{
	return tls_sw_fallback(skb->sk, skb);
}
EXPORT_SYMBOL_GPL(tls_encrypt_skb);

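/* Allocate and key the software "gcm(aes)" AEAD that backs the fallback
 * path, using the same AES-GCM-128 key that was handed to the device.
 */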
int tls_sw_fallback_init(struct sock *sk,
			 struct tls_offload_context_tx *offload_ctx,
			 struct tls_crypto_info *crypto_info)
{
	const u8 *key;
	int rc;

	offload_ctx->aead_send =
	    crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(offload_ctx->aead_send)) {
		rc = PTR_ERR(offload_ctx->aead_send);
		pr_err_ratelimited("crypto_alloc_aead failed rc=%d\n", rc);
		offload_ctx->aead_send = NULL;
		goto err_out;
	}

	key = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->key;

	rc = crypto_aead_setkey(offload_ctx->aead_send, key,
				TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(offload_ctx->aead_send,
				     TLS_CIPHER_AES_GCM_128_TAG_SIZE);
	if (rc)
		goto free_aead;

	return 0;
free_aead:
	crypto_free_aead(offload_ctx->aead_send);
err_out:
	return rc;
}