xref: /OK3568_Linux_fs/kernel/net/ipv6/esp6.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (C)2002 USAGI/WIDE Project
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Authors
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  *	Mitsuru KANDA @USAGI       : IPv6 Support
8*4882a593Smuzhiyun  *	Kazunori MIYAZAWA @USAGI   :
9*4882a593Smuzhiyun  *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
10*4882a593Smuzhiyun  *
11*4882a593Smuzhiyun  *	This file is derived from net/ipv4/esp.c
12*4882a593Smuzhiyun  */
13*4882a593Smuzhiyun 
14*4882a593Smuzhiyun #define pr_fmt(fmt) "IPv6: " fmt
15*4882a593Smuzhiyun 
16*4882a593Smuzhiyun #include <crypto/aead.h>
17*4882a593Smuzhiyun #include <crypto/authenc.h>
18*4882a593Smuzhiyun #include <linux/err.h>
19*4882a593Smuzhiyun #include <linux/module.h>
20*4882a593Smuzhiyun #include <net/ip.h>
21*4882a593Smuzhiyun #include <net/xfrm.h>
22*4882a593Smuzhiyun #include <net/esp.h>
23*4882a593Smuzhiyun #include <linux/scatterlist.h>
24*4882a593Smuzhiyun #include <linux/kernel.h>
25*4882a593Smuzhiyun #include <linux/pfkeyv2.h>
26*4882a593Smuzhiyun #include <linux/random.h>
27*4882a593Smuzhiyun #include <linux/slab.h>
28*4882a593Smuzhiyun #include <linux/spinlock.h>
29*4882a593Smuzhiyun #include <net/ip6_checksum.h>
30*4882a593Smuzhiyun #include <net/ip6_route.h>
31*4882a593Smuzhiyun #include <net/icmp.h>
32*4882a593Smuzhiyun #include <net/ipv6.h>
33*4882a593Smuzhiyun #include <net/protocol.h>
34*4882a593Smuzhiyun #include <net/udp.h>
35*4882a593Smuzhiyun #include <linux/icmpv6.h>
36*4882a593Smuzhiyun #include <net/tcp.h>
37*4882a593Smuzhiyun #include <net/espintcp.h>
38*4882a593Smuzhiyun #include <net/inet6_hashtables.h>
39*4882a593Smuzhiyun 
40*4882a593Smuzhiyun #include <linux/highmem.h>
41*4882a593Smuzhiyun 
/* Per-skb private ESP state, stored in skb->cb.
 * xfrm must be the first member so the same cb area can also be viewed
 * through XFRM_SKB_CB().
 */
struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;	/* scratch buffer from esp_alloc_tmp(), freed on completion */
};

/* Extra per-packet output state used when extended sequence numbers
 * (ESN) are enabled on the state.
 */
struct esp_output_extra {
	__be32 seqhi;	/* word saved when the header is shifted for the high seq bits */
	u32 esphoff;	/* ESP header offset from the transport header */
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
53*4882a593Smuzhiyun 
54*4882a593Smuzhiyun /*
55*4882a593Smuzhiyun  * Allocate an AEAD request structure with extra space for SG and IV.
56*4882a593Smuzhiyun  *
57*4882a593Smuzhiyun  * For alignment considerations the upper 32 bits of the sequence number are
58*4882a593Smuzhiyun  * placed at the front, if present. Followed by the IV, the request and finally
59*4882a593Smuzhiyun  * the SG list.
60*4882a593Smuzhiyun  *
61*4882a593Smuzhiyun  * TODO: Use spare space in skb for this where possible.
62*4882a593Smuzhiyun  */
esp_alloc_tmp(struct crypto_aead * aead,int nfrags,int seqihlen)63*4882a593Smuzhiyun static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqihlen)
64*4882a593Smuzhiyun {
65*4882a593Smuzhiyun 	unsigned int len;
66*4882a593Smuzhiyun 
67*4882a593Smuzhiyun 	len = seqihlen;
68*4882a593Smuzhiyun 
69*4882a593Smuzhiyun 	len += crypto_aead_ivsize(aead);
70*4882a593Smuzhiyun 
71*4882a593Smuzhiyun 	if (len) {
72*4882a593Smuzhiyun 		len += crypto_aead_alignmask(aead) &
73*4882a593Smuzhiyun 		       ~(crypto_tfm_ctx_alignment() - 1);
74*4882a593Smuzhiyun 		len = ALIGN(len, crypto_tfm_ctx_alignment());
75*4882a593Smuzhiyun 	}
76*4882a593Smuzhiyun 
77*4882a593Smuzhiyun 	len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
78*4882a593Smuzhiyun 	len = ALIGN(len, __alignof__(struct scatterlist));
79*4882a593Smuzhiyun 
80*4882a593Smuzhiyun 	len += sizeof(struct scatterlist) * nfrags;
81*4882a593Smuzhiyun 
82*4882a593Smuzhiyun 	return kmalloc(len, GFP_ATOMIC);
83*4882a593Smuzhiyun }
84*4882a593Smuzhiyun 
/* Start of the ESN extra area inside an esp_alloc_tmp() buffer. */
static inline void *esp_tmp_extra(void *tmp)
{
	return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
}
89*4882a593Smuzhiyun 
esp_tmp_iv(struct crypto_aead * aead,void * tmp,int seqhilen)90*4882a593Smuzhiyun static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
91*4882a593Smuzhiyun {
92*4882a593Smuzhiyun 	return crypto_aead_ivsize(aead) ?
93*4882a593Smuzhiyun 	       PTR_ALIGN((u8 *)tmp + seqhilen,
94*4882a593Smuzhiyun 			 crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
95*4882a593Smuzhiyun }
96*4882a593Smuzhiyun 
/* Locate the AEAD request (which follows the IV) inside an
 * esp_alloc_tmp() buffer and bind it to the transform.
 */
static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	u8 *after_iv = iv + crypto_aead_ivsize(aead);
	struct aead_request *req =
		(void *)PTR_ALIGN(after_iv, crypto_tfm_ctx_alignment());

	aead_request_set_tfm(req, aead);
	return req;
}
106*4882a593Smuzhiyun 
esp_req_sg(struct crypto_aead * aead,struct aead_request * req)107*4882a593Smuzhiyun static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
108*4882a593Smuzhiyun 					     struct aead_request *req)
109*4882a593Smuzhiyun {
110*4882a593Smuzhiyun 	return (void *)ALIGN((unsigned long)(req + 1) +
111*4882a593Smuzhiyun 			     crypto_aead_reqsize(aead),
112*4882a593Smuzhiyun 			     __alignof__(struct scatterlist));
113*4882a593Smuzhiyun }
114*4882a593Smuzhiyun 
esp_ssg_unref(struct xfrm_state * x,void * tmp)115*4882a593Smuzhiyun static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
116*4882a593Smuzhiyun {
117*4882a593Smuzhiyun 	struct esp_output_extra *extra = esp_tmp_extra(tmp);
118*4882a593Smuzhiyun 	struct crypto_aead *aead = x->data;
119*4882a593Smuzhiyun 	int extralen = 0;
120*4882a593Smuzhiyun 	u8 *iv;
121*4882a593Smuzhiyun 	struct aead_request *req;
122*4882a593Smuzhiyun 	struct scatterlist *sg;
123*4882a593Smuzhiyun 
124*4882a593Smuzhiyun 	if (x->props.flags & XFRM_STATE_ESN)
125*4882a593Smuzhiyun 		extralen += sizeof(*extra);
126*4882a593Smuzhiyun 
127*4882a593Smuzhiyun 	iv = esp_tmp_iv(aead, tmp, extralen);
128*4882a593Smuzhiyun 	req = esp_tmp_req(aead, iv);
129*4882a593Smuzhiyun 
130*4882a593Smuzhiyun 	/* Unref skb_frag_pages in the src scatterlist if necessary.
131*4882a593Smuzhiyun 	 * Skip the first sg which comes from skb->data.
132*4882a593Smuzhiyun 	 */
133*4882a593Smuzhiyun 	if (req->src != req->dst)
134*4882a593Smuzhiyun 		for (sg = sg_next(req->src); sg; sg = sg_next(sg))
135*4882a593Smuzhiyun 			put_page(sg_page(sg));
136*4882a593Smuzhiyun }
137*4882a593Smuzhiyun 
138*4882a593Smuzhiyun #ifdef CONFIG_INET6_ESPINTCP
/* Deferred release of a cached ESP-in-TCP socket reference. */
struct esp_tcp_sk {
	struct sock *sk;
	struct rcu_head rcu;
};

/* RCU callback: drop the socket reference after a grace period. */
static void esp_free_tcp_sk(struct rcu_head *head)
{
	struct esp_tcp_sk *esk = container_of(head, struct esp_tcp_sk, rcu);

	sock_put(esk->sk);
	kfree(esk);
}
151*4882a593Smuzhiyun 
/* Find the TCP socket carrying ESP-in-TCP for this state.
 *
 * Runs under rcu_read_lock().  Reuses the socket cached in
 * x->encap_sk while it is still ESTABLISHED; otherwise invalidates the
 * cache and looks the socket up again from the encap template's ports.
 * Returns a socket or an ERR_PTR.
 */
static struct sock *esp6_find_tcp_sk(struct xfrm_state *x)
{
	struct xfrm_encap_tmpl *encap = x->encap;
	struct esp_tcp_sk *esk;
	__be16 sport, dport;
	struct sock *nsk;
	struct sock *sk;

	/* Fast path: cached socket still usable. */
	sk = rcu_dereference(x->encap_sk);
	if (sk && sk->sk_state == TCP_ESTABLISHED)
		return sk;

	spin_lock_bh(&x->lock);
	sport = encap->encap_sport;
	dport = encap->encap_dport;
	nsk = rcu_dereference_protected(x->encap_sk,
					lockdep_is_held(&x->lock));
	if (sk && sk == nsk) {
		/* Cached socket went stale: drop it from the cache and
		 * release its reference after an RCU grace period.
		 */
		esk = kmalloc(sizeof(*esk), GFP_ATOMIC);
		if (!esk) {
			spin_unlock_bh(&x->lock);
			return ERR_PTR(-ENOMEM);
		}
		RCU_INIT_POINTER(x->encap_sk, NULL);
		esk->sk = sk;
		call_rcu(&esk->rcu, esp_free_tcp_sk);
	}
	spin_unlock_bh(&x->lock);

	sk = __inet6_lookup_established(xs_net(x), &tcp_hashinfo, &x->id.daddr.in6,
					dport, &x->props.saddr.in6, ntohs(sport), 0, 0);
	if (!sk)
		return ERR_PTR(-ENOENT);

	/* Only sockets running the espintcp ULP are usable here. */
	if (!tcp_is_ulp_esp(sk)) {
		sock_put(sk);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_bh(&x->lock);
	nsk = rcu_dereference_protected(x->encap_sk,
					lockdep_is_held(&x->lock));
	if (encap->encap_sport != sport ||
	    encap->encap_dport != dport) {
		/* Template ports changed under us: fall back to whatever
		 * is cached now, or report the change to the caller.
		 */
		sock_put(sk);
		sk = nsk ?: ERR_PTR(-EREMCHG);
	} else if (sk == nsk) {
		/* Someone else already cached the same socket. */
		sock_put(sk);
	} else {
		rcu_assign_pointer(x->encap_sk, sk);
	}
	spin_unlock_bh(&x->lock);

	return sk;
}
207*4882a593Smuzhiyun 
/* Hand an encrypted ESP packet to the ESP-in-TCP socket: push it
 * directly unless the socket is owned by user context, in which case
 * it is queued for later transmission.
 */
static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb)
{
	struct sock *sk;
	int err;

	rcu_read_lock();

	sk = esp6_find_tcp_sk(x);
	err = PTR_ERR_OR_ZERO(sk);
	if (!err) {
		bh_lock_sock(sk);
		if (sock_owned_by_user(sk))
			err = espintcp_queue_out(sk, skb);
		else
			err = espintcp_push_skb(sk, skb);
		bh_unlock_sock(sk);
	}

	rcu_read_unlock();
	return err;
}
231*4882a593Smuzhiyun 
esp_output_tcp_encap_cb(struct net * net,struct sock * sk,struct sk_buff * skb)232*4882a593Smuzhiyun static int esp_output_tcp_encap_cb(struct net *net, struct sock *sk,
233*4882a593Smuzhiyun 				   struct sk_buff *skb)
234*4882a593Smuzhiyun {
235*4882a593Smuzhiyun 	struct dst_entry *dst = skb_dst(skb);
236*4882a593Smuzhiyun 	struct xfrm_state *x = dst->xfrm;
237*4882a593Smuzhiyun 
238*4882a593Smuzhiyun 	return esp_output_tcp_finish(x, skb);
239*4882a593Smuzhiyun }
240*4882a593Smuzhiyun 
esp_output_tail_tcp(struct xfrm_state * x,struct sk_buff * skb)241*4882a593Smuzhiyun static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
242*4882a593Smuzhiyun {
243*4882a593Smuzhiyun 	int err;
244*4882a593Smuzhiyun 
245*4882a593Smuzhiyun 	local_bh_disable();
246*4882a593Smuzhiyun 	err = xfrm_trans_queue_net(xs_net(x), skb, esp_output_tcp_encap_cb);
247*4882a593Smuzhiyun 	local_bh_enable();
248*4882a593Smuzhiyun 
249*4882a593Smuzhiyun 	/* EINPROGRESS just happens to do the right thing.  It
250*4882a593Smuzhiyun 	 * actually means that the skb has been consumed and
251*4882a593Smuzhiyun 	 * isn't coming back.
252*4882a593Smuzhiyun 	 */
253*4882a593Smuzhiyun 	return err ?: -EINPROGRESS;
254*4882a593Smuzhiyun }
255*4882a593Smuzhiyun #else
/* ESP-in-TCP not compiled in: drop the packet and report no support. */
static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
	kfree_skb(skb);

	return -EOPNOTSUPP;
}
262*4882a593Smuzhiyun #endif
263*4882a593Smuzhiyun 
esp_output_encap_csum(struct sk_buff * skb)264*4882a593Smuzhiyun static void esp_output_encap_csum(struct sk_buff *skb)
265*4882a593Smuzhiyun {
266*4882a593Smuzhiyun 	/* UDP encap with IPv6 requires a valid checksum */
267*4882a593Smuzhiyun 	if (*skb_mac_header(skb) == IPPROTO_UDP) {
268*4882a593Smuzhiyun 		struct udphdr *uh = udp_hdr(skb);
269*4882a593Smuzhiyun 		struct ipv6hdr *ip6h = ipv6_hdr(skb);
270*4882a593Smuzhiyun 		int len = ntohs(uh->len);
271*4882a593Smuzhiyun 		unsigned int offset = skb_transport_offset(skb);
272*4882a593Smuzhiyun 		__wsum csum = skb_checksum(skb, offset, skb->len - offset, 0);
273*4882a593Smuzhiyun 
274*4882a593Smuzhiyun 		uh->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
275*4882a593Smuzhiyun 					    len, IPPROTO_UDP, csum);
276*4882a593Smuzhiyun 		if (uh->check == 0)
277*4882a593Smuzhiyun 			uh->check = CSUM_MANGLED_0;
278*4882a593Smuzhiyun 	}
279*4882a593Smuzhiyun }
280*4882a593Smuzhiyun 
/* AEAD completion callback for asynchronous ESP output.
 *
 * Frees the scratch buffer (dropping page refs held by a separate
 * destination scatterlist), fixes up the UDP-encap checksum, then
 * resumes transmission: through the device-offload resume hook when
 * XFRM_DEV_RESUME is set, otherwise via the normal xfrm output path or
 * ESP-in-TCP queueing.
 */
static void esp_output_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct xfrm_offload *xo = xfrm_offload(skb);
	void *tmp;
	struct xfrm_state *x;

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		/* hw-offload resume: the state lives at the end of the
		 * sec_path, not on the dst
		 */
		struct sec_path *sp = skb_sec_path(skb);

		x = sp->xvec[sp->len - 1];
	} else {
		x = skb_dst(skb)->xfrm;
	}

	tmp = ESP_SKB_CB(skb)->tmp;
	esp_ssg_unref(x, tmp);
	kfree(tmp);

	esp_output_encap_csum(skb);

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		if (err) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));
		secpath_reset(skb);
		xfrm_dev_resume(skb);
	} else {
		if (!err &&
		    x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
			esp_output_tail_tcp(x, skb);
		else
			xfrm_output_resume(skb, err);
	}
}
320*4882a593Smuzhiyun 
321*4882a593Smuzhiyun /* Move ESP header back into place. */
esp_restore_header(struct sk_buff * skb,unsigned int offset)322*4882a593Smuzhiyun static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
323*4882a593Smuzhiyun {
324*4882a593Smuzhiyun 	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
325*4882a593Smuzhiyun 	void *tmp = ESP_SKB_CB(skb)->tmp;
326*4882a593Smuzhiyun 	__be32 *seqhi = esp_tmp_extra(tmp);
327*4882a593Smuzhiyun 
328*4882a593Smuzhiyun 	esph->seq_no = esph->spi;
329*4882a593Smuzhiyun 	esph->spi = *seqhi;
330*4882a593Smuzhiyun }
331*4882a593Smuzhiyun 
esp_output_restore_header(struct sk_buff * skb)332*4882a593Smuzhiyun static void esp_output_restore_header(struct sk_buff *skb)
333*4882a593Smuzhiyun {
334*4882a593Smuzhiyun 	void *tmp = ESP_SKB_CB(skb)->tmp;
335*4882a593Smuzhiyun 	struct esp_output_extra *extra = esp_tmp_extra(tmp);
336*4882a593Smuzhiyun 
337*4882a593Smuzhiyun 	esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
338*4882a593Smuzhiyun 				sizeof(__be32));
339*4882a593Smuzhiyun }
340*4882a593Smuzhiyun 
/* Write the SPI (and, for ESN, the high sequence bits) into the ESP
 * header.  Returns the (possibly moved) header pointer; the original
 * location and overwritten word are saved in @extra so the shift can
 * be undone after encryption.
 */
static struct ip_esp_hdr *esp_output_set_esn(struct sk_buff *skb,
					     struct xfrm_state *x,
					     struct ip_esp_hdr *esph,
					     struct esp_output_extra *extra)
{
	/* For ESN we move the header forward by 4 bytes to
	 * accomodate the high bits.  We will move it back after
	 * encryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		__u32 seqhi;
		struct xfrm_offload *xo = xfrm_offload(skb);

		/* GSO/offload path carries the sequence in xo, otherwise
		 * it is in the xfrm cb.
		 */
		if (xo)
			seqhi = xo->seq.hi;
		else
			seqhi = XFRM_SKB_CB(skb)->seq.output.hi;

		extra->esphoff = (unsigned char *)esph -
				 skb_transport_header(skb);
		esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
		/* save the word the shifted header now occupies */
		extra->seqhi = esph->spi;
		esph->seq_no = htonl(seqhi);
	}

	esph->spi = x->id.spi;

	return esph;
}
370*4882a593Smuzhiyun 
/* Completion callback used instead of esp_output_done() for ESN
 * states: moves the ESP header back into place first.
 */
static void esp_output_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_output_restore_header(skb);
	esp_output_done(base, err);
}
378*4882a593Smuzhiyun 
/* Build the UDP encapsulation header in front of the ESP header.
 * esp->esph points at the space reserved for the UDP header; returns
 * where the ESP header itself starts, or -EMSGSIZE if the datagram
 * would not fit in the 16-bit UDP length field.
 */
static struct ip_esp_hdr *esp6_output_udp_encap(struct sk_buff *skb,
					       int encap_type,
					       struct esp_info *esp,
					       __be16 sport,
					       __be16 dport)
{
	unsigned int len = skb->len + esp->tailen - skb_transport_offset(skb);
	__be32 *udpdata32;
	struct udphdr *uh;

	if (len > U16_MAX)
		return ERR_PTR(-EMSGSIZE);

	uh = (struct udphdr *)esp->esph;
	uh->source = sport;
	uh->dest = dport;
	uh->len = htons(len);
	uh->check = 0;

	*skb_mac_header(skb) = IPPROTO_UDP;

	if (encap_type != UDP_ENCAP_ESPINUDP_NON_IKE)
		return (struct ip_esp_hdr *)(uh + 1);

	/* NON-IKE variant carries two zero marker words between the UDP
	 * and ESP headers.
	 */
	udpdata32 = (__be32 *)(uh + 1);
	udpdata32[0] = 0;
	udpdata32[1] = 0;
	return (struct ip_esp_hdr *)(udpdata32 + 2);
}
409*4882a593Smuzhiyun 
410*4882a593Smuzhiyun #ifdef CONFIG_INET6_ESPINTCP
/* Prepare ESP-in-TCP encapsulation: esp->esph points at the 2-byte
 * length prefix that precedes the ESP header in the TCP stream.
 * Fills in the length and returns the ESP header location, or an
 * ERR_PTR (oversized packet / no usable socket).
 */
static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x,
						struct sk_buff *skb,
						struct esp_info *esp)
{
	__be16 *lenp = (void *)esp->esph;
	struct ip_esp_hdr *esph;
	unsigned int len;
	struct sock *sk;

	len = skb->len + esp->tailen - skb_transport_offset(skb);
	if (len > IP_MAX_MTU)
		return ERR_PTR(-EMSGSIZE);

	/* Fail early if there is no established espintcp socket. */
	rcu_read_lock();
	sk = esp6_find_tcp_sk(x);
	rcu_read_unlock();

	if (IS_ERR(sk))
		return ERR_CAST(sk);

	*lenp = htons(len);
	esph = (struct ip_esp_hdr *)(lenp + 1);

	return esph;
}
436*4882a593Smuzhiyun #else
/* ESP-in-TCP not compiled in. */
static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x,
						struct sk_buff *skb,
						struct esp_info *esp)
{
	return ERR_PTR(-EOPNOTSUPP);
}
443*4882a593Smuzhiyun #endif
444*4882a593Smuzhiyun 
esp6_output_encap(struct xfrm_state * x,struct sk_buff * skb,struct esp_info * esp)445*4882a593Smuzhiyun static int esp6_output_encap(struct xfrm_state *x, struct sk_buff *skb,
446*4882a593Smuzhiyun 			    struct esp_info *esp)
447*4882a593Smuzhiyun {
448*4882a593Smuzhiyun 	struct xfrm_encap_tmpl *encap = x->encap;
449*4882a593Smuzhiyun 	struct ip_esp_hdr *esph;
450*4882a593Smuzhiyun 	__be16 sport, dport;
451*4882a593Smuzhiyun 	int encap_type;
452*4882a593Smuzhiyun 
453*4882a593Smuzhiyun 	spin_lock_bh(&x->lock);
454*4882a593Smuzhiyun 	sport = encap->encap_sport;
455*4882a593Smuzhiyun 	dport = encap->encap_dport;
456*4882a593Smuzhiyun 	encap_type = encap->encap_type;
457*4882a593Smuzhiyun 	spin_unlock_bh(&x->lock);
458*4882a593Smuzhiyun 
459*4882a593Smuzhiyun 	switch (encap_type) {
460*4882a593Smuzhiyun 	default:
461*4882a593Smuzhiyun 	case UDP_ENCAP_ESPINUDP:
462*4882a593Smuzhiyun 	case UDP_ENCAP_ESPINUDP_NON_IKE:
463*4882a593Smuzhiyun 		esph = esp6_output_udp_encap(skb, encap_type, esp, sport, dport);
464*4882a593Smuzhiyun 		break;
465*4882a593Smuzhiyun 	case TCP_ENCAP_ESPINTCP:
466*4882a593Smuzhiyun 		esph = esp6_output_tcp_encap(x, skb, esp);
467*4882a593Smuzhiyun 		break;
468*4882a593Smuzhiyun 	}
469*4882a593Smuzhiyun 
470*4882a593Smuzhiyun 	if (IS_ERR(esph))
471*4882a593Smuzhiyun 		return PTR_ERR(esph);
472*4882a593Smuzhiyun 
473*4882a593Smuzhiyun 	esp->esph = esph;
474*4882a593Smuzhiyun 
475*4882a593Smuzhiyun 	return 0;
476*4882a593Smuzhiyun }
477*4882a593Smuzhiyun 
/* Make room for and fill in the ESP trailer (TFC padding, pad bytes,
 * pad length, next header).
 *
 * Fast paths: use existing tailroom, or place the trailer in a page
 * fragment from the state's allocator (setting esp->inplace = false so
 * encryption uses a separate destination scatterlist).  Falls back to
 * skb_cow_data() otherwise.  Returns the number of scatterlist entries
 * needed to map the skb, or a negative errno.
 */
int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *tail;
	int nfrags;
	int esph_offset;
	struct page *page;
	struct sk_buff *trailer;
	int tailen = esp->tailen;

	if (x->encap) {
		int err = esp6_output_encap(x, skb, esp);

		if (err < 0)
			return err;
	}

	/* Too big for the single-page fast paths below. */
	if (ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE ||
	    ALIGN(skb->data_len, L1_CACHE_BYTES) > PAGE_SIZE)
		goto cow;

	if (!skb_cloned(skb)) {
		if (tailen <= skb_tailroom(skb)) {
			/* Trailer fits in the existing tailroom. */
			nfrags = 1;
			trailer = skb;
			tail = skb_tail_pointer(trailer);

			goto skip_cow;
		} else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
			   && !skb_has_frag_list(skb)) {
			int allocsize;
			struct sock *sk = skb->sk;
			struct page_frag *pfrag = &x->xfrag;

			/* Trailer goes into its own page fragment, so a
			 * separate destination scatterlist is required.
			 */
			esp->inplace = false;

			allocsize = ALIGN(tailen, L1_CACHE_BYTES);

			/* x->lock protects the shared page_frag. */
			spin_lock_bh(&x->lock);

			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
				spin_unlock_bh(&x->lock);
				goto cow;
			}

			page = pfrag->page;
			get_page(page);

			tail = page_address(page) + pfrag->offset;

			esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);

			nfrags = skb_shinfo(skb)->nr_frags;

			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
					     tailen);
			skb_shinfo(skb)->nr_frags = ++nfrags;

			pfrag->offset = pfrag->offset + allocsize;

			spin_unlock_bh(&x->lock);

			/* one extra scatterlist entry for skb->data */
			nfrags++;

			skb->len += tailen;
			skb->data_len += tailen;
			skb->truesize += tailen;
			if (sk && sk_fullsock(sk))
				refcount_add(tailen, &sk->sk_wmem_alloc);

			goto out;
		}
	}

cow:
	/* Slow path: unclone/expand the skb.  The transport header may
	 * move, so recompute esp->esph from its saved offset.
	 */
	esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb);

	nfrags = skb_cow_data(skb, tailen, &trailer);
	if (nfrags < 0)
		goto out;
	tail = skb_tail_pointer(trailer);
	esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset);

skip_cow:
	esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
	pskb_put(skb, trailer, tailen);

out:
	return nfrags;
}
EXPORT_SYMBOL_GPL(esp6_output_head);
568*4882a593Smuzhiyun 
/* Encrypt the ESP packet prepared by esp6_output_head().
 *
 * Allocates scratch space (ESN extra, IV, AEAD request, scatterlists),
 * maps the skb into a source scatterlist - and, for out-of-place
 * operation, moves the payload frags onto a fresh page and builds a
 * destination scatterlist - then runs the AEAD encrypt.
 *
 * Returns 0 on synchronous success, -EINPROGRESS when the crypto
 * request completes asynchronously (the callback finishes the output
 * path and frees the scratch buffer), or a negative errno.
 */
int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *iv;
	int alen;
	void *tmp;
	int ivlen;
	int assoclen;
	int extralen;
	struct page *page;
	struct ip_esp_hdr *esph;
	struct aead_request *req;
	struct crypto_aead *aead;
	struct scatterlist *sg, *dsg;
	struct esp_output_extra *extra;
	int err = -ENOMEM;

	assoclen = sizeof(struct ip_esp_hdr);
	extralen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		/* the high sequence word is authenticated too */
		extralen += sizeof(*extra);
		assoclen += sizeof(__be32);
	}

	aead = x->data;
	alen = crypto_aead_authsize(aead);
	ivlen = crypto_aead_ivsize(aead);

	/* +2: one entry for skb->data, one spare (see skb_cow_data users) */
	tmp = esp_alloc_tmp(aead, esp->nfrags + 2, extralen);
	if (!tmp)
		goto error;

	extra = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	if (esp->inplace)
		dsg = sg;
	else
		dsg = &sg[esp->nfrags];

	esph = esp_output_set_esn(skb, x, esp->esph, extra);
	esp->esph = esph;

	sg_init_table(sg, esp->nfrags);
	err = skb_to_sgvec(skb, sg,
		           (unsigned char *)esph - skb->data,
		           assoclen + ivlen + esp->clen + alen);
	if (unlikely(err < 0))
		goto error_free;

	if (!esp->inplace) {
		int allocsize;
		struct page_frag *pfrag = &x->xfrag;

		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);

		/* x->lock protects the shared page_frag */
		spin_lock_bh(&x->lock);
		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
			spin_unlock_bh(&x->lock);
			goto error_free;
		}

		skb_shinfo(skb)->nr_frags = 1;

		page = pfrag->page;
		get_page(page);
		/* replace page frags in skb with new page */
		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
		pfrag->offset = pfrag->offset + allocsize;
		spin_unlock_bh(&x->lock);

		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
		err = skb_to_sgvec(skb, dsg,
			           (unsigned char *)esph - skb->data,
			           assoclen + ivlen + esp->clen + alen);
		if (unlikely(err < 0))
			goto error_free;
	}

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_output_done, skb);

	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
	aead_request_set_ad(req, assoclen);

	/* Seed the IV with (up to) the low 64 bits of the sequence number. */
	memset(iv, 0, ivlen);
	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
	       min(ivlen, 8));

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_encrypt(req);

	switch (err) {
	case -EINPROGRESS:
		/* async: the completion callback owns tmp and the skb */
		goto error;

	case -ENOSPC:
		err = NET_XMIT_DROP;
		break;

	case 0:
		if ((x->props.flags & XFRM_STATE_ESN))
			esp_output_restore_header(skb);
		esp_output_encap_csum(skb);
	}

	if (sg != dsg)
		esp_ssg_unref(x, tmp);

	if (!err && x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
		err = esp_output_tail_tcp(x, skb);

error_free:
	kfree(tmp);
error:
	return err;
}
EXPORT_SYMBOL_GPL(esp6_output_tail);
691*4882a593Smuzhiyun 
/* esp6_output - xfrm output callback: ESP-encapsulate an outgoing skb.
 *
 * Computes the trailer layout (TFC padding, self-describing padding and
 * the ICV), fills in the ESP header (SPI and sequence number) and hands
 * the packet to esp6_output_tail() for encryption.  Returns 0 or a
 * negative errno / NET_XMIT code from the tail path.
 */
static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int alen;
	int blksize;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;

	esp.inplace = true;

	/* Save the inner protocol (it goes into the ESP trailer's
	 * next-header field) and mark the header chain as ESP.
	 */
	esp.proto = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	/* Traffic-flow confidentiality: pad short packets up to the
	 * configured target length, bounded by the cached path MTU.
	 */
	esp.tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			esp.tfclen = padto - skb->len;
	}
	/* RFC 4303: the payload (incl. the 2-byte pad-length/next-header
	 * tail) must align to a multiple of 4 and of the cipher block size.
	 */
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	/* Make room for the trailer; may relocate esp.esph. */
	esp.nfrags = esp6_output_head(x, skb, &esp);
	if (esp.nfrags < 0)
		return esp.nfrags;

	esph = esp.esph;
	esph->spi = x->id.spi;

	/* On-wire header carries the low 32 sequence bits; the full
	 * 64-bit value is kept for IV generation / ESN handling.
	 */
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
			    ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

	skb_push(skb, -skb_network_offset(skb));

	return esp6_output_tail(x, skb, &esp);
}
741*4882a593Smuzhiyun 
esp_remove_trailer(struct sk_buff * skb)742*4882a593Smuzhiyun static inline int esp_remove_trailer(struct sk_buff *skb)
743*4882a593Smuzhiyun {
744*4882a593Smuzhiyun 	struct xfrm_state *x = xfrm_input_state(skb);
745*4882a593Smuzhiyun 	struct xfrm_offload *xo = xfrm_offload(skb);
746*4882a593Smuzhiyun 	struct crypto_aead *aead = x->data;
747*4882a593Smuzhiyun 	int alen, hlen, elen;
748*4882a593Smuzhiyun 	int padlen, trimlen;
749*4882a593Smuzhiyun 	__wsum csumdiff;
750*4882a593Smuzhiyun 	u8 nexthdr[2];
751*4882a593Smuzhiyun 	int ret;
752*4882a593Smuzhiyun 
753*4882a593Smuzhiyun 	alen = crypto_aead_authsize(aead);
754*4882a593Smuzhiyun 	hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
755*4882a593Smuzhiyun 	elen = skb->len - hlen;
756*4882a593Smuzhiyun 
757*4882a593Smuzhiyun 	if (xo && (xo->flags & XFRM_ESP_NO_TRAILER)) {
758*4882a593Smuzhiyun 		ret = xo->proto;
759*4882a593Smuzhiyun 		goto out;
760*4882a593Smuzhiyun 	}
761*4882a593Smuzhiyun 
762*4882a593Smuzhiyun 	ret = skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2);
763*4882a593Smuzhiyun 	BUG_ON(ret);
764*4882a593Smuzhiyun 
765*4882a593Smuzhiyun 	ret = -EINVAL;
766*4882a593Smuzhiyun 	padlen = nexthdr[0];
767*4882a593Smuzhiyun 	if (padlen + 2 + alen >= elen) {
768*4882a593Smuzhiyun 		net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
769*4882a593Smuzhiyun 				    padlen + 2, elen - alen);
770*4882a593Smuzhiyun 		goto out;
771*4882a593Smuzhiyun 	}
772*4882a593Smuzhiyun 
773*4882a593Smuzhiyun 	trimlen = alen + padlen + 2;
774*4882a593Smuzhiyun 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
775*4882a593Smuzhiyun 		csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0);
776*4882a593Smuzhiyun 		skb->csum = csum_block_sub(skb->csum, csumdiff,
777*4882a593Smuzhiyun 					   skb->len - trimlen);
778*4882a593Smuzhiyun 	}
779*4882a593Smuzhiyun 	pskb_trim(skb, skb->len - trimlen);
780*4882a593Smuzhiyun 
781*4882a593Smuzhiyun 	ret = nexthdr[1];
782*4882a593Smuzhiyun 
783*4882a593Smuzhiyun out:
784*4882a593Smuzhiyun 	return ret;
785*4882a593Smuzhiyun }
786*4882a593Smuzhiyun 
esp6_input_done2(struct sk_buff * skb,int err)787*4882a593Smuzhiyun int esp6_input_done2(struct sk_buff *skb, int err)
788*4882a593Smuzhiyun {
789*4882a593Smuzhiyun 	struct xfrm_state *x = xfrm_input_state(skb);
790*4882a593Smuzhiyun 	struct xfrm_offload *xo = xfrm_offload(skb);
791*4882a593Smuzhiyun 	struct crypto_aead *aead = x->data;
792*4882a593Smuzhiyun 	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
793*4882a593Smuzhiyun 	int hdr_len = skb_network_header_len(skb);
794*4882a593Smuzhiyun 
795*4882a593Smuzhiyun 	if (!xo || (xo && !(xo->flags & CRYPTO_DONE)))
796*4882a593Smuzhiyun 		kfree(ESP_SKB_CB(skb)->tmp);
797*4882a593Smuzhiyun 
798*4882a593Smuzhiyun 	if (unlikely(err))
799*4882a593Smuzhiyun 		goto out;
800*4882a593Smuzhiyun 
801*4882a593Smuzhiyun 	err = esp_remove_trailer(skb);
802*4882a593Smuzhiyun 	if (unlikely(err < 0))
803*4882a593Smuzhiyun 		goto out;
804*4882a593Smuzhiyun 
805*4882a593Smuzhiyun 	if (x->encap) {
806*4882a593Smuzhiyun 		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
807*4882a593Smuzhiyun 		int offset = skb_network_offset(skb) + sizeof(*ip6h);
808*4882a593Smuzhiyun 		struct xfrm_encap_tmpl *encap = x->encap;
809*4882a593Smuzhiyun 		u8 nexthdr = ip6h->nexthdr;
810*4882a593Smuzhiyun 		__be16 frag_off, source;
811*4882a593Smuzhiyun 		struct udphdr *uh;
812*4882a593Smuzhiyun 		struct tcphdr *th;
813*4882a593Smuzhiyun 
814*4882a593Smuzhiyun 		offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
815*4882a593Smuzhiyun 		if (offset == -1) {
816*4882a593Smuzhiyun 			err = -EINVAL;
817*4882a593Smuzhiyun 			goto out;
818*4882a593Smuzhiyun 		}
819*4882a593Smuzhiyun 
820*4882a593Smuzhiyun 		uh = (void *)(skb->data + offset);
821*4882a593Smuzhiyun 		th = (void *)(skb->data + offset);
822*4882a593Smuzhiyun 		hdr_len += offset;
823*4882a593Smuzhiyun 
824*4882a593Smuzhiyun 		switch (x->encap->encap_type) {
825*4882a593Smuzhiyun 		case TCP_ENCAP_ESPINTCP:
826*4882a593Smuzhiyun 			source = th->source;
827*4882a593Smuzhiyun 			break;
828*4882a593Smuzhiyun 		case UDP_ENCAP_ESPINUDP:
829*4882a593Smuzhiyun 		case UDP_ENCAP_ESPINUDP_NON_IKE:
830*4882a593Smuzhiyun 			source = uh->source;
831*4882a593Smuzhiyun 			break;
832*4882a593Smuzhiyun 		default:
833*4882a593Smuzhiyun 			WARN_ON_ONCE(1);
834*4882a593Smuzhiyun 			err = -EINVAL;
835*4882a593Smuzhiyun 			goto out;
836*4882a593Smuzhiyun 		}
837*4882a593Smuzhiyun 
838*4882a593Smuzhiyun 		/*
839*4882a593Smuzhiyun 		 * 1) if the NAT-T peer's IP or port changed then
840*4882a593Smuzhiyun 		 *    advertize the change to the keying daemon.
841*4882a593Smuzhiyun 		 *    This is an inbound SA, so just compare
842*4882a593Smuzhiyun 		 *    SRC ports.
843*4882a593Smuzhiyun 		 */
844*4882a593Smuzhiyun 		if (!ipv6_addr_equal(&ip6h->saddr, &x->props.saddr.in6) ||
845*4882a593Smuzhiyun 		    source != encap->encap_sport) {
846*4882a593Smuzhiyun 			xfrm_address_t ipaddr;
847*4882a593Smuzhiyun 
848*4882a593Smuzhiyun 			memcpy(&ipaddr.a6, &ip6h->saddr.s6_addr, sizeof(ipaddr.a6));
849*4882a593Smuzhiyun 			km_new_mapping(x, &ipaddr, source);
850*4882a593Smuzhiyun 
851*4882a593Smuzhiyun 			/* XXX: perhaps add an extra
852*4882a593Smuzhiyun 			 * policy check here, to see
853*4882a593Smuzhiyun 			 * if we should allow or
854*4882a593Smuzhiyun 			 * reject a packet from a
855*4882a593Smuzhiyun 			 * different source
856*4882a593Smuzhiyun 			 * address/port.
857*4882a593Smuzhiyun 			 */
858*4882a593Smuzhiyun 		}
859*4882a593Smuzhiyun 
860*4882a593Smuzhiyun 		/*
861*4882a593Smuzhiyun 		 * 2) ignore UDP/TCP checksums in case
862*4882a593Smuzhiyun 		 *    of NAT-T in Transport Mode, or
863*4882a593Smuzhiyun 		 *    perform other post-processing fixes
864*4882a593Smuzhiyun 		 *    as per draft-ietf-ipsec-udp-encaps-06,
865*4882a593Smuzhiyun 		 *    section 3.1.2
866*4882a593Smuzhiyun 		 */
867*4882a593Smuzhiyun 		if (x->props.mode == XFRM_MODE_TRANSPORT)
868*4882a593Smuzhiyun 			skb->ip_summed = CHECKSUM_UNNECESSARY;
869*4882a593Smuzhiyun 	}
870*4882a593Smuzhiyun 
871*4882a593Smuzhiyun 	skb_postpull_rcsum(skb, skb_network_header(skb),
872*4882a593Smuzhiyun 			   skb_network_header_len(skb));
873*4882a593Smuzhiyun 	skb_pull_rcsum(skb, hlen);
874*4882a593Smuzhiyun 	if (x->props.mode == XFRM_MODE_TUNNEL)
875*4882a593Smuzhiyun 		skb_reset_transport_header(skb);
876*4882a593Smuzhiyun 	else
877*4882a593Smuzhiyun 		skb_set_transport_header(skb, -hdr_len);
878*4882a593Smuzhiyun 
879*4882a593Smuzhiyun 	/* RFC4303: Drop dummy packets without any error */
880*4882a593Smuzhiyun 	if (err == IPPROTO_NONE)
881*4882a593Smuzhiyun 		err = -EINVAL;
882*4882a593Smuzhiyun 
883*4882a593Smuzhiyun out:
884*4882a593Smuzhiyun 	return err;
885*4882a593Smuzhiyun }
886*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(esp6_input_done2);
887*4882a593Smuzhiyun 
esp_input_done(struct crypto_async_request * base,int err)888*4882a593Smuzhiyun static void esp_input_done(struct crypto_async_request *base, int err)
889*4882a593Smuzhiyun {
890*4882a593Smuzhiyun 	struct sk_buff *skb = base->data;
891*4882a593Smuzhiyun 
892*4882a593Smuzhiyun 	xfrm_input_resume(skb, esp6_input_done2(skb, err));
893*4882a593Smuzhiyun }
894*4882a593Smuzhiyun 
/* Undo the 4-byte ESN shift applied by esp_input_set_header() so the
 * ESP header sits at its original offset again after decryption.
 */
static void esp_input_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, 0);
	__skb_pull(skb, 4);
}
900*4882a593Smuzhiyun 
/* Prepare the ESP header for ESN authentication: shift the header so
 * that spi, seq_no and the high sequence bits form one contiguous
 * associated-data block.  The original SPI is saved in *seqhi so
 * esp_input_restore_header() can put everything back after decryption.
 */
static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
	struct xfrm_state *x = xfrm_input_state(skb);

	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * decryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		struct ip_esp_hdr *esph = skb_push(skb, 4);

		*seqhi = esph->spi;
		esph->spi = esph->seq_no;
		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
	}
}
917*4882a593Smuzhiyun 
esp_input_done_esn(struct crypto_async_request * base,int err)918*4882a593Smuzhiyun static void esp_input_done_esn(struct crypto_async_request *base, int err)
919*4882a593Smuzhiyun {
920*4882a593Smuzhiyun 	struct sk_buff *skb = base->data;
921*4882a593Smuzhiyun 
922*4882a593Smuzhiyun 	esp_input_restore_header(skb);
923*4882a593Smuzhiyun 	esp_input_done(base, err);
924*4882a593Smuzhiyun }
925*4882a593Smuzhiyun 
/* esp6_input - xfrm input callback: authenticate and decrypt an ESP packet.
 *
 * Builds a scatterlist over the skb, sets up the AEAD request (with the
 * ESN header shuffle when enabled) and decrypts, either synchronously or
 * asynchronously via esp_input_done{,_esn}().  Returns the inner protocol
 * number, -EINPROGRESS for async completion, or a negative errno.
 */
static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct aead_request *req;
	struct sk_buff *trailer;
	int ivlen = crypto_aead_ivsize(aead);
	int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen;
	int nfrags;
	int assoclen;
	int seqhilen;
	int ret = 0;
	void *tmp;
	__be32 *seqhi;
	u8 *iv;
	struct scatterlist *sg;

	/* Must at least contain the ESP header and IV. */
	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen)) {
		ret = -EINVAL;
		goto out;
	}

	if (elen <= 0) {
		ret = -EINVAL;
		goto out;
	}

	/* Associated data is the ESP header, plus the high sequence
	 * bits for ESN.
	 */
	assoclen = sizeof(struct ip_esp_hdr);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	/* Avoid the expensive copy-on-write pass when the skb is
	 * unshared and has no frag list; count scatterlist entries.
	 */
	if (!skb_cloned(skb)) {
		if (!skb_is_nonlinear(skb)) {
			nfrags = 1;

			goto skip_cow;
		} else if (!skb_has_frag_list(skb)) {
			nfrags = skb_shinfo(skb)->nr_frags;
			nfrags++;

			goto skip_cow;
		}
	}

	nfrags = skb_cow_data(skb, 0, &trailer);
	if (nfrags < 0) {
		ret = -EINVAL;
		goto out;
	}

skip_cow:
	ret = -ENOMEM;
	/* One buffer holds seqhi, IV, AEAD request and scatterlist;
	 * freed in esp6_input_done2().
	 */
	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	esp_input_set_header(skb, seqhi);

	sg_init_table(sg, nfrags);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		kfree(tmp);
		goto out;
	}

	skb->ip_summed = CHECKSUM_NONE;

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_input_done, skb);

	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
	aead_request_set_ad(req, assoclen);

	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS)
		goto out;

	/* Synchronous completion: mirror what the async callbacks do. */
	if ((x->props.flags & XFRM_STATE_ESN))
		esp_input_restore_header(skb);

	ret = esp6_input_done2(skb, ret);

out:
	return ret;
}
1022*4882a593Smuzhiyun 
/* ICMPv6 error handler for ESP: react to PMTU and redirect messages
 * addressed to a known SA; everything else is ignored.
 */
static int esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		    u8 type, u8 code, int offset, __be32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
	struct xfrm_state *x;

	switch (type) {
	case ICMPV6_PKT_TOOBIG:
	case NDISC_REDIRECT:
		break;
	default:
		return 0;
	}

	/* Only act if the SPI in the offending packet matches an SA. */
	x = xfrm_state_lookup(net, skb->mark,
			      (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET6);
	if (!x)
		return 0;

	if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
	else
		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));

	xfrm_state_put(x);
	return 0;
}
1049*4882a593Smuzhiyun 
esp6_destroy(struct xfrm_state * x)1050*4882a593Smuzhiyun static void esp6_destroy(struct xfrm_state *x)
1051*4882a593Smuzhiyun {
1052*4882a593Smuzhiyun 	struct crypto_aead *aead = x->data;
1053*4882a593Smuzhiyun 
1054*4882a593Smuzhiyun 	if (!aead)
1055*4882a593Smuzhiyun 		return;
1056*4882a593Smuzhiyun 
1057*4882a593Smuzhiyun 	crypto_free_aead(aead);
1058*4882a593Smuzhiyun }
1059*4882a593Smuzhiyun 
esp_init_aead(struct xfrm_state * x)1060*4882a593Smuzhiyun static int esp_init_aead(struct xfrm_state *x)
1061*4882a593Smuzhiyun {
1062*4882a593Smuzhiyun 	char aead_name[CRYPTO_MAX_ALG_NAME];
1063*4882a593Smuzhiyun 	struct crypto_aead *aead;
1064*4882a593Smuzhiyun 	int err;
1065*4882a593Smuzhiyun 
1066*4882a593Smuzhiyun 	err = -ENAMETOOLONG;
1067*4882a593Smuzhiyun 	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
1068*4882a593Smuzhiyun 		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
1069*4882a593Smuzhiyun 		goto error;
1070*4882a593Smuzhiyun 
1071*4882a593Smuzhiyun 	aead = crypto_alloc_aead(aead_name, 0, 0);
1072*4882a593Smuzhiyun 	err = PTR_ERR(aead);
1073*4882a593Smuzhiyun 	if (IS_ERR(aead))
1074*4882a593Smuzhiyun 		goto error;
1075*4882a593Smuzhiyun 
1076*4882a593Smuzhiyun 	x->data = aead;
1077*4882a593Smuzhiyun 
1078*4882a593Smuzhiyun 	err = crypto_aead_setkey(aead, x->aead->alg_key,
1079*4882a593Smuzhiyun 				 (x->aead->alg_key_len + 7) / 8);
1080*4882a593Smuzhiyun 	if (err)
1081*4882a593Smuzhiyun 		goto error;
1082*4882a593Smuzhiyun 
1083*4882a593Smuzhiyun 	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
1084*4882a593Smuzhiyun 	if (err)
1085*4882a593Smuzhiyun 		goto error;
1086*4882a593Smuzhiyun 
1087*4882a593Smuzhiyun error:
1088*4882a593Smuzhiyun 	return err;
1089*4882a593Smuzhiyun }
1090*4882a593Smuzhiyun 
/* Set up an authenc (separate cipher + auth) transform from the SA
 * parameters.  Builds the "authenc(auth,enc)" (or authencesn for ESN)
 * algorithm name, packs both keys plus the rtattr length parameter into
 * one keying blob as the authenc template expects, and programs the
 * truncated ICV length.  Returns 0 or a negative errno.
 */
static int esp_init_authenc(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -EINVAL;
	if (!x->ealg)
		goto error;

	err = -ENAMETOOLONG;

	/* ESN needs the authencesn wrapper so the high sequence bits are
	 * authenticated; geniv (if any) wraps the whole construction.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthencesn(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthenc(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	}

	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	/* Owned by the state from here on; freed in esp6_destroy(). */
	x->data = aead;

	/* Key blob layout: rtattr(enckeylen) | auth key | enc key. */
	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		/* Sanity-check that the crypto layer's digest size matches
		 * the xfrm algorithm description before truncating the ICV.
		 */
		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			pr_info("ESP: %s digestsize %u != %hu\n",
				x->aalg->alg_name,
				crypto_aead_authsize(aead),
				aalg_desc->uinfo.auth.icv_fullbits / 8);
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err)
			goto free_key;
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree(key);

error:
	return err;
}
1183*4882a593Smuzhiyun 
/* esp6_init_state - xfrm init_state callback for IPv6 ESP.
 *
 * Allocates the AEAD transform (combined-mode or authenc) and computes
 * the per-packet header/trailer overhead for the configured mode and
 * encapsulation.  Returns 0 or a negative errno.
 */
static int esp6_init_state(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	u32 align;
	int err;

	x->data = NULL;

	if (x->aead)
		err = esp_init_aead(x);
	else
		err = esp_init_authenc(x);

	if (err)
		goto error;

	aead = x->data;

	/* Base overhead: ESP header + IV; mode-specific extras below. */
	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	switch (x->props.mode) {
	case XFRM_MODE_BEET:
		if (x->sel.family != AF_INET6)
			/* IPv4-in-IPv6 BEET: room for the pseudo header
			 * and the larger IPv6 header.
			 */
			x->props.header_len += IPV4_BEET_PHMAXLEN +
					       (sizeof(struct ipv6hdr) - sizeof(struct iphdr));
		break;
	default:
	case XFRM_MODE_TRANSPORT:
		break;
	case XFRM_MODE_TUNNEL:
		x->props.header_len += sizeof(struct ipv6hdr);
		break;
	}

	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;

		switch (encap->encap_type) {
		default:
			err = -EINVAL;
			goto error;
		case UDP_ENCAP_ESPINUDP:
			x->props.header_len += sizeof(struct udphdr);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			/* UDP header plus the two 32-bit non-IKE markers. */
			x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
			break;
#ifdef CONFIG_INET6_ESPINTCP
		case TCP_ENCAP_ESPINTCP:
			/* only the length field, TCP encap is done by
			 * the socket
			 */
			x->props.header_len += 2;
			break;
#endif
		}
	}

	/* Worst-case trailer: padding up to alignment, pad-length and
	 * next-header bytes, plus the ICV.
	 */
	align = ALIGN(crypto_aead_blocksize(aead), 4);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
	return err;
}
1248*4882a593Smuzhiyun 
/* Receive callback hook required by xfrm6_protocol; ESP needs no
 * per-packet post-receive processing here.
 */
static int esp6_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}
1253*4882a593Smuzhiyun 
/* xfrm type registration for IPv6 ESP (protocol 50). */
static const struct xfrm_type esp6_type = {
	.description	= "ESP6",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,	/* ESP provides replay protection */
	.init_state	= esp6_init_state,
	.destructor	= esp6_destroy,
	.input		= esp6_input,
	.output		= esp6_output,
	.hdr_offset	= xfrm6_find_1stfragopt,
};
1265*4882a593Smuzhiyun 
/* IPv6 protocol handler hooking ESP packets into the xfrm input path. */
static struct xfrm6_protocol esp6_protocol = {
	.handler	=	xfrm6_rcv,
	.input_handler	=	xfrm_input,
	.cb_handler	=	esp6_rcv_cb,
	.err_handler	=	esp6_err,
	.priority	=	0,
};
1273*4882a593Smuzhiyun 
esp6_init(void)1274*4882a593Smuzhiyun static int __init esp6_init(void)
1275*4882a593Smuzhiyun {
1276*4882a593Smuzhiyun 	if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
1277*4882a593Smuzhiyun 		pr_info("%s: can't add xfrm type\n", __func__);
1278*4882a593Smuzhiyun 		return -EAGAIN;
1279*4882a593Smuzhiyun 	}
1280*4882a593Smuzhiyun 	if (xfrm6_protocol_register(&esp6_protocol, IPPROTO_ESP) < 0) {
1281*4882a593Smuzhiyun 		pr_info("%s: can't add protocol\n", __func__);
1282*4882a593Smuzhiyun 		xfrm_unregister_type(&esp6_type, AF_INET6);
1283*4882a593Smuzhiyun 		return -EAGAIN;
1284*4882a593Smuzhiyun 	}
1285*4882a593Smuzhiyun 
1286*4882a593Smuzhiyun 	return 0;
1287*4882a593Smuzhiyun }
1288*4882a593Smuzhiyun 
/* Module exit: deregister the protocol handler first so no new packets
 * can reach the xfrm type while it is being removed.
 */
static void __exit esp6_fini(void)
{
	if (xfrm6_protocol_deregister(&esp6_protocol, IPPROTO_ESP) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	xfrm_unregister_type(&esp6_type, AF_INET6);
}
1295*4882a593Smuzhiyun 
module_init(esp6_init);
module_exit(esp6_fini);

MODULE_LICENSE("GPL");
/* Allow auto-loading when an IPv6 ESP SA is configured. */
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);
1301