1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun #define pr_fmt(fmt) "IPsec: " fmt
3*4882a593Smuzhiyun
4*4882a593Smuzhiyun #include <crypto/aead.h>
5*4882a593Smuzhiyun #include <crypto/authenc.h>
6*4882a593Smuzhiyun #include <linux/err.h>
7*4882a593Smuzhiyun #include <linux/module.h>
8*4882a593Smuzhiyun #include <net/ip.h>
9*4882a593Smuzhiyun #include <net/xfrm.h>
10*4882a593Smuzhiyun #include <net/esp.h>
11*4882a593Smuzhiyun #include <linux/scatterlist.h>
12*4882a593Smuzhiyun #include <linux/kernel.h>
13*4882a593Smuzhiyun #include <linux/pfkeyv2.h>
14*4882a593Smuzhiyun #include <linux/rtnetlink.h>
15*4882a593Smuzhiyun #include <linux/slab.h>
16*4882a593Smuzhiyun #include <linux/spinlock.h>
17*4882a593Smuzhiyun #include <linux/in6.h>
18*4882a593Smuzhiyun #include <net/icmp.h>
19*4882a593Smuzhiyun #include <net/protocol.h>
20*4882a593Smuzhiyun #include <net/udp.h>
21*4882a593Smuzhiyun #include <net/tcp.h>
22*4882a593Smuzhiyun #include <net/espintcp.h>
23*4882a593Smuzhiyun
24*4882a593Smuzhiyun #include <linux/highmem.h>
25*4882a593Smuzhiyun
/* Per-skb ESP scratch state, stored in skb->cb.
 * xfrm must be the first member: generic xfrm code casts skb->cb to
 * struct xfrm_skb_cb.  tmp holds the temporary buffer allocated by
 * esp_alloc_tmp() so the async completion callback can locate and
 * free it.
 */
struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

/* Extra per-packet output state needed for 64-bit extended sequence
 * numbers (ESN): the word overwritten by the shifted ESP header and
 * the header's original offset, so both can be restored after
 * encryption (see esp_output_set_extra()/esp_output_restore_header()).
 */
struct esp_output_extra {
	__be32 seqhi;
	u32 esphoff;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
37*4882a593Smuzhiyun
38*4882a593Smuzhiyun /*
39*4882a593Smuzhiyun * Allocate an AEAD request structure with extra space for SG and IV.
40*4882a593Smuzhiyun *
41*4882a593Smuzhiyun * For alignment considerations the IV is placed at the front, followed
42*4882a593Smuzhiyun * by the request and finally the SG list.
43*4882a593Smuzhiyun *
44*4882a593Smuzhiyun * TODO: Use spare space in skb for this where possible.
45*4882a593Smuzhiyun */
esp_alloc_tmp(struct crypto_aead * aead,int nfrags,int extralen)46*4882a593Smuzhiyun static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int extralen)
47*4882a593Smuzhiyun {
48*4882a593Smuzhiyun unsigned int len;
49*4882a593Smuzhiyun
50*4882a593Smuzhiyun len = extralen;
51*4882a593Smuzhiyun
52*4882a593Smuzhiyun len += crypto_aead_ivsize(aead);
53*4882a593Smuzhiyun
54*4882a593Smuzhiyun if (len) {
55*4882a593Smuzhiyun len += crypto_aead_alignmask(aead) &
56*4882a593Smuzhiyun ~(crypto_tfm_ctx_alignment() - 1);
57*4882a593Smuzhiyun len = ALIGN(len, crypto_tfm_ctx_alignment());
58*4882a593Smuzhiyun }
59*4882a593Smuzhiyun
60*4882a593Smuzhiyun len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
61*4882a593Smuzhiyun len = ALIGN(len, __alignof__(struct scatterlist));
62*4882a593Smuzhiyun
63*4882a593Smuzhiyun len += sizeof(struct scatterlist) * nfrags;
64*4882a593Smuzhiyun
65*4882a593Smuzhiyun return kmalloc(len, GFP_ATOMIC);
66*4882a593Smuzhiyun }
67*4882a593Smuzhiyun
esp_tmp_extra(void * tmp)68*4882a593Smuzhiyun static inline void *esp_tmp_extra(void *tmp)
69*4882a593Smuzhiyun {
70*4882a593Smuzhiyun return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
71*4882a593Smuzhiyun }
72*4882a593Smuzhiyun
esp_tmp_iv(struct crypto_aead * aead,void * tmp,int extralen)73*4882a593Smuzhiyun static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int extralen)
74*4882a593Smuzhiyun {
75*4882a593Smuzhiyun return crypto_aead_ivsize(aead) ?
76*4882a593Smuzhiyun PTR_ALIGN((u8 *)tmp + extralen,
77*4882a593Smuzhiyun crypto_aead_alignmask(aead) + 1) : tmp + extralen;
78*4882a593Smuzhiyun }
79*4882a593Smuzhiyun
static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	u8 *end_of_iv = iv + crypto_aead_ivsize(aead);
	struct aead_request *req;

	/* The request follows the IV, aligned to the tfm context. */
	req = (void *)PTR_ALIGN(end_of_iv, crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);

	return req;
}
89*4882a593Smuzhiyun
esp_req_sg(struct crypto_aead * aead,struct aead_request * req)90*4882a593Smuzhiyun static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
91*4882a593Smuzhiyun struct aead_request *req)
92*4882a593Smuzhiyun {
93*4882a593Smuzhiyun return (void *)ALIGN((unsigned long)(req + 1) +
94*4882a593Smuzhiyun crypto_aead_reqsize(aead),
95*4882a593Smuzhiyun __alignof__(struct scatterlist));
96*4882a593Smuzhiyun }
97*4882a593Smuzhiyun
esp_ssg_unref(struct xfrm_state * x,void * tmp)98*4882a593Smuzhiyun static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
99*4882a593Smuzhiyun {
100*4882a593Smuzhiyun struct esp_output_extra *extra = esp_tmp_extra(tmp);
101*4882a593Smuzhiyun struct crypto_aead *aead = x->data;
102*4882a593Smuzhiyun int extralen = 0;
103*4882a593Smuzhiyun u8 *iv;
104*4882a593Smuzhiyun struct aead_request *req;
105*4882a593Smuzhiyun struct scatterlist *sg;
106*4882a593Smuzhiyun
107*4882a593Smuzhiyun if (x->props.flags & XFRM_STATE_ESN)
108*4882a593Smuzhiyun extralen += sizeof(*extra);
109*4882a593Smuzhiyun
110*4882a593Smuzhiyun extra = esp_tmp_extra(tmp);
111*4882a593Smuzhiyun iv = esp_tmp_iv(aead, tmp, extralen);
112*4882a593Smuzhiyun req = esp_tmp_req(aead, iv);
113*4882a593Smuzhiyun
114*4882a593Smuzhiyun /* Unref skb_frag_pages in the src scatterlist if necessary.
115*4882a593Smuzhiyun * Skip the first sg which comes from skb->data.
116*4882a593Smuzhiyun */
117*4882a593Smuzhiyun if (req->src != req->dst)
118*4882a593Smuzhiyun for (sg = sg_next(req->src); sg; sg = sg_next(sg))
119*4882a593Smuzhiyun put_page(sg_page(sg));
120*4882a593Smuzhiyun }
121*4882a593Smuzhiyun
122*4882a593Smuzhiyun #ifdef CONFIG_INET_ESPINTCP
/* Carrier used to defer the sock_put() of a previously cached
 * encapsulation socket to an RCU grace period (see esp_free_tcp_sk()),
 * since lockless readers may still hold the pointer.
 */
struct esp_tcp_sk {
	struct sock *sk;
	struct rcu_head rcu;
};
127*4882a593Smuzhiyun
esp_free_tcp_sk(struct rcu_head * head)128*4882a593Smuzhiyun static void esp_free_tcp_sk(struct rcu_head *head)
129*4882a593Smuzhiyun {
130*4882a593Smuzhiyun struct esp_tcp_sk *esk = container_of(head, struct esp_tcp_sk, rcu);
131*4882a593Smuzhiyun
132*4882a593Smuzhiyun sock_put(esk->sk);
133*4882a593Smuzhiyun kfree(esk);
134*4882a593Smuzhiyun }
135*4882a593Smuzhiyun
/* Find (and cache in x->encap_sk) the established TCP socket used for
 * ESP-in-TCP encapsulation of this state.  Called under
 * rcu_read_lock().  Returns a valid socket or an ERR_PTR() on failure.
 */
static struct sock *esp_find_tcp_sk(struct xfrm_state *x)
{
	struct xfrm_encap_tmpl *encap = x->encap;
	struct esp_tcp_sk *esk;
	__be16 sport, dport;
	struct sock *nsk;
	struct sock *sk;

	/* Fast path: cached socket still established. */
	sk = rcu_dereference(x->encap_sk);
	if (sk && sk->sk_state == TCP_ESTABLISHED)
		return sk;

	spin_lock_bh(&x->lock);
	sport = encap->encap_sport;
	dport = encap->encap_dport;
	nsk = rcu_dereference_protected(x->encap_sk,
					lockdep_is_held(&x->lock));
	if (sk && sk == nsk) {
		/* Cached socket is no longer usable: drop the cache and
		 * defer the sock_put() to an RCU grace period, because
		 * concurrent readers may still hold the old pointer.
		 */
		esk = kmalloc(sizeof(*esk), GFP_ATOMIC);
		if (!esk) {
			spin_unlock_bh(&x->lock);
			return ERR_PTR(-ENOMEM);
		}
		RCU_INIT_POINTER(x->encap_sk, NULL);
		esk->sk = sk;
		call_rcu(&esk->rcu, esp_free_tcp_sk);
	}
	spin_unlock_bh(&x->lock);

	/* Lockless lookup of the established TCP socket matching the
	 * state's addresses and the snapshotted encap ports.
	 */
	sk = inet_lookup_established(xs_net(x), &tcp_hashinfo, x->id.daddr.a4,
				     dport, x->props.saddr.a4, sport, 0);
	if (!sk)
		return ERR_PTR(-ENOENT);

	/* The socket must be running the espintcp ULP to do the framing. */
	if (!tcp_is_ulp_esp(sk)) {
		sock_put(sk);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_bh(&x->lock);
	nsk = rcu_dereference_protected(x->encap_sk,
					lockdep_is_held(&x->lock));
	if (encap->encap_sport != sport ||
	    encap->encap_dport != dport) {
		/* Encap ports changed while we were unlocked: our lookup
		 * is stale; fall back to whatever is cached now, if any.
		 */
		sock_put(sk);
		sk = nsk ?: ERR_PTR(-EREMCHG);
	} else if (sk == nsk) {
		/* Another CPU already cached this very socket; drop our
		 * extra reference.
		 */
		sock_put(sk);
	} else {
		rcu_assign_pointer(x->encap_sk, sk);
	}
	spin_unlock_bh(&x->lock);

	return sk;
}
191*4882a593Smuzhiyun
/* Hand an encrypted ESP packet to the TCP encapsulation socket,
 * queueing it if the socket is currently owned by user context.
 */
static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb)
{
	struct sock *sk;
	int err;

	rcu_read_lock();

	sk = esp_find_tcp_sk(x);
	err = PTR_ERR_OR_ZERO(sk);
	if (!err) {
		bh_lock_sock(sk);
		if (sock_owned_by_user(sk))
			err = espintcp_queue_out(sk, skb);
		else
			err = espintcp_push_skb(sk, skb);
		bh_unlock_sock(sk);
	}

	rcu_read_unlock();
	return err;
}
215*4882a593Smuzhiyun
esp_output_tcp_encap_cb(struct net * net,struct sock * sk,struct sk_buff * skb)216*4882a593Smuzhiyun static int esp_output_tcp_encap_cb(struct net *net, struct sock *sk,
217*4882a593Smuzhiyun struct sk_buff *skb)
218*4882a593Smuzhiyun {
219*4882a593Smuzhiyun struct dst_entry *dst = skb_dst(skb);
220*4882a593Smuzhiyun struct xfrm_state *x = dst->xfrm;
221*4882a593Smuzhiyun
222*4882a593Smuzhiyun return esp_output_tcp_finish(x, skb);
223*4882a593Smuzhiyun }
224*4882a593Smuzhiyun
esp_output_tail_tcp(struct xfrm_state * x,struct sk_buff * skb)225*4882a593Smuzhiyun static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
226*4882a593Smuzhiyun {
227*4882a593Smuzhiyun int err;
228*4882a593Smuzhiyun
229*4882a593Smuzhiyun local_bh_disable();
230*4882a593Smuzhiyun err = xfrm_trans_queue_net(xs_net(x), skb, esp_output_tcp_encap_cb);
231*4882a593Smuzhiyun local_bh_enable();
232*4882a593Smuzhiyun
233*4882a593Smuzhiyun /* EINPROGRESS just happens to do the right thing. It
234*4882a593Smuzhiyun * actually means that the skb has been consumed and
235*4882a593Smuzhiyun * isn't coming back.
236*4882a593Smuzhiyun */
237*4882a593Smuzhiyun return err ?: -EINPROGRESS;
238*4882a593Smuzhiyun }
239*4882a593Smuzhiyun #else
esp_output_tail_tcp(struct xfrm_state * x,struct sk_buff * skb)240*4882a593Smuzhiyun static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
241*4882a593Smuzhiyun {
242*4882a593Smuzhiyun kfree_skb(skb);
243*4882a593Smuzhiyun
244*4882a593Smuzhiyun return -EOPNOTSUPP;
245*4882a593Smuzhiyun }
246*4882a593Smuzhiyun #endif
247*4882a593Smuzhiyun
/* Completion callback for crypto_aead_encrypt(): free the scratch
 * buffer and resume transmission on the appropriate path (device
 * offload resume, ESP-in-TCP, or plain xfrm output resume).
 */
static void esp_output_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct xfrm_offload *xo = xfrm_offload(skb);
	void *tmp;
	struct xfrm_state *x;

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		/* Crypto-offload resume path: the state is the last one
		 * recorded in the skb's sec_path.
		 */
		struct sec_path *sp = skb_sec_path(skb);

		x = sp->xvec[sp->len - 1];
	} else {
		x = skb_dst(skb)->xfrm;
	}

	/* Release the dst page refs (if out-of-place) and the scratch
	 * buffer allocated in esp_output_tail().
	 */
	tmp = ESP_SKB_CB(skb)->tmp;
	esp_ssg_unref(x, tmp);
	kfree(tmp);

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		if (err) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return;
		}

		/* Re-expose the MAC header and hand back to the driver. */
		skb_push(skb, skb->data - skb_mac_header(skb));
		secpath_reset(skb);
		xfrm_dev_resume(skb);
	} else {
		if (!err &&
		    x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
			esp_output_tail_tcp(x, skb);
		else
			xfrm_output_resume(skb, err);
	}
}
285*4882a593Smuzhiyun
286*4882a593Smuzhiyun /* Move ESP header back into place. */
esp_restore_header(struct sk_buff * skb,unsigned int offset)287*4882a593Smuzhiyun static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
288*4882a593Smuzhiyun {
289*4882a593Smuzhiyun struct ip_esp_hdr *esph = (void *)(skb->data + offset);
290*4882a593Smuzhiyun void *tmp = ESP_SKB_CB(skb)->tmp;
291*4882a593Smuzhiyun __be32 *seqhi = esp_tmp_extra(tmp);
292*4882a593Smuzhiyun
293*4882a593Smuzhiyun esph->seq_no = esph->spi;
294*4882a593Smuzhiyun esph->spi = *seqhi;
295*4882a593Smuzhiyun }
296*4882a593Smuzhiyun
esp_output_restore_header(struct sk_buff * skb)297*4882a593Smuzhiyun static void esp_output_restore_header(struct sk_buff *skb)
298*4882a593Smuzhiyun {
299*4882a593Smuzhiyun void *tmp = ESP_SKB_CB(skb)->tmp;
300*4882a593Smuzhiyun struct esp_output_extra *extra = esp_tmp_extra(tmp);
301*4882a593Smuzhiyun
302*4882a593Smuzhiyun esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
303*4882a593Smuzhiyun sizeof(__be32));
304*4882a593Smuzhiyun }
305*4882a593Smuzhiyun
/* Fill in the ESP header for transmission.  For ESN states the header
 * is temporarily shifted forward so the high sequence bits are covered
 * by the integrity check; esp_output_restore_header() moves it back
 * after encryption.  Returns the (possibly moved) header location.
 */
static struct ip_esp_hdr *esp_output_set_extra(struct sk_buff *skb,
					       struct xfrm_state *x,
					       struct ip_esp_hdr *esph,
					       struct esp_output_extra *extra)
{
	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * encryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		__u32 seqhi;
		struct xfrm_offload *xo = xfrm_offload(skb);

		/* Offloaded packets carry their sequence state in the
		 * xfrm_offload area rather than XFRM_SKB_CB.
		 */
		if (xo)
			seqhi = xo->seq.hi;
		else
			seqhi = XFRM_SKB_CB(skb)->seq.output.hi;

		/* Save the original offset and the word we are about to
		 * overwrite, so esp_output_restore_header() can undo this.
		 */
		extra->esphoff = (unsigned char *)esph -
				 skb_transport_header(skb);
		esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
		extra->seqhi = esph->spi;
		esph->seq_no = htonl(seqhi);
	}

	esph->spi = x->id.spi;

	return esph;
}
335*4882a593Smuzhiyun
esp_output_done_esn(struct crypto_async_request * base,int err)336*4882a593Smuzhiyun static void esp_output_done_esn(struct crypto_async_request *base, int err)
337*4882a593Smuzhiyun {
338*4882a593Smuzhiyun struct sk_buff *skb = base->data;
339*4882a593Smuzhiyun
340*4882a593Smuzhiyun esp_output_restore_header(skb);
341*4882a593Smuzhiyun esp_output_done(base, err);
342*4882a593Smuzhiyun }
343*4882a593Smuzhiyun
/* Build the UDP encapsulation header in front of the ESP header and
 * return where the ESP header itself now starts.
 */
static struct ip_esp_hdr *esp_output_udp_encap(struct sk_buff *skb,
					       int encap_type,
					       struct esp_info *esp,
					       __be16 sport,
					       __be16 dport)
{
	unsigned int len = skb->len + esp->tailen - skb_transport_offset(skb);
	struct udphdr *uh;
	__be32 *udpdata32;

	/* The encapsulated datagram plus the IP header must still fit. */
	if (len + sizeof(struct iphdr) > IP_MAX_MTU)
		return ERR_PTR(-EMSGSIZE);

	uh = (struct udphdr *)esp->esph;
	uh->source = sport;
	uh->dest = dport;
	uh->len = htons(len);
	uh->check = 0;

	*skb_mac_header(skb) = IPPROTO_UDP;

	if (encap_type != UDP_ENCAP_ESPINUDP_NON_IKE)
		return (struct ip_esp_hdr *)(uh + 1);

	/* NON-IKE framing carries an 8-byte zero marker after UDP. */
	udpdata32 = (__be32 *)(uh + 1);
	udpdata32[0] = udpdata32[1] = 0;
	return (struct ip_esp_hdr *)(udpdata32 + 2);
}
374*4882a593Smuzhiyun
375*4882a593Smuzhiyun #ifdef CONFIG_INET_ESPINTCP
esp_output_tcp_encap(struct xfrm_state * x,struct sk_buff * skb,struct esp_info * esp)376*4882a593Smuzhiyun static struct ip_esp_hdr *esp_output_tcp_encap(struct xfrm_state *x,
377*4882a593Smuzhiyun struct sk_buff *skb,
378*4882a593Smuzhiyun struct esp_info *esp)
379*4882a593Smuzhiyun {
380*4882a593Smuzhiyun __be16 *lenp = (void *)esp->esph;
381*4882a593Smuzhiyun struct ip_esp_hdr *esph;
382*4882a593Smuzhiyun unsigned int len;
383*4882a593Smuzhiyun struct sock *sk;
384*4882a593Smuzhiyun
385*4882a593Smuzhiyun len = skb->len + esp->tailen - skb_transport_offset(skb);
386*4882a593Smuzhiyun if (len > IP_MAX_MTU)
387*4882a593Smuzhiyun return ERR_PTR(-EMSGSIZE);
388*4882a593Smuzhiyun
389*4882a593Smuzhiyun rcu_read_lock();
390*4882a593Smuzhiyun sk = esp_find_tcp_sk(x);
391*4882a593Smuzhiyun rcu_read_unlock();
392*4882a593Smuzhiyun
393*4882a593Smuzhiyun if (IS_ERR(sk))
394*4882a593Smuzhiyun return ERR_CAST(sk);
395*4882a593Smuzhiyun
396*4882a593Smuzhiyun *lenp = htons(len);
397*4882a593Smuzhiyun esph = (struct ip_esp_hdr *)(lenp + 1);
398*4882a593Smuzhiyun
399*4882a593Smuzhiyun return esph;
400*4882a593Smuzhiyun }
401*4882a593Smuzhiyun #else
esp_output_tcp_encap(struct xfrm_state * x,struct sk_buff * skb,struct esp_info * esp)402*4882a593Smuzhiyun static struct ip_esp_hdr *esp_output_tcp_encap(struct xfrm_state *x,
403*4882a593Smuzhiyun struct sk_buff *skb,
404*4882a593Smuzhiyun struct esp_info *esp)
405*4882a593Smuzhiyun {
406*4882a593Smuzhiyun return ERR_PTR(-EOPNOTSUPP);
407*4882a593Smuzhiyun }
408*4882a593Smuzhiyun #endif
409*4882a593Smuzhiyun
esp_output_encap(struct xfrm_state * x,struct sk_buff * skb,struct esp_info * esp)410*4882a593Smuzhiyun static int esp_output_encap(struct xfrm_state *x, struct sk_buff *skb,
411*4882a593Smuzhiyun struct esp_info *esp)
412*4882a593Smuzhiyun {
413*4882a593Smuzhiyun struct xfrm_encap_tmpl *encap = x->encap;
414*4882a593Smuzhiyun struct ip_esp_hdr *esph;
415*4882a593Smuzhiyun __be16 sport, dport;
416*4882a593Smuzhiyun int encap_type;
417*4882a593Smuzhiyun
418*4882a593Smuzhiyun spin_lock_bh(&x->lock);
419*4882a593Smuzhiyun sport = encap->encap_sport;
420*4882a593Smuzhiyun dport = encap->encap_dport;
421*4882a593Smuzhiyun encap_type = encap->encap_type;
422*4882a593Smuzhiyun spin_unlock_bh(&x->lock);
423*4882a593Smuzhiyun
424*4882a593Smuzhiyun switch (encap_type) {
425*4882a593Smuzhiyun default:
426*4882a593Smuzhiyun case UDP_ENCAP_ESPINUDP:
427*4882a593Smuzhiyun case UDP_ENCAP_ESPINUDP_NON_IKE:
428*4882a593Smuzhiyun esph = esp_output_udp_encap(skb, encap_type, esp, sport, dport);
429*4882a593Smuzhiyun break;
430*4882a593Smuzhiyun case TCP_ENCAP_ESPINTCP:
431*4882a593Smuzhiyun esph = esp_output_tcp_encap(x, skb, esp);
432*4882a593Smuzhiyun break;
433*4882a593Smuzhiyun }
434*4882a593Smuzhiyun
435*4882a593Smuzhiyun if (IS_ERR(esph))
436*4882a593Smuzhiyun return PTR_ERR(esph);
437*4882a593Smuzhiyun
438*4882a593Smuzhiyun esp->esph = esph;
439*4882a593Smuzhiyun
440*4882a593Smuzhiyun return 0;
441*4882a593Smuzhiyun }
442*4882a593Smuzhiyun
/* Append the ESP trailer (TFC padding, pad bytes, pad length, next
 * header, and room for the ICV) and set up TCP/UDP encapsulation if
 * configured.  Avoids copying where possible: uses existing tailroom,
 * or attaches the trailer as a page fragment; falls back to
 * skb_cow_data() otherwise.  Returns the number of scatterlist
 * segments needed for the source data, or a negative errno.
 */
int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *tail;
	int nfrags;
	int esph_offset;
	struct page *page;
	struct sk_buff *trailer;
	int tailen = esp->tailen;

	/* this is non-NULL only with TCP/UDP Encapsulation */
	if (x->encap) {
		int err = esp_output_encap(x, skb, esp);

		if (err < 0)
			return err;
	}

	/* Page-fragment trick only works for sub-page amounts. */
	if (ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE ||
	    ALIGN(skb->data_len, L1_CACHE_BYTES) > PAGE_SIZE)
		goto cow;

	if (!skb_cloned(skb)) {
		if (tailen <= skb_tailroom(skb)) {
			/* Fast path: trailer fits in existing tailroom. */
			nfrags = 1;
			trailer = skb;
			tail = skb_tail_pointer(trailer);

			goto skip_cow;
		} else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
			   && !skb_has_frag_list(skb)) {
			int allocsize;
			struct sock *sk = skb->sk;
			struct page_frag *pfrag = &x->xfrag;

			/* The trailer lives in a separate page fragment,
			 * so encryption cannot run in place.
			 */
			esp->inplace = false;

			allocsize = ALIGN(tailen, L1_CACHE_BYTES);

			/* x->lock protects the shared page_frag. */
			spin_lock_bh(&x->lock);

			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
				spin_unlock_bh(&x->lock);
				goto cow;
			}

			page = pfrag->page;
			get_page(page);

			tail = page_address(page) + pfrag->offset;

			esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);

			nfrags = skb_shinfo(skb)->nr_frags;

			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
					     tailen);
			skb_shinfo(skb)->nr_frags = ++nfrags;

			pfrag->offset = pfrag->offset + allocsize;

			spin_unlock_bh(&x->lock);

			/* One extra sg entry for the skb head. */
			nfrags++;

			skb->len += tailen;
			skb->data_len += tailen;
			skb->truesize += tailen;
			if (sk && sk_fullsock(sk))
				refcount_add(tailen, &sk->sk_wmem_alloc);

			goto out;
		}
	}

cow:
	/* Slow path: make writable room at the tail.  skb data may move,
	 * so remember the ESP header as an offset and recompute it after.
	 */
	esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb);

	nfrags = skb_cow_data(skb, tailen, &trailer);
	if (nfrags < 0)
		goto out;
	tail = skb_tail_pointer(trailer);
	esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset);

skip_cow:
	esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
	pskb_put(skb, trailer, tailen);

out:
	return nfrags;
}
EXPORT_SYMBOL_GPL(esp_output_head);
534*4882a593Smuzhiyun
/* Build the scatterlists, set up the AEAD request, and encrypt the
 * packet.  On synchronous success the packet proceeds down the output
 * path (or to the TCP encapsulation socket); -EINPROGRESS indicates
 * asynchronous completion via esp_output_done{,_esn}().
 */
int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *iv;
	int alen;
	void *tmp;
	int ivlen;
	int assoclen;
	int extralen;
	struct page *page;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct aead_request *req;
	struct scatterlist *sg, *dsg;
	struct esp_output_extra *extra;
	int err = -ENOMEM;

	assoclen = sizeof(struct ip_esp_hdr);
	extralen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		/* ESN: reserve scratch space for the saved header word,
		 * and the high sequence bits join the associated data.
		 */
		extralen += sizeof(*extra);
		assoclen += sizeof(__be32);
	}

	aead = x->data;
	alen = crypto_aead_authsize(aead);
	ivlen = crypto_aead_ivsize(aead);

	/* +2 sg entries: one for the skb head, one spare used when a
	 * separate destination list is needed.
	 */
	tmp = esp_alloc_tmp(aead, esp->nfrags + 2, extralen);
	if (!tmp)
		goto error;

	extra = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	if (esp->inplace)
		dsg = sg;
	else
		dsg = &sg[esp->nfrags];

	esph = esp_output_set_extra(skb, x, esp->esph, extra);
	esp->esph = esph;

	sg_init_table(sg, esp->nfrags);
	err = skb_to_sgvec(skb, sg,
			   (unsigned char *)esph - skb->data,
			   assoclen + ivlen + esp->clen + alen);
	if (unlikely(err < 0))
		goto error_free;

	if (!esp->inplace) {
		int allocsize;
		struct page_frag *pfrag = &x->xfrag;

		/* Out-of-place: ciphertext for the paged data goes into
		 * a fresh page fragment replacing the old frags.
		 */
		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);

		spin_lock_bh(&x->lock);
		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
			spin_unlock_bh(&x->lock);
			goto error_free;
		}

		skb_shinfo(skb)->nr_frags = 1;

		page = pfrag->page;
		get_page(page);
		/* replace page frags in skb with new page */
		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
		pfrag->offset = pfrag->offset + allocsize;
		spin_unlock_bh(&x->lock);

		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
		err = skb_to_sgvec(skb, dsg,
				   (unsigned char *)esph - skb->data,
				   assoclen + ivlen + esp->clen + alen);
		if (unlikely(err < 0))
			goto error_free;
	}

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_output_done, skb);

	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
	aead_request_set_ad(req, assoclen);

	/* Use the low 64 sequence-number bits as the IV, right-aligned;
	 * ciphers with shorter IVs take the least significant bytes.
	 */
	memset(iv, 0, ivlen);
	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
	       min(ivlen, 8));

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_encrypt(req);

	switch (err) {
	case -EINPROGRESS:
		/* Async: the completion callback now owns tmp. */
		goto error;

	case -ENOSPC:
		err = NET_XMIT_DROP;
		break;

	case 0:
		if ((x->props.flags & XFRM_STATE_ESN))
			esp_output_restore_header(skb);
	}

	if (sg != dsg)
		esp_ssg_unref(x, tmp);

	if (!err && x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
		err = esp_output_tail_tcp(x, skb);

error_free:
	kfree(tmp);
error:
	return err;
}
EXPORT_SYMBOL_GPL(esp_output_tail);
656*4882a593Smuzhiyun
/* xfrm type output handler for ESP: compute pad/trailer sizes, build
 * the trailer and ESP header, then encrypt the packet.
 */
static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int alen;
	int blksize;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;

	esp.inplace = true;

	/* The inner protocol becomes the ESP trailer's next-header. */
	esp.proto = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		/* Traffic flow confidentiality: pad short packets up to
		 * the configured size, capped at the state's PMTU.
		 */
		padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			esp.tfclen = padto - skb->len;
	}
	/* clen = payload + pad-length byte + next-header byte, rounded
	 * up to the cipher block size (at least 4, per RFC 4303).
	 */
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	esp.nfrags = esp_output_head(x, skb, &esp);
	if (esp.nfrags < 0)
		return esp.nfrags;

	esph = esp.esph;
	esph->spi = x->id.spi;

	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
				((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

	skb_push(skb, -skb_network_offset(skb));

	return esp_output_tail(x, skb, &esp);
}
706*4882a593Smuzhiyun
/* Strip the ESP trailer (TFC padding, pad length, next header) and the
 * ICV from the tail of a freshly decrypted skb.
 *
 * Returns the trailer's "next header" protocol number on success, or a
 * negative errno if the advertised pad length is inconsistent with the
 * packet length.
 */
static inline int esp_remove_trailer(struct sk_buff *skb)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int alen, hlen, elen;
	int padlen, trimlen;
	__wsum csumdiff;
	u8 nexthdr[2];
	int ret;

	alen = crypto_aead_authsize(aead);
	hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	elen = skb->len - hlen;

	/* HW-offloaded path already removed the trailer and stashed the
	 * inner protocol in the offload state.
	 */
	if (xo && (xo->flags & XFRM_ESP_NO_TRAILER)) {
		ret = xo->proto;
		goto out;
	}

	/* The two bytes preceding the ICV are pad length and next header. */
	if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
		BUG();

	ret = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen) {
		net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
				    padlen + 2, elen - alen);
		goto out;
	}

	trimlen = alen + padlen + 2;
	/* Keep a CHECKSUM_COMPLETE value valid: subtract the checksum of
	 * the bytes about to be trimmed off the tail.
	 */
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0);
		skb->csum = csum_block_sub(skb->csum, csumdiff,
					   skb->len - trimlen);
	}
	pskb_trim(skb, skb->len - trimlen);

	ret = nexthdr[1];

out:
	return ret;
}
751*4882a593Smuzhiyun
/* Second half of ESP input processing, run once decryption has finished
 * (synchronously, or from the async crypto completion callback).
 *
 * Frees the temporary crypto buffer, strips the ESP trailer, performs
 * NAT-T peer address/port change detection, and repositions the skb
 * headers so the inner payload can continue through the xfrm input path.
 *
 * Returns the inner protocol number on success or a negative errno.
 */
int esp_input_done2(struct sk_buff *skb, int err)
{
	const struct iphdr *iph;
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int ihl;

	/* The tmp buffer is only allocated on the non-offloaded path. */
	if (!xo || (xo && !(xo->flags & CRYPTO_DONE)))
		kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	err = esp_remove_trailer(skb);
	if (unlikely(err < 0))
		goto out;

	iph = ip_hdr(skb);
	ihl = iph->ihl * 4;

	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		/* th/uh alias the same encapsulation header; which one is
		 * valid depends on the encap type below.
		 */
		struct tcphdr *th = (void *)(skb_network_header(skb) + ihl);
		struct udphdr *uh = (void *)(skb_network_header(skb) + ihl);
		__be16 source;

		switch (x->encap->encap_type) {
		case TCP_ENCAP_ESPINTCP:
			source = th->source;
			break;
		case UDP_ENCAP_ESPINUDP:
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			source = uh->source;
			break;
		default:
			WARN_ON_ONCE(1);
			err = -EINVAL;
			goto out;
		}

		/*
		 * 1) if the NAT-T peer's IP or port changed then
		 *    advertize the change to the keying daemon.
		 *    This is an inbound SA, so just compare
		 *    SRC ports.
		 */
		if (iph->saddr != x->props.saddr.a4 ||
		    source != encap->encap_sport) {
			xfrm_address_t ipaddr;

			ipaddr.a4 = iph->saddr;
			km_new_mapping(x, &ipaddr, source);

			/* XXX: perhaps add an extra
			 * policy check here, to see
			 * if we should allow or
			 * reject a packet from a
			 * different source
			 * address/port.
			 */
		}

		/*
		 * 2) ignore UDP/TCP checksums in case
		 *    of NAT-T in Transport Mode, or
		 *    perform other post-processing fixes
		 *    as per draft-ietf-ipsec-udp-encaps-06,
		 *    section 3.1.2
		 */
		if (x->props.mode == XFRM_MODE_TRANSPORT)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	/* Drop the ESP header + IV and keep the receive checksum honest. */
	skb_pull_rcsum(skb, hlen);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -ihl);

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}
EXPORT_SYMBOL_GPL(esp_input_done2);
841*4882a593Smuzhiyun
esp_input_done(struct crypto_async_request * base,int err)842*4882a593Smuzhiyun static void esp_input_done(struct crypto_async_request *base, int err)
843*4882a593Smuzhiyun {
844*4882a593Smuzhiyun struct sk_buff *skb = base->data;
845*4882a593Smuzhiyun
846*4882a593Smuzhiyun xfrm_input_resume(skb, esp_input_done2(skb, err));
847*4882a593Smuzhiyun }
848*4882a593Smuzhiyun
/* Undo the ESN header shuffle applied by esp_input_set_header(): restore
 * the original SPI/sequence layout, then drop the 4 extra bytes that made
 * room for the high-order sequence bits.
 */
static void esp_input_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, 0);
	__skb_pull(skb, 4);
}
854*4882a593Smuzhiyun
/* Prepare the ESP header for ESN authentication.
 *
 * Saves the original SPI in *seqhi and rearranges the header so that the
 * high-order sequence bits are covered by the AEAD associated data.
 */
static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct ip_esp_hdr *esph;

	/* For ESN we move the header forward by 4 bytes to
	 * accomodate the high bits.  We will move it back after
	 * decryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		esph = skb_push(skb, 4);
		*seqhi = esph->spi;
		esph->spi = esph->seq_no;
		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
	}
}
871*4882a593Smuzhiyun
esp_input_done_esn(struct crypto_async_request * base,int err)872*4882a593Smuzhiyun static void esp_input_done_esn(struct crypto_async_request *base, int err)
873*4882a593Smuzhiyun {
874*4882a593Smuzhiyun struct sk_buff *skb = base->data;
875*4882a593Smuzhiyun
876*4882a593Smuzhiyun esp_input_restore_header(skb);
877*4882a593Smuzhiyun esp_input_done(base, err);
878*4882a593Smuzhiyun }
879*4882a593Smuzhiyun
/*
 * Note: detecting truncated vs. non-truncated authentication data is very
 * expensive, so we only support truncated data, which is the recommended
 * and common case.
 */
/* xfrm input entry point for ESP: authenticate and decrypt one packet.
 *
 * Returns the inner protocol number on success, -EINPROGRESS when the
 * crypto operation completes asynchronously, or a negative errno.
 */
static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct aead_request *req;
	struct sk_buff *trailer;
	int ivlen = crypto_aead_ivsize(aead);
	int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen;
	int nfrags;
	int assoclen;
	int seqhilen;
	__be32 *seqhi;
	void *tmp;
	u8 *iv;
	struct scatterlist *sg;
	int err = -EINVAL;

	/* ESP header + IV must be present and linear. */
	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen))
		goto out;

	if (elen <= 0)
		goto out;

	/* Associated data covers SPI + seq, plus the high sequence
	 * bits when ESN is in use.
	 */
	assoclen = sizeof(struct ip_esp_hdr);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	/* Avoid the copy in skb_cow_data() when the skb is unshared and
	 * its fragment layout can be mapped into a scatterlist directly.
	 */
	if (!skb_cloned(skb)) {
		if (!skb_is_nonlinear(skb)) {
			nfrags = 1;

			goto skip_cow;
		} else if (!skb_has_frag_list(skb)) {
			nfrags = skb_shinfo(skb)->nr_frags;
			nfrags++;

			goto skip_cow;
		}
	}

	err = skb_cow_data(skb, 0, &trailer);
	if (err < 0)
		goto out;

	nfrags = err;

skip_cow:
	err = -ENOMEM;
	/* One buffer holds seqhi extra, IV, request and SG list. */
	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	esp_input_set_header(skb, seqhi);

	sg_init_table(sg, nfrags);
	err = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(err < 0)) {
		kfree(tmp);
		goto out;
	}

	skb->ip_summed = CHECKSUM_NONE;

	/* The ESN callback must also restore the header layout. */
	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_input_done, skb);

	/* Decrypt in place: ciphertext length includes the IV. */
	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
	aead_request_set_ad(req, assoclen);

	err = crypto_aead_decrypt(req);
	if (err == -EINPROGRESS)
		goto out;

	if ((x->props.flags & XFRM_STATE_ESN))
		esp_input_restore_header(skb);

	err = esp_input_done2(skb, err);

out:
	return err;
}
977*4882a593Smuzhiyun
/* ICMP error handler for ESP: react to PMTU ("fragmentation needed")
 * and redirect messages addressed to a known SA.
 *
 * Always returns 0; errors that do not match an SA are ignored.
 */
static int esp4_err(struct sk_buff *skb, u32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
	struct xfrm_state *x;

	switch (icmp_hdr(skb)->type) {
	case ICMP_DEST_UNREACH:
		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
			return 0;
		/* explicit break: previously fell through implicitly, which
		 * trips -Wimplicit-fallthrough; behavior is unchanged.
		 */
		break;
	case ICMP_REDIRECT:
		break;
	default:
		return 0;
	}

	/* Only react if the inner SPI/daddr match a live inbound SA. */
	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET);
	if (!x)
		return 0;

	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
		ipv4_update_pmtu(skb, net, info, 0, IPPROTO_ESP);
	else
		ipv4_redirect(skb, net, 0, IPPROTO_ESP);
	xfrm_state_put(x);

	return 0;
}
1008*4882a593Smuzhiyun
esp_destroy(struct xfrm_state * x)1009*4882a593Smuzhiyun static void esp_destroy(struct xfrm_state *x)
1010*4882a593Smuzhiyun {
1011*4882a593Smuzhiyun struct crypto_aead *aead = x->data;
1012*4882a593Smuzhiyun
1013*4882a593Smuzhiyun if (!aead)
1014*4882a593Smuzhiyun return;
1015*4882a593Smuzhiyun
1016*4882a593Smuzhiyun crypto_free_aead(aead);
1017*4882a593Smuzhiyun }
1018*4882a593Smuzhiyun
esp_init_aead(struct xfrm_state * x)1019*4882a593Smuzhiyun static int esp_init_aead(struct xfrm_state *x)
1020*4882a593Smuzhiyun {
1021*4882a593Smuzhiyun char aead_name[CRYPTO_MAX_ALG_NAME];
1022*4882a593Smuzhiyun struct crypto_aead *aead;
1023*4882a593Smuzhiyun int err;
1024*4882a593Smuzhiyun
1025*4882a593Smuzhiyun err = -ENAMETOOLONG;
1026*4882a593Smuzhiyun if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
1027*4882a593Smuzhiyun x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
1028*4882a593Smuzhiyun goto error;
1029*4882a593Smuzhiyun
1030*4882a593Smuzhiyun aead = crypto_alloc_aead(aead_name, 0, 0);
1031*4882a593Smuzhiyun err = PTR_ERR(aead);
1032*4882a593Smuzhiyun if (IS_ERR(aead))
1033*4882a593Smuzhiyun goto error;
1034*4882a593Smuzhiyun
1035*4882a593Smuzhiyun x->data = aead;
1036*4882a593Smuzhiyun
1037*4882a593Smuzhiyun err = crypto_aead_setkey(aead, x->aead->alg_key,
1038*4882a593Smuzhiyun (x->aead->alg_key_len + 7) / 8);
1039*4882a593Smuzhiyun if (err)
1040*4882a593Smuzhiyun goto error;
1041*4882a593Smuzhiyun
1042*4882a593Smuzhiyun err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
1043*4882a593Smuzhiyun if (err)
1044*4882a593Smuzhiyun goto error;
1045*4882a593Smuzhiyun
1046*4882a593Smuzhiyun error:
1047*4882a593Smuzhiyun return err;
1048*4882a593Smuzhiyun }
1049*4882a593Smuzhiyun
/* Set up an authenc(hmac,cipher)-style AEAD for a state configured with
 * separate encryption and (optional) authentication algorithms.
 *
 * Builds the "authenc"/"authencesn" template name, allocates the tfm,
 * packs the keys into the rtattr-framed authenc key blob, and programs
 * the truncated ICV length.
 *
 * Returns 0 on success or a negative errno.
 */
static int esp_init_authenc(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -EINVAL;
	if (!x->ealg)
		goto error;

	err = -ENAMETOOLONG;

	/* ESN needs the authencesn template so the high sequence bits are
	 * included in the authentication; wrap in geniv when present.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthencesn(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthenc(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	}

	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	/* Key blob layout: rtattr(param) | auth key | enc key. */
	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		/* The tfm's full digest size must match the xfrm algorithm
		 * description before the truncated length is applied.
		 */
		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			pr_info("ESP: %s digestsize %u != %hu\n",
				x->aalg->alg_name,
				crypto_aead_authsize(aead),
				aalg_desc->uinfo.auth.icv_fullbits / 8);
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err)
			goto free_key;
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree(key);

error:
	return err;
}
1142*4882a593Smuzhiyun
/* xfrm init_state hook: allocate the AEAD transform and compute the
 * worst-case header and trailer overhead for this SA (mode, NAT-T
 * encapsulation, padding and ICV).
 *
 * Returns 0 on success or a negative errno.
 */
static int esp_init_state(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	u32 align;
	int err;

	x->data = NULL;

	/* Combined-mode algorithm vs. separate cipher + auth. */
	if (x->aead)
		err = esp_init_aead(x);
	else
		err = esp_init_authenc(x);

	if (err)
		goto error;

	aead = x->data;

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct iphdr);
	else if (x->props.mode == XFRM_MODE_BEET && x->sel.family != AF_INET6)
		x->props.header_len += IPV4_BEET_PHMAXLEN;
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;

		switch (encap->encap_type) {
		default:
			err = -EINVAL;
			goto error;
		case UDP_ENCAP_ESPINUDP:
			x->props.header_len += sizeof(struct udphdr);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			/* non-IKE marker: two extra 32-bit zero words. */
			x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
			break;
#ifdef CONFIG_INET_ESPINTCP
		case TCP_ENCAP_ESPINTCP:
			/* only the length field, TCP encap is done by
			 * the socket
			 */
			x->props.header_len += 2;
			break;
#endif
		}
	}

	/* Worst case: pad to block alignment + pad-length/next-header
	 * bytes + ICV.
	 */
	align = ALIGN(crypto_aead_blocksize(aead), 4);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
	return err;
}
1197*4882a593Smuzhiyun
/* Per-packet receive callback; ESP needs no extra processing here. */
static int esp4_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}
1202*4882a593Smuzhiyun
/* xfrm type registration for IPv4 ESP: ties the protocol number to the
 * init/destroy/input/output handlers above.  Replay protection is
 * mandatory for ESP.
 */
static const struct xfrm_type esp_type =
{
	.description	= "ESP4",
	.owner		= THIS_MODULE,
	.proto	     	= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp_init_state,
	.destructor	= esp_destroy,
	.input		= esp_input,
	.output		= esp_output,
};
1214*4882a593Smuzhiyun
/* IPv4 protocol hooks for ESP: generic xfrm receive path plus the
 * ICMP error handler above.
 */
static struct xfrm4_protocol esp4_protocol = {
	.handler	=	xfrm4_rcv,
	.input_handler	=	xfrm_input,
	.cb_handler	=	esp4_rcv_cb,
	.err_handler	=	esp4_err,
	.priority	=	0,
};
1222*4882a593Smuzhiyun
esp4_init(void)1223*4882a593Smuzhiyun static int __init esp4_init(void)
1224*4882a593Smuzhiyun {
1225*4882a593Smuzhiyun if (xfrm_register_type(&esp_type, AF_INET) < 0) {
1226*4882a593Smuzhiyun pr_info("%s: can't add xfrm type\n", __func__);
1227*4882a593Smuzhiyun return -EAGAIN;
1228*4882a593Smuzhiyun }
1229*4882a593Smuzhiyun if (xfrm4_protocol_register(&esp4_protocol, IPPROTO_ESP) < 0) {
1230*4882a593Smuzhiyun pr_info("%s: can't add protocol\n", __func__);
1231*4882a593Smuzhiyun xfrm_unregister_type(&esp_type, AF_INET);
1232*4882a593Smuzhiyun return -EAGAIN;
1233*4882a593Smuzhiyun }
1234*4882a593Smuzhiyun return 0;
1235*4882a593Smuzhiyun }
1236*4882a593Smuzhiyun
esp4_fini(void)1237*4882a593Smuzhiyun static void __exit esp4_fini(void)
1238*4882a593Smuzhiyun {
1239*4882a593Smuzhiyun if (xfrm4_protocol_deregister(&esp4_protocol, IPPROTO_ESP) < 0)
1240*4882a593Smuzhiyun pr_info("%s: can't remove protocol\n", __func__);
1241*4882a593Smuzhiyun xfrm_unregister_type(&esp_type, AF_INET);
1242*4882a593Smuzhiyun }
1243*4882a593Smuzhiyun
/* Module registration and the xfrm-type alias used for autoloading. */
module_init(esp4_init);
module_exit(esp4_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP);
1248