xref: /OK3568_Linux_fs/kernel/net/sched/act_nat.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Stateless NAT actions
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
6*4882a593Smuzhiyun  */
7*4882a593Smuzhiyun 
8*4882a593Smuzhiyun #include <linux/errno.h>
9*4882a593Smuzhiyun #include <linux/init.h>
10*4882a593Smuzhiyun #include <linux/kernel.h>
11*4882a593Smuzhiyun #include <linux/module.h>
12*4882a593Smuzhiyun #include <linux/netfilter.h>
13*4882a593Smuzhiyun #include <linux/rtnetlink.h>
14*4882a593Smuzhiyun #include <linux/skbuff.h>
15*4882a593Smuzhiyun #include <linux/slab.h>
16*4882a593Smuzhiyun #include <linux/spinlock.h>
17*4882a593Smuzhiyun #include <linux/string.h>
18*4882a593Smuzhiyun #include <linux/tc_act/tc_nat.h>
19*4882a593Smuzhiyun #include <net/act_api.h>
20*4882a593Smuzhiyun #include <net/pkt_cls.h>
21*4882a593Smuzhiyun #include <net/icmp.h>
22*4882a593Smuzhiyun #include <net/ip.h>
23*4882a593Smuzhiyun #include <net/netlink.h>
24*4882a593Smuzhiyun #include <net/tc_act/tc_nat.h>
25*4882a593Smuzhiyun #include <net/tcp.h>
26*4882a593Smuzhiyun #include <net/udp.h>
27*4882a593Smuzhiyun 
28*4882a593Smuzhiyun 
29*4882a593Smuzhiyun static unsigned int nat_net_id;
30*4882a593Smuzhiyun static struct tc_action_ops act_nat_ops;
31*4882a593Smuzhiyun 
32*4882a593Smuzhiyun static const struct nla_policy nat_policy[TCA_NAT_MAX + 1] = {
33*4882a593Smuzhiyun 	[TCA_NAT_PARMS]	= { .len = sizeof(struct tc_nat) },
34*4882a593Smuzhiyun };
35*4882a593Smuzhiyun 
/**
 * tcf_nat_init - create or update a stateless NAT action instance
 * @net: network namespace the action lives in
 * @nla: TCA_NAT_* attributes from user space
 * @est: optional rate estimator configuration
 * @a: in/out action pointer (existing action on update, new on create)
 * @ovr: nonzero if replacing an existing action is allowed
 * @bind: nonzero when the action is being bound to a classifier
 * @rtnl_held: whether RTNL is held (unused here)
 * @tp: classifier the action is attached to (for goto_chain validation)
 * @flags: action creation flags
 * @extack: extended ack for error reporting
 *
 * Returns ACT_P_CREATED for a new action, 0 when an existing one was
 * bound or updated, or a negative errno on failure.
 */
static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
			struct tc_action **a, int ovr, int bind,
			bool rtnl_held, struct tcf_proto *tp,
			u32 flags, struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, nat_net_id);
	struct nlattr *tb[TCA_NAT_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_nat *parm;
	int ret = 0, err;
	struct tcf_nat *p;
	u32 index;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_NAT_MAX, nla, nat_policy,
					  NULL);
	if (err < 0)
		return err;

	if (tb[TCA_NAT_PARMS] == NULL)
		return -EINVAL;
	parm = nla_data(tb[TCA_NAT_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (!err) {
		/* Index was free: create a fresh action instance. */
		ret = tcf_idr_create(tn, index, est, a,
				     &act_nat_ops, bind, false, 0);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else if (err > 0) {
		/* Action already exists: bind to it, or update if ovr set. */
		if (bind)
			return 0;
		if (!ovr) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	} else {
		return err;
	}
	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;
	p = to_tcf_nat(*a);

	/* Publish the new parameters atomically w.r.t. the datapath. */
	spin_lock_bh(&p->tcf_lock);
	p->old_addr = parm->old_addr;
	p->new_addr = parm->new_addr;
	p->mask = parm->mask;
	p->flags = parm->flags;

	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	spin_unlock_bh(&p->tcf_lock);
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	return ret;
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}
101*4882a593Smuzhiyun 
tcf_nat_act(struct sk_buff * skb,const struct tc_action * a,struct tcf_result * res)102*4882a593Smuzhiyun static int tcf_nat_act(struct sk_buff *skb, const struct tc_action *a,
103*4882a593Smuzhiyun 		       struct tcf_result *res)
104*4882a593Smuzhiyun {
105*4882a593Smuzhiyun 	struct tcf_nat *p = to_tcf_nat(a);
106*4882a593Smuzhiyun 	struct iphdr *iph;
107*4882a593Smuzhiyun 	__be32 old_addr;
108*4882a593Smuzhiyun 	__be32 new_addr;
109*4882a593Smuzhiyun 	__be32 mask;
110*4882a593Smuzhiyun 	__be32 addr;
111*4882a593Smuzhiyun 	int egress;
112*4882a593Smuzhiyun 	int action;
113*4882a593Smuzhiyun 	int ihl;
114*4882a593Smuzhiyun 	int noff;
115*4882a593Smuzhiyun 
116*4882a593Smuzhiyun 	spin_lock(&p->tcf_lock);
117*4882a593Smuzhiyun 
118*4882a593Smuzhiyun 	tcf_lastuse_update(&p->tcf_tm);
119*4882a593Smuzhiyun 	old_addr = p->old_addr;
120*4882a593Smuzhiyun 	new_addr = p->new_addr;
121*4882a593Smuzhiyun 	mask = p->mask;
122*4882a593Smuzhiyun 	egress = p->flags & TCA_NAT_FLAG_EGRESS;
123*4882a593Smuzhiyun 	action = p->tcf_action;
124*4882a593Smuzhiyun 
125*4882a593Smuzhiyun 	bstats_update(&p->tcf_bstats, skb);
126*4882a593Smuzhiyun 
127*4882a593Smuzhiyun 	spin_unlock(&p->tcf_lock);
128*4882a593Smuzhiyun 
129*4882a593Smuzhiyun 	if (unlikely(action == TC_ACT_SHOT))
130*4882a593Smuzhiyun 		goto drop;
131*4882a593Smuzhiyun 
132*4882a593Smuzhiyun 	noff = skb_network_offset(skb);
133*4882a593Smuzhiyun 	if (!pskb_may_pull(skb, sizeof(*iph) + noff))
134*4882a593Smuzhiyun 		goto drop;
135*4882a593Smuzhiyun 
136*4882a593Smuzhiyun 	iph = ip_hdr(skb);
137*4882a593Smuzhiyun 
138*4882a593Smuzhiyun 	if (egress)
139*4882a593Smuzhiyun 		addr = iph->saddr;
140*4882a593Smuzhiyun 	else
141*4882a593Smuzhiyun 		addr = iph->daddr;
142*4882a593Smuzhiyun 
143*4882a593Smuzhiyun 	if (!((old_addr ^ addr) & mask)) {
144*4882a593Smuzhiyun 		if (skb_try_make_writable(skb, sizeof(*iph) + noff))
145*4882a593Smuzhiyun 			goto drop;
146*4882a593Smuzhiyun 
147*4882a593Smuzhiyun 		new_addr &= mask;
148*4882a593Smuzhiyun 		new_addr |= addr & ~mask;
149*4882a593Smuzhiyun 
150*4882a593Smuzhiyun 		/* Rewrite IP header */
151*4882a593Smuzhiyun 		iph = ip_hdr(skb);
152*4882a593Smuzhiyun 		if (egress)
153*4882a593Smuzhiyun 			iph->saddr = new_addr;
154*4882a593Smuzhiyun 		else
155*4882a593Smuzhiyun 			iph->daddr = new_addr;
156*4882a593Smuzhiyun 
157*4882a593Smuzhiyun 		csum_replace4(&iph->check, addr, new_addr);
158*4882a593Smuzhiyun 	} else if ((iph->frag_off & htons(IP_OFFSET)) ||
159*4882a593Smuzhiyun 		   iph->protocol != IPPROTO_ICMP) {
160*4882a593Smuzhiyun 		goto out;
161*4882a593Smuzhiyun 	}
162*4882a593Smuzhiyun 
163*4882a593Smuzhiyun 	ihl = iph->ihl * 4;
164*4882a593Smuzhiyun 
165*4882a593Smuzhiyun 	/* It would be nice to share code with stateful NAT. */
166*4882a593Smuzhiyun 	switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
167*4882a593Smuzhiyun 	case IPPROTO_TCP:
168*4882a593Smuzhiyun 	{
169*4882a593Smuzhiyun 		struct tcphdr *tcph;
170*4882a593Smuzhiyun 
171*4882a593Smuzhiyun 		if (!pskb_may_pull(skb, ihl + sizeof(*tcph) + noff) ||
172*4882a593Smuzhiyun 		    skb_try_make_writable(skb, ihl + sizeof(*tcph) + noff))
173*4882a593Smuzhiyun 			goto drop;
174*4882a593Smuzhiyun 
175*4882a593Smuzhiyun 		tcph = (void *)(skb_network_header(skb) + ihl);
176*4882a593Smuzhiyun 		inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr,
177*4882a593Smuzhiyun 					 true);
178*4882a593Smuzhiyun 		break;
179*4882a593Smuzhiyun 	}
180*4882a593Smuzhiyun 	case IPPROTO_UDP:
181*4882a593Smuzhiyun 	{
182*4882a593Smuzhiyun 		struct udphdr *udph;
183*4882a593Smuzhiyun 
184*4882a593Smuzhiyun 		if (!pskb_may_pull(skb, ihl + sizeof(*udph) + noff) ||
185*4882a593Smuzhiyun 		    skb_try_make_writable(skb, ihl + sizeof(*udph) + noff))
186*4882a593Smuzhiyun 			goto drop;
187*4882a593Smuzhiyun 
188*4882a593Smuzhiyun 		udph = (void *)(skb_network_header(skb) + ihl);
189*4882a593Smuzhiyun 		if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
190*4882a593Smuzhiyun 			inet_proto_csum_replace4(&udph->check, skb, addr,
191*4882a593Smuzhiyun 						 new_addr, true);
192*4882a593Smuzhiyun 			if (!udph->check)
193*4882a593Smuzhiyun 				udph->check = CSUM_MANGLED_0;
194*4882a593Smuzhiyun 		}
195*4882a593Smuzhiyun 		break;
196*4882a593Smuzhiyun 	}
197*4882a593Smuzhiyun 	case IPPROTO_ICMP:
198*4882a593Smuzhiyun 	{
199*4882a593Smuzhiyun 		struct icmphdr *icmph;
200*4882a593Smuzhiyun 
201*4882a593Smuzhiyun 		if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + noff))
202*4882a593Smuzhiyun 			goto drop;
203*4882a593Smuzhiyun 
204*4882a593Smuzhiyun 		icmph = (void *)(skb_network_header(skb) + ihl);
205*4882a593Smuzhiyun 
206*4882a593Smuzhiyun 		if (!icmp_is_err(icmph->type))
207*4882a593Smuzhiyun 			break;
208*4882a593Smuzhiyun 
209*4882a593Smuzhiyun 		if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph) +
210*4882a593Smuzhiyun 					noff))
211*4882a593Smuzhiyun 			goto drop;
212*4882a593Smuzhiyun 
213*4882a593Smuzhiyun 		icmph = (void *)(skb_network_header(skb) + ihl);
214*4882a593Smuzhiyun 		iph = (void *)(icmph + 1);
215*4882a593Smuzhiyun 		if (egress)
216*4882a593Smuzhiyun 			addr = iph->daddr;
217*4882a593Smuzhiyun 		else
218*4882a593Smuzhiyun 			addr = iph->saddr;
219*4882a593Smuzhiyun 
220*4882a593Smuzhiyun 		if ((old_addr ^ addr) & mask)
221*4882a593Smuzhiyun 			break;
222*4882a593Smuzhiyun 
223*4882a593Smuzhiyun 		if (skb_try_make_writable(skb, ihl + sizeof(*icmph) +
224*4882a593Smuzhiyun 					  sizeof(*iph) + noff))
225*4882a593Smuzhiyun 			goto drop;
226*4882a593Smuzhiyun 
227*4882a593Smuzhiyun 		icmph = (void *)(skb_network_header(skb) + ihl);
228*4882a593Smuzhiyun 		iph = (void *)(icmph + 1);
229*4882a593Smuzhiyun 
230*4882a593Smuzhiyun 		new_addr &= mask;
231*4882a593Smuzhiyun 		new_addr |= addr & ~mask;
232*4882a593Smuzhiyun 
233*4882a593Smuzhiyun 		/* XXX Fix up the inner checksums. */
234*4882a593Smuzhiyun 		if (egress)
235*4882a593Smuzhiyun 			iph->daddr = new_addr;
236*4882a593Smuzhiyun 		else
237*4882a593Smuzhiyun 			iph->saddr = new_addr;
238*4882a593Smuzhiyun 
239*4882a593Smuzhiyun 		inet_proto_csum_replace4(&icmph->checksum, skb, addr, new_addr,
240*4882a593Smuzhiyun 					 false);
241*4882a593Smuzhiyun 		break;
242*4882a593Smuzhiyun 	}
243*4882a593Smuzhiyun 	default:
244*4882a593Smuzhiyun 		break;
245*4882a593Smuzhiyun 	}
246*4882a593Smuzhiyun 
247*4882a593Smuzhiyun out:
248*4882a593Smuzhiyun 	return action;
249*4882a593Smuzhiyun 
250*4882a593Smuzhiyun drop:
251*4882a593Smuzhiyun 	spin_lock(&p->tcf_lock);
252*4882a593Smuzhiyun 	p->tcf_qstats.drops++;
253*4882a593Smuzhiyun 	spin_unlock(&p->tcf_lock);
254*4882a593Smuzhiyun 	return TC_ACT_SHOT;
255*4882a593Smuzhiyun }
256*4882a593Smuzhiyun 
tcf_nat_dump(struct sk_buff * skb,struct tc_action * a,int bind,int ref)257*4882a593Smuzhiyun static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a,
258*4882a593Smuzhiyun 			int bind, int ref)
259*4882a593Smuzhiyun {
260*4882a593Smuzhiyun 	unsigned char *b = skb_tail_pointer(skb);
261*4882a593Smuzhiyun 	struct tcf_nat *p = to_tcf_nat(a);
262*4882a593Smuzhiyun 	struct tc_nat opt = {
263*4882a593Smuzhiyun 		.index    = p->tcf_index,
264*4882a593Smuzhiyun 		.refcnt   = refcount_read(&p->tcf_refcnt) - ref,
265*4882a593Smuzhiyun 		.bindcnt  = atomic_read(&p->tcf_bindcnt) - bind,
266*4882a593Smuzhiyun 	};
267*4882a593Smuzhiyun 	struct tcf_t t;
268*4882a593Smuzhiyun 
269*4882a593Smuzhiyun 	spin_lock_bh(&p->tcf_lock);
270*4882a593Smuzhiyun 	opt.old_addr = p->old_addr;
271*4882a593Smuzhiyun 	opt.new_addr = p->new_addr;
272*4882a593Smuzhiyun 	opt.mask = p->mask;
273*4882a593Smuzhiyun 	opt.flags = p->flags;
274*4882a593Smuzhiyun 	opt.action = p->tcf_action;
275*4882a593Smuzhiyun 
276*4882a593Smuzhiyun 	if (nla_put(skb, TCA_NAT_PARMS, sizeof(opt), &opt))
277*4882a593Smuzhiyun 		goto nla_put_failure;
278*4882a593Smuzhiyun 
279*4882a593Smuzhiyun 	tcf_tm_dump(&t, &p->tcf_tm);
280*4882a593Smuzhiyun 	if (nla_put_64bit(skb, TCA_NAT_TM, sizeof(t), &t, TCA_NAT_PAD))
281*4882a593Smuzhiyun 		goto nla_put_failure;
282*4882a593Smuzhiyun 	spin_unlock_bh(&p->tcf_lock);
283*4882a593Smuzhiyun 
284*4882a593Smuzhiyun 	return skb->len;
285*4882a593Smuzhiyun 
286*4882a593Smuzhiyun nla_put_failure:
287*4882a593Smuzhiyun 	spin_unlock_bh(&p->tcf_lock);
288*4882a593Smuzhiyun 	nlmsg_trim(skb, b);
289*4882a593Smuzhiyun 	return -1;
290*4882a593Smuzhiyun }
291*4882a593Smuzhiyun 
tcf_nat_walker(struct net * net,struct sk_buff * skb,struct netlink_callback * cb,int type,const struct tc_action_ops * ops,struct netlink_ext_ack * extack)292*4882a593Smuzhiyun static int tcf_nat_walker(struct net *net, struct sk_buff *skb,
293*4882a593Smuzhiyun 			  struct netlink_callback *cb, int type,
294*4882a593Smuzhiyun 			  const struct tc_action_ops *ops,
295*4882a593Smuzhiyun 			  struct netlink_ext_ack *extack)
296*4882a593Smuzhiyun {
297*4882a593Smuzhiyun 	struct tc_action_net *tn = net_generic(net, nat_net_id);
298*4882a593Smuzhiyun 
299*4882a593Smuzhiyun 	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
300*4882a593Smuzhiyun }
301*4882a593Smuzhiyun 
/* Look up a NAT action by index in @net's per-netns table. */
static int tcf_nat_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, nat_net_id);

	return tcf_idr_search(tn, a, index);
}
308*4882a593Smuzhiyun 
309*4882a593Smuzhiyun static struct tc_action_ops act_nat_ops = {
310*4882a593Smuzhiyun 	.kind		=	"nat",
311*4882a593Smuzhiyun 	.id		=	TCA_ID_NAT,
312*4882a593Smuzhiyun 	.owner		=	THIS_MODULE,
313*4882a593Smuzhiyun 	.act		=	tcf_nat_act,
314*4882a593Smuzhiyun 	.dump		=	tcf_nat_dump,
315*4882a593Smuzhiyun 	.init		=	tcf_nat_init,
316*4882a593Smuzhiyun 	.walk		=	tcf_nat_walker,
317*4882a593Smuzhiyun 	.lookup		=	tcf_nat_search,
318*4882a593Smuzhiyun 	.size		=	sizeof(struct tcf_nat),
319*4882a593Smuzhiyun };
320*4882a593Smuzhiyun 
nat_init_net(struct net * net)321*4882a593Smuzhiyun static __net_init int nat_init_net(struct net *net)
322*4882a593Smuzhiyun {
323*4882a593Smuzhiyun 	struct tc_action_net *tn = net_generic(net, nat_net_id);
324*4882a593Smuzhiyun 
325*4882a593Smuzhiyun 	return tc_action_net_init(net, tn, &act_nat_ops);
326*4882a593Smuzhiyun }
327*4882a593Smuzhiyun 
nat_exit_net(struct list_head * net_list)328*4882a593Smuzhiyun static void __net_exit nat_exit_net(struct list_head *net_list)
329*4882a593Smuzhiyun {
330*4882a593Smuzhiyun 	tc_action_net_exit(net_list, nat_net_id);
331*4882a593Smuzhiyun }
332*4882a593Smuzhiyun 
333*4882a593Smuzhiyun static struct pernet_operations nat_net_ops = {
334*4882a593Smuzhiyun 	.init = nat_init_net,
335*4882a593Smuzhiyun 	.exit_batch = nat_exit_net,
336*4882a593Smuzhiyun 	.id   = &nat_net_id,
337*4882a593Smuzhiyun 	.size = sizeof(struct tc_action_net),
338*4882a593Smuzhiyun };
339*4882a593Smuzhiyun 
340*4882a593Smuzhiyun MODULE_DESCRIPTION("Stateless NAT actions");
341*4882a593Smuzhiyun MODULE_LICENSE("GPL");
342*4882a593Smuzhiyun 
nat_init_module(void)343*4882a593Smuzhiyun static int __init nat_init_module(void)
344*4882a593Smuzhiyun {
345*4882a593Smuzhiyun 	return tcf_register_action(&act_nat_ops, &nat_net_ops);
346*4882a593Smuzhiyun }
347*4882a593Smuzhiyun 
nat_cleanup_module(void)348*4882a593Smuzhiyun static void __exit nat_cleanup_module(void)
349*4882a593Smuzhiyun {
350*4882a593Smuzhiyun 	tcf_unregister_action(&act_nat_ops, &nat_net_ops);
351*4882a593Smuzhiyun }
352*4882a593Smuzhiyun 
353*4882a593Smuzhiyun module_init(nat_init_module);
354*4882a593Smuzhiyun module_exit(nat_cleanup_module);
355