xref: /OK3568_Linux_fs/kernel/net/x25/x25_forward.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	History
 *	03-01-2007	Added forwarding for x.25	Andrew Hendry
 */

#define pr_fmt(fmt) "X25: " fmt

#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/x25.h>

LIST_HEAD(x25_forward_list);
DEFINE_RWLOCK(x25_forward_list_lock);

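/* Forward an incoming Call Request towards the route for dest_addr.
 * The lci/device pair is recorded on x25_forward_list so that later
 * traffic for the call can be relayed in either direction.
 * Returns 1 if the call was forwarded, 0 if not, or -ENOMEM if the
 * forwarding entry could not be allocated.
 */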
int x25_forward_call(struct x25_address *dest_addr, struct x25_neigh *from,
			struct sk_buff *skb, int lci)
{
	struct x25_route *rt;
	struct x25_neigh *neigh_new = NULL;
	struct list_head *entry;
	struct x25_forward *x25_frwd, *new_frwd;
	struct sk_buff *skbn;
	short same_lci = 0;
	int rc = 0;

	if ((rt = x25_get_route(dest_addr)) == NULL)
		goto out_no_route;

	if ((neigh_new = x25_get_neigh(rt->dev)) == NULL) {
		/* This shouldn't happen; if it does, drop the
		 * call and release the route.
		 */
		goto out_put_route;
	}

	/* Avoid a loop. This is the normal exit path for a
	 * system with only one x.25 iface and default route
	 */
	if (rt->dev == from->dev)
		goto out_put_nb;

	/* Remote end sending a call request on an already
	 * established LCI? It shouldn't happen, but check just in case.
	 */
	read_lock_bh(&x25_forward_list_lock);
	list_for_each(entry, &x25_forward_list) {
		x25_frwd = list_entry(entry, struct x25_forward, node);
		if (x25_frwd->lci == lci) {
			pr_warn("call request for an lci that is already registered, forwarding but not registering a new pair\n");
			same_lci = 1;
		}
	}
	read_unlock_bh(&x25_forward_list_lock);

	/* Save the forwarding details for future traffic */
	if (!same_lci) {
		if ((new_frwd = kmalloc(sizeof(struct x25_forward),
						GFP_ATOMIC)) == NULL) {
			rc = -ENOMEM;
			goto out_put_nb;
		}
		new_frwd->lci = lci;
		new_frwd->dev1 = rt->dev;
		new_frwd->dev2 = from->dev;
		write_lock_bh(&x25_forward_list_lock);
		list_add(&new_frwd->node, &x25_forward_list);
		write_unlock_bh(&x25_forward_list_lock);
	}

	/* Forward the call request */
	if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL)
		goto out_put_nb;

	x25_transmit_link(skbn, neigh_new);
	rc = 1;

out_put_nb:
	x25_neigh_put(neigh_new);

out_put_route:
	x25_route_put(rt);

out_no_route:
	return rc;
}

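/* Relay a frame belonging to a forwarded call. The lci is looked up on
 * x25_forward_list to find the peer device, and a copy of the frame is
 * transmitted on that device's neighbour.
 * Returns 1 if the frame was forwarded, 0 otherwise.
 */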
int x25_forward_data(int lci, struct x25_neigh *from, struct sk_buff *skb)
{
	struct x25_forward *frwd;
	struct list_head *entry;
	struct net_device *peer = NULL;
	struct x25_neigh *nb;
	struct sk_buff *skbn;
	int rc = 0;

	read_lock_bh(&x25_forward_list_lock);
	list_for_each(entry, &x25_forward_list) {
		frwd = list_entry(entry, struct x25_forward, node);
		if (frwd->lci == lci) {
			/* The call is established, either side can send */
			if (from->dev == frwd->dev1)
				peer = frwd->dev2;
			else
				peer = frwd->dev1;
			break;
		}
	}
	read_unlock_bh(&x25_forward_list_lock);

	if ((nb = x25_get_neigh(peer)) == NULL)
		goto out;

	if ((skbn = pskb_copy(skb, GFP_ATOMIC)) == NULL)
		goto output;

	x25_transmit_link(skbn, nb);
	rc = 1;
output:
	x25_neigh_put(nb);
out:
	return rc;
}

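/* Remove any forwarding entry registered for this lci. */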
void x25_clear_forward_by_lci(unsigned int lci)
{
	struct x25_forward *fwd, *tmp;

	write_lock_bh(&x25_forward_list_lock);

	list_for_each_entry_safe(fwd, tmp, &x25_forward_list, node) {
		if (fwd->lci == lci) {
			list_del(&fwd->node);
			kfree(fwd);
		}
	}
	write_unlock_bh(&x25_forward_list_lock);
}

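/* Remove any forwarding entries that reference the given device. */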
void x25_clear_forward_by_dev(struct net_device *dev)
{
	struct x25_forward *fwd, *tmp;

	write_lock_bh(&x25_forward_list_lock);

	list_for_each_entry_safe(fwd, tmp, &x25_forward_list, node) {
		if ((fwd->dev1 == dev) || (fwd->dev2 == dev)) {
			list_del(&fwd->node);
			kfree(fwd);
		}
	}
	write_unlock_bh(&x25_forward_list_lock);
}
161*4882a593Smuzhiyun }
162