xref: /OK3568_Linux_fs/kernel/drivers/infiniband/core/netlink.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  * Copyright (c) 2017 Mellanox Technologies Inc.  All rights reserved.
3*4882a593Smuzhiyun  * Copyright (c) 2010 Voltaire Inc.  All rights reserved.
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * This software is available to you under a choice of one of two
6*4882a593Smuzhiyun  * licenses.  You may choose to be licensed under the terms of the GNU
7*4882a593Smuzhiyun  * General Public License (GPL) Version 2, available from the file
8*4882a593Smuzhiyun  * COPYING in the main directory of this source tree, or the
9*4882a593Smuzhiyun  * OpenIB.org BSD license below:
10*4882a593Smuzhiyun  *
11*4882a593Smuzhiyun  *     Redistribution and use in source and binary forms, with or
12*4882a593Smuzhiyun  *     without modification, are permitted provided that the following
13*4882a593Smuzhiyun  *     conditions are met:
14*4882a593Smuzhiyun  *
15*4882a593Smuzhiyun  *      - Redistributions of source code must retain the above
16*4882a593Smuzhiyun  *        copyright notice, this list of conditions and the following
17*4882a593Smuzhiyun  *        disclaimer.
18*4882a593Smuzhiyun  *
19*4882a593Smuzhiyun  *      - Redistributions in binary form must reproduce the above
20*4882a593Smuzhiyun  *        copyright notice, this list of conditions and the following
21*4882a593Smuzhiyun  *        disclaimer in the documentation and/or other materials
22*4882a593Smuzhiyun  *        provided with the distribution.
23*4882a593Smuzhiyun  *
24*4882a593Smuzhiyun  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25*4882a593Smuzhiyun  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26*4882a593Smuzhiyun  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27*4882a593Smuzhiyun  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28*4882a593Smuzhiyun  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29*4882a593Smuzhiyun  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30*4882a593Smuzhiyun  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31*4882a593Smuzhiyun  * SOFTWARE.
32*4882a593Smuzhiyun  */
33*4882a593Smuzhiyun 
34*4882a593Smuzhiyun #define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__
35*4882a593Smuzhiyun 
36*4882a593Smuzhiyun #include <linux/export.h>
37*4882a593Smuzhiyun #include <net/netlink.h>
38*4882a593Smuzhiyun #include <net/net_namespace.h>
39*4882a593Smuzhiyun #include <net/netns/generic.h>
40*4882a593Smuzhiyun #include <net/sock.h>
41*4882a593Smuzhiyun #include <rdma/rdma_netlink.h>
42*4882a593Smuzhiyun #include <linux/module.h>
43*4882a593Smuzhiyun #include "core_priv.h"
44*4882a593Smuzhiyun 
/*
 * Per-client registration state, indexed by the RDMA netlink client id
 * (the RDMA_NL_GET_CLIENT() part of nlmsg_type).
 */
static struct {
	/* Registered op table; NULL while the client is unregistered. */
	const struct rdma_nl_cbs *cb_table;
	/* Synchronizes between ongoing netlink commands and netlink client
	 * unregistration.
	 */
	struct rw_semaphore sem;
} rdma_nl_types[RDMA_NL_NUM_CLIENTS];
52*4882a593Smuzhiyun 
rdma_nl_chk_listeners(unsigned int group)53*4882a593Smuzhiyun bool rdma_nl_chk_listeners(unsigned int group)
54*4882a593Smuzhiyun {
55*4882a593Smuzhiyun 	struct rdma_dev_net *rnet = rdma_net_to_dev_net(&init_net);
56*4882a593Smuzhiyun 
57*4882a593Smuzhiyun 	return netlink_has_listeners(rnet->nl_sock, group);
58*4882a593Smuzhiyun }
59*4882a593Smuzhiyun EXPORT_SYMBOL(rdma_nl_chk_listeners);
60*4882a593Smuzhiyun 
is_nl_msg_valid(unsigned int type,unsigned int op)61*4882a593Smuzhiyun static bool is_nl_msg_valid(unsigned int type, unsigned int op)
62*4882a593Smuzhiyun {
63*4882a593Smuzhiyun 	static const unsigned int max_num_ops[RDMA_NL_NUM_CLIENTS] = {
64*4882a593Smuzhiyun 		[RDMA_NL_IWCM] = RDMA_NL_IWPM_NUM_OPS,
65*4882a593Smuzhiyun 		[RDMA_NL_LS] = RDMA_NL_LS_NUM_OPS,
66*4882a593Smuzhiyun 		[RDMA_NL_NLDEV] = RDMA_NLDEV_NUM_OPS,
67*4882a593Smuzhiyun 	};
68*4882a593Smuzhiyun 
69*4882a593Smuzhiyun 	/*
70*4882a593Smuzhiyun 	 * This BUILD_BUG_ON is intended to catch addition of new
71*4882a593Smuzhiyun 	 * RDMA netlink protocol without updating the array above.
72*4882a593Smuzhiyun 	 */
73*4882a593Smuzhiyun 	BUILD_BUG_ON(RDMA_NL_NUM_CLIENTS != 6);
74*4882a593Smuzhiyun 
75*4882a593Smuzhiyun 	if (type >= RDMA_NL_NUM_CLIENTS)
76*4882a593Smuzhiyun 		return false;
77*4882a593Smuzhiyun 
78*4882a593Smuzhiyun 	return (op < max_num_ops[type]) ? true : false;
79*4882a593Smuzhiyun }
80*4882a593Smuzhiyun 
/*
 * Return the callback table registered for client @type, or NULL when the
 * requested @op cannot be serviced (wrong namespace, no table, or the op
 * has neither .dump nor .doit).
 *
 * Called with rdma_nl_types[type].sem held for read (see rdma_nl_rcv_msg());
 * may temporarily drop and re-take it, but always returns with it held.
 */
static const struct rdma_nl_cbs *
get_cb_table(const struct sk_buff *skb, unsigned int type, unsigned int op)
{
	const struct rdma_nl_cbs *cb_table;

	/*
	 * Currently only NLDEV client is supporting netlink commands in
	 * non init_net net namespace.
	 */
	if (sock_net(skb->sk) != &init_net && type != RDMA_NL_NLDEV)
		return NULL;

	cb_table = READ_ONCE(rdma_nl_types[type].cb_table);
	if (!cb_table) {
		/*
		 * Didn't get valid reference of the table, attempt module
		 * load once.  The rwsem is released across request_module()
		 * because module loading can block for a long time, and the
		 * loaded module's rdma_nl_register() publishes the table
		 * without taking this lock.
		 */
		up_read(&rdma_nl_types[type].sem);

		request_module("rdma-netlink-subsys-%d", type);

		down_read(&rdma_nl_types[type].sem);
		/* Re-read: the table may have been registered (or not) while
		 * the lock was dropped.
		 */
		cb_table = READ_ONCE(rdma_nl_types[type].cb_table);
	}
	/* The op must provide at least one handler to be dispatchable. */
	if (!cb_table || (!cb_table[op].dump && !cb_table[op].doit))
		return NULL;
	return cb_table;
}
110*4882a593Smuzhiyun 
rdma_nl_register(unsigned int index,const struct rdma_nl_cbs cb_table[])111*4882a593Smuzhiyun void rdma_nl_register(unsigned int index,
112*4882a593Smuzhiyun 		      const struct rdma_nl_cbs cb_table[])
113*4882a593Smuzhiyun {
114*4882a593Smuzhiyun 	if (WARN_ON(!is_nl_msg_valid(index, 0)) ||
115*4882a593Smuzhiyun 	    WARN_ON(READ_ONCE(rdma_nl_types[index].cb_table)))
116*4882a593Smuzhiyun 		return;
117*4882a593Smuzhiyun 
118*4882a593Smuzhiyun 	/* Pairs with the READ_ONCE in is_nl_valid() */
119*4882a593Smuzhiyun 	smp_store_release(&rdma_nl_types[index].cb_table, cb_table);
120*4882a593Smuzhiyun }
121*4882a593Smuzhiyun EXPORT_SYMBOL(rdma_nl_register);
122*4882a593Smuzhiyun 
rdma_nl_unregister(unsigned int index)123*4882a593Smuzhiyun void rdma_nl_unregister(unsigned int index)
124*4882a593Smuzhiyun {
125*4882a593Smuzhiyun 	down_write(&rdma_nl_types[index].sem);
126*4882a593Smuzhiyun 	rdma_nl_types[index].cb_table = NULL;
127*4882a593Smuzhiyun 	up_write(&rdma_nl_types[index].sem);
128*4882a593Smuzhiyun }
129*4882a593Smuzhiyun EXPORT_SYMBOL(rdma_nl_unregister);
130*4882a593Smuzhiyun 
ibnl_put_msg(struct sk_buff * skb,struct nlmsghdr ** nlh,int seq,int len,int client,int op,int flags)131*4882a593Smuzhiyun void *ibnl_put_msg(struct sk_buff *skb, struct nlmsghdr **nlh, int seq,
132*4882a593Smuzhiyun 		   int len, int client, int op, int flags)
133*4882a593Smuzhiyun {
134*4882a593Smuzhiyun 	*nlh = nlmsg_put(skb, 0, seq, RDMA_NL_GET_TYPE(client, op), len, flags);
135*4882a593Smuzhiyun 	if (!*nlh)
136*4882a593Smuzhiyun 		return NULL;
137*4882a593Smuzhiyun 	return nlmsg_data(*nlh);
138*4882a593Smuzhiyun }
139*4882a593Smuzhiyun EXPORT_SYMBOL(ibnl_put_msg);
140*4882a593Smuzhiyun 
ibnl_put_attr(struct sk_buff * skb,struct nlmsghdr * nlh,int len,void * data,int type)141*4882a593Smuzhiyun int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh,
142*4882a593Smuzhiyun 		  int len, void *data, int type)
143*4882a593Smuzhiyun {
144*4882a593Smuzhiyun 	if (nla_put(skb, type, len, data)) {
145*4882a593Smuzhiyun 		nlmsg_cancel(skb, nlh);
146*4882a593Smuzhiyun 		return -EMSGSIZE;
147*4882a593Smuzhiyun 	}
148*4882a593Smuzhiyun 	return 0;
149*4882a593Smuzhiyun }
150*4882a593Smuzhiyun EXPORT_SYMBOL(ibnl_put_attr);
151*4882a593Smuzhiyun 
/*
 * Dispatch one RDMA netlink message to the registered client callback.
 * Runs with the client's rwsem held for read so the callback table cannot
 * be unregistered while a handler is executing.
 */
static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct netlink_ext_ack *extack)
{
	int type = nlh->nlmsg_type;
	/* Client index and op number are packed together in nlmsg_type. */
	unsigned int index = RDMA_NL_GET_CLIENT(type);
	unsigned int op = RDMA_NL_GET_OP(type);
	const struct rdma_nl_cbs *cb_table;
	int err = -EINVAL;

	if (!is_nl_msg_valid(index, op))
		return -EINVAL;

	/* Hold the read side so the client cannot unregister mid-dispatch. */
	down_read(&rdma_nl_types[index].sem);
	cb_table = get_cb_table(skb, index, op);
	if (!cb_table)
		goto done;

	/* Privileged ops require CAP_NET_ADMIN of the sender. */
	if ((cb_table[op].flags & RDMA_NL_ADMIN_PERM) &&
	    !netlink_capable(skb, CAP_NET_ADMIN)) {
		err = -EPERM;
		goto done;
	}

	/*
	 * LS responses overload the 0x100 (NLM_F_ROOT) flag.  Don't
	 * mistakenly call the .dump() function.
	 */
	if (index == RDMA_NL_LS) {
		if (cb_table[op].doit)
			err = cb_table[op].doit(skb, nlh, extack);
		goto done;
	}
	/* FIXME: Convert IWCM to properly handle doit callbacks */
	if ((nlh->nlmsg_flags & NLM_F_DUMP) || index == RDMA_NL_IWCM) {
		struct netlink_dump_control c = {
			.dump = cb_table[op].dump,
		};
		if (c.dump)
			err = netlink_dump_start(skb->sk, skb, nlh, &c);
		goto done;
	}

	/* Plain (non-dump) request: invoke the doit handler if present. */
	if (cb_table[op].doit)
		err = cb_table[op].doit(skb, nlh, extack);
done:
	up_read(&rdma_nl_types[index].sem);
	return err;
}
200*4882a593Smuzhiyun 
/*
 * This function is similar to netlink_rcv_skb with one exception:
 * It calls to the callback for the netlink messages without NLM_F_REQUEST
 * flag. These messages are intended for RDMA_NL_LS consumer, so it is allowed
 * for that consumer only.
 */
static int rdma_nl_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
						   struct nlmsghdr *,
						   struct netlink_ext_ack *))
{
	struct netlink_ext_ack extack = {};
	struct nlmsghdr *nlh;
	int err;

	/* Walk every complete netlink message contained in the skb. */
	while (skb->len >= nlmsg_total_size(0)) {
		int msglen;

		nlh = nlmsg_hdr(skb);
		err = 0;

		/* Stop quietly on a truncated or malformed header. */
		if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
			return 0;

		/*
		 * Generally speaking, the only requests are handled
		 * by the kernel, but RDMA_NL_LS is different, because it
		 * runs backward netlink scheme. Kernel initiates messages
		 * and waits for reply with data to keep pathrecord cache
		 * in sync.
		 */
		if (!(nlh->nlmsg_flags & NLM_F_REQUEST) &&
		    (RDMA_NL_GET_CLIENT(nlh->nlmsg_type) != RDMA_NL_LS))
			goto ack;

		/* Skip control messages */
		if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
			goto ack;

		err = cb(skb, nlh, &extack);
		/*
		 * NOTE(review): -EINTR appears to mean the callback took
		 * over completion (e.g. a dump was started), so no ack is
		 * sent here -- confirm against netlink_rcv_skb().
		 */
		if (err == -EINTR)
			goto skip;

ack:
		/* Ack when requested by the sender or on any error. */
		if (nlh->nlmsg_flags & NLM_F_ACK || err)
			netlink_ack(skb, nlh, err, &extack);

skip:
		/* Advance to the next aligned message, clamped to skb->len. */
		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
		if (msglen > skb->len)
			msglen = skb->len;
		skb_pull(skb, msglen);
	}

	return 0;
}
256*4882a593Smuzhiyun 
rdma_nl_rcv(struct sk_buff * skb)257*4882a593Smuzhiyun static void rdma_nl_rcv(struct sk_buff *skb)
258*4882a593Smuzhiyun {
259*4882a593Smuzhiyun 	rdma_nl_rcv_skb(skb, &rdma_nl_rcv_msg);
260*4882a593Smuzhiyun }
261*4882a593Smuzhiyun 
/**
 * rdma_nl_unicast - Send @skb to one netlink port without blocking.
 * @net: network namespace whose RDMA netlink socket to use.
 * @skb: message to send (consumed).
 * @pid: destination netlink port id.
 *
 * Return: 0 on success, negative errno on failure.
 */
int rdma_nl_unicast(struct net *net, struct sk_buff *skb, u32 pid)
{
	int ret;

	ret = netlink_unicast(rdma_net_to_dev_net(net)->nl_sock, skb, pid,
			      MSG_DONTWAIT);
	if (ret < 0)
		return ret;

	return 0;
}
EXPORT_SYMBOL(rdma_nl_unicast);
271*4882a593Smuzhiyun 
/**
 * rdma_nl_unicast_wait - Send @skb to one netlink port, may block.
 * @net: network namespace whose RDMA netlink socket to use.
 * @skb: message to send (consumed).
 * @pid: destination netlink port id.
 *
 * Unlike rdma_nl_unicast(), no MSG_DONTWAIT: the sender may sleep
 * waiting for socket buffer space.
 *
 * Return: 0 on success, negative errno on failure.
 */
int rdma_nl_unicast_wait(struct net *net, struct sk_buff *skb, __u32 pid)
{
	int ret;

	ret = netlink_unicast(rdma_net_to_dev_net(net)->nl_sock, skb, pid, 0);
	if (ret < 0)
		return ret;

	return 0;
}
EXPORT_SYMBOL(rdma_nl_unicast_wait);
281*4882a593Smuzhiyun 
/**
 * rdma_nl_multicast - Broadcast @skb to a netlink multicast group.
 * @net: network namespace whose RDMA netlink socket to use.
 * @skb: message to send (consumed).
 * @group: destination multicast group.
 * @flags: allocation flags for per-listener clones.
 *
 * Return: 0 on success, negative errno on failure.
 */
int rdma_nl_multicast(struct net *net, struct sk_buff *skb,
		      unsigned int group, gfp_t flags)
{
	return nlmsg_multicast(rdma_net_to_dev_net(net)->nl_sock, skb, 0,
			       group, flags);
}
EXPORT_SYMBOL(rdma_nl_multicast);
290*4882a593Smuzhiyun 
rdma_nl_init(void)291*4882a593Smuzhiyun void rdma_nl_init(void)
292*4882a593Smuzhiyun {
293*4882a593Smuzhiyun 	int idx;
294*4882a593Smuzhiyun 
295*4882a593Smuzhiyun 	for (idx = 0; idx < RDMA_NL_NUM_CLIENTS; idx++)
296*4882a593Smuzhiyun 		init_rwsem(&rdma_nl_types[idx].sem);
297*4882a593Smuzhiyun }
298*4882a593Smuzhiyun 
rdma_nl_exit(void)299*4882a593Smuzhiyun void rdma_nl_exit(void)
300*4882a593Smuzhiyun {
301*4882a593Smuzhiyun 	int idx;
302*4882a593Smuzhiyun 
303*4882a593Smuzhiyun 	for (idx = 0; idx < RDMA_NL_NUM_CLIENTS; idx++)
304*4882a593Smuzhiyun 		WARN(rdma_nl_types[idx].cb_table,
305*4882a593Smuzhiyun 		     "Netlink client %d wasn't released prior to unloading %s\n",
306*4882a593Smuzhiyun 		     idx, KBUILD_MODNAME);
307*4882a593Smuzhiyun }
308*4882a593Smuzhiyun 
rdma_nl_net_init(struct rdma_dev_net * rnet)309*4882a593Smuzhiyun int rdma_nl_net_init(struct rdma_dev_net *rnet)
310*4882a593Smuzhiyun {
311*4882a593Smuzhiyun 	struct net *net = read_pnet(&rnet->net);
312*4882a593Smuzhiyun 	struct netlink_kernel_cfg cfg = {
313*4882a593Smuzhiyun 		.input	= rdma_nl_rcv,
314*4882a593Smuzhiyun 	};
315*4882a593Smuzhiyun 	struct sock *nls;
316*4882a593Smuzhiyun 
317*4882a593Smuzhiyun 	nls = netlink_kernel_create(net, NETLINK_RDMA, &cfg);
318*4882a593Smuzhiyun 	if (!nls)
319*4882a593Smuzhiyun 		return -ENOMEM;
320*4882a593Smuzhiyun 
321*4882a593Smuzhiyun 	nls->sk_sndtimeo = 10 * HZ;
322*4882a593Smuzhiyun 	rnet->nl_sock = nls;
323*4882a593Smuzhiyun 	return 0;
324*4882a593Smuzhiyun }
325*4882a593Smuzhiyun 
rdma_nl_net_exit(struct rdma_dev_net * rnet)326*4882a593Smuzhiyun void rdma_nl_net_exit(struct rdma_dev_net *rnet)
327*4882a593Smuzhiyun {
328*4882a593Smuzhiyun 	netlink_kernel_release(rnet->nl_sock);
329*4882a593Smuzhiyun }
330*4882a593Smuzhiyun 
331*4882a593Smuzhiyun MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_RDMA);
332