// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2007-2012 Siemens AG
 *
 * Written by:
 * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
 * Sergey Lapin <slapin@ossfans.org>
 * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
 * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
 */

#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/ieee802154.h>

#include <net/nl802154.h>
#include <net/mac802154.h>
#include <net/ieee802154_netdev.h>
#include <net/cfg802154.h>

#include "ieee802154_i.h"
#include "driver-ops.h"

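/* Push the interface's current PAN ID and extended (hardware) address into
 * the link-layer security parameters, when the MLME ops expose an llsec
 * interface. Called when the interface's addresses are (re)configured.
 */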
int mac802154_wpan_update_llsec(struct net_device *dev)
{
	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
	struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
	struct wpan_dev *wpan_dev = &sdata->wpan_dev;
	int rc = 0;

	if (ops->llsec) {
		struct ieee802154_llsec_params params;
		int changed = 0;

		params.pan_id = wpan_dev->pan_id;
		changed |= IEEE802154_LLSEC_PARAM_PAN_ID;

		params.hwaddr = wpan_dev->extended_addr;
		changed |= IEEE802154_LLSEC_PARAM_HWADDR;

		rc = ops->llsec->set_params(dev, &params, changed);
	}

	return rc;
}

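/* Legacy SIOCGIFADDR/SIOCSIFADDR handling for the short address and PAN ID,
 * serialized under the RTNL. Setting the address is only allowed while the
 * interface is down.
 */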
static int
mac802154_wpan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
	struct wpan_dev *wpan_dev = &sdata->wpan_dev;
	struct sockaddr_ieee802154 *sa =
		(struct sockaddr_ieee802154 *)&ifr->ifr_addr;
	int err = -ENOIOCTLCMD;

	if (cmd != SIOCGIFADDR && cmd != SIOCSIFADDR)
		return err;

	rtnl_lock();

	switch (cmd) {
	case SIOCGIFADDR:
	{
		u16 pan_id, short_addr;

		pan_id = le16_to_cpu(wpan_dev->pan_id);
		short_addr = le16_to_cpu(wpan_dev->short_addr);
		if (pan_id == IEEE802154_PANID_BROADCAST ||
		    short_addr == IEEE802154_ADDR_BROADCAST) {
			err = -EADDRNOTAVAIL;
			break;
		}

		sa->family = AF_IEEE802154;
		sa->addr.addr_type = IEEE802154_ADDR_SHORT;
		sa->addr.pan_id = pan_id;
		sa->addr.short_addr = short_addr;

		err = 0;
		break;
	}
	case SIOCSIFADDR:
		if (netif_running(dev)) {
			rtnl_unlock();
			return -EBUSY;
		}

		dev_warn(&dev->dev,
			 "Using DEBUGing ioctl SIOCSIFADDR isn't recommended!\n");
		if (sa->family != AF_IEEE802154 ||
		    sa->addr.addr_type != IEEE802154_ADDR_SHORT ||
		    sa->addr.pan_id == IEEE802154_PANID_BROADCAST ||
		    sa->addr.short_addr == IEEE802154_ADDR_BROADCAST ||
		    sa->addr.short_addr == IEEE802154_ADDR_UNDEF) {
			err = -EINVAL;
			break;
		}

		wpan_dev->pan_id = cpu_to_le16(sa->addr.pan_id);
		wpan_dev->short_addr = cpu_to_le16(sa->addr.short_addr);

		err = mac802154_wpan_update_llsec(dev);
		break;
	}

	rtnl_unlock();
	return err;
}

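/* ndo_set_mac_address: validate the new extended address, store it in the
 * wpan_dev, mirror it to an attached lowpan interface (which must also be
 * down) and refresh the llsec parameters.
 */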
static int mac802154_wpan_mac_addr(struct net_device *dev, void *p)
{
	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
	struct sockaddr *addr = p;
	__le64 extended_addr;

	if (netif_running(dev))
		return -EBUSY;

	/* the lowpan interface needs to be down as well, so that its
	 * SLAAC address is regenerated from the new MAC address on ifup
	 */
	if (sdata->wpan_dev.lowpan_dev) {
		if (netif_running(sdata->wpan_dev.lowpan_dev))
			return -EBUSY;
	}

	ieee802154_be64_to_le64(&extended_addr, addr->sa_data);
	if (!ieee802154_is_valid_extended_unicast_addr(extended_addr))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	sdata->wpan_dev.extended_addr = extended_addr;

	/* update the lowpan interface's MAC address whenever the wpan
	 * MAC address changes
	 */
	if (sdata->wpan_dev.lowpan_dev)
		memcpy(sdata->wpan_dev.lowpan_dev->dev_addr, dev->dev_addr,
		       dev->addr_len);

	return mac802154_wpan_update_llsec(dev);
}

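/* Program the transceiver with the MAC settings of this interface:
 * promiscuous mode, address filter, LBT, CSMA and frame-retry parameters,
 * each only when the hardware advertises the corresponding capability flag.
 */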
static int ieee802154_setup_hw(struct ieee802154_sub_if_data *sdata)
{
	struct ieee802154_local *local = sdata->local;
	struct wpan_dev *wpan_dev = &sdata->wpan_dev;
	int ret;

	if (local->hw.flags & IEEE802154_HW_PROMISCUOUS) {
		ret = drv_set_promiscuous_mode(local,
					       wpan_dev->promiscuous_mode);
		if (ret < 0)
			return ret;
	}

	if (local->hw.flags & IEEE802154_HW_AFILT) {
		ret = drv_set_pan_id(local, wpan_dev->pan_id);
		if (ret < 0)
			return ret;

		ret = drv_set_extended_addr(local, wpan_dev->extended_addr);
		if (ret < 0)
			return ret;

		ret = drv_set_short_addr(local, wpan_dev->short_addr);
		if (ret < 0)
			return ret;
	}

	if (local->hw.flags & IEEE802154_HW_LBT) {
		ret = drv_set_lbt_mode(local, wpan_dev->lbt);
		if (ret < 0)
			return ret;
	}

	if (local->hw.flags & IEEE802154_HW_CSMA_PARAMS) {
		ret = drv_set_csma_params(local, wpan_dev->min_be,
					  wpan_dev->max_be,
					  wpan_dev->csma_retries);
		if (ret < 0)
			return ret;
	}

	if (local->hw.flags & IEEE802154_HW_FRAME_RETRIES) {
		ret = drv_set_max_frame_retries(local, wpan_dev->frame_retries);
		if (ret < 0)
			return ret;
	}

	return 0;
}

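/* Bring one interface up: mark it running and, if it is the first open
 * interface on this PHY, push the MAC settings to the hardware and start
 * the driver.
 */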
static int mac802154_slave_open(struct net_device *dev)
{
	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
	struct ieee802154_local *local = sdata->local;
	int res;

	ASSERT_RTNL();

	set_bit(SDATA_STATE_RUNNING, &sdata->state);

	if (!local->open_count) {
		res = ieee802154_setup_hw(sdata);
		if (res)
			goto err;

		res = drv_start(local);
		if (res)
			goto err;
	}

	local->open_count++;
	netif_start_queue(dev);
	return 0;
err:
	/* might already be clear but that doesn't matter */
	clear_bit(SDATA_STATE_RUNNING, &sdata->state);

	return res;
}

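/* Compare the MAC settings of a new interface against an already running one.
 * There is only one PHY, so any setting the hardware enforces globally must
 * match on both interfaces; otherwise report -EBUSY.
 */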
static int
ieee802154_check_mac_settings(struct ieee802154_local *local,
			      struct wpan_dev *wpan_dev,
			      struct wpan_dev *nwpan_dev)
{
	ASSERT_RTNL();

	if (local->hw.flags & IEEE802154_HW_PROMISCUOUS) {
		if (wpan_dev->promiscuous_mode != nwpan_dev->promiscuous_mode)
			return -EBUSY;
	}

	if (local->hw.flags & IEEE802154_HW_AFILT) {
		if (wpan_dev->pan_id != nwpan_dev->pan_id ||
		    wpan_dev->short_addr != nwpan_dev->short_addr ||
		    wpan_dev->extended_addr != nwpan_dev->extended_addr)
			return -EBUSY;
	}

	if (local->hw.flags & IEEE802154_HW_CSMA_PARAMS) {
		if (wpan_dev->min_be != nwpan_dev->min_be ||
		    wpan_dev->max_be != nwpan_dev->max_be ||
		    wpan_dev->csma_retries != nwpan_dev->csma_retries)
			return -EBUSY;
	}

	if (local->hw.flags & IEEE802154_HW_FRAME_RETRIES) {
		if (wpan_dev->frame_retries != nwpan_dev->frame_retries)
			return -EBUSY;
	}

	if (local->hw.flags & IEEE802154_HW_LBT) {
		if (wpan_dev->lbt != nwpan_dev->lbt)
			return -EBUSY;
	}

	return 0;
}

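/* Walk the list of interfaces on this PHY and make sure the one being opened
 * does not conflict with an interface that is already running: only one
 * node interface may run at a time, and hardware-enforced MAC settings must
 * match.
 */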
static int
ieee802154_check_concurrent_iface(struct ieee802154_sub_if_data *sdata,
				  enum nl802154_iftype iftype)
{
	struct ieee802154_local *local = sdata->local;
	struct wpan_dev *wpan_dev = &sdata->wpan_dev;
	struct ieee802154_sub_if_data *nsdata;

	/* we hold the RTNL here so can safely walk the list */
	list_for_each_entry(nsdata, &local->interfaces, list) {
		if (nsdata != sdata && ieee802154_sdata_running(nsdata)) {
			int ret;

			/* TODO we currently don't support multiple node-type
			 * interfaces; that would require skb_clone in the rx
			 * path. Check whether there really is a use case for
			 * running multiple node interfaces at the same time.
			 */
			if (wpan_dev->iftype == NL802154_IFTYPE_NODE &&
			    nsdata->wpan_dev.iftype == NL802154_IFTYPE_NODE)
				return -EBUSY;

			/* check that all PHY/MAC sublayer settings are the
			 * same. We have only one PHY, so differing values
			 * cause trouble.
			 */
			ret = ieee802154_check_mac_settings(local, wpan_dev,
							    &nsdata->wpan_dev);
			if (ret < 0)
				return ret;
		}
	}

	return 0;
}

static int mac802154_wpan_open(struct net_device *dev)
{
	int rc;
	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
	struct wpan_dev *wpan_dev = &sdata->wpan_dev;

	rc = ieee802154_check_concurrent_iface(sdata, wpan_dev->iftype);
	if (rc < 0)
		return rc;

	return mac802154_slave_open(dev);
}

static int mac802154_slave_close(struct net_device *dev)
{
	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
	struct ieee802154_local *local = sdata->local;

	ASSERT_RTNL();

	netif_stop_queue(dev);
	local->open_count--;

	clear_bit(SDATA_STATE_RUNNING, &sdata->state);

	if (!local->open_count)
		ieee802154_stop_device(local);

	return 0;
}

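/* Fill the auxiliary security fields of an outgoing MAC header from the
 * interface's llsec parameters, honouring any per-frame overrides carried
 * in the skb control block.
 */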
static int mac802154_set_header_security(struct ieee802154_sub_if_data *sdata,
					 struct ieee802154_hdr *hdr,
					 const struct ieee802154_mac_cb *cb)
{
	struct ieee802154_llsec_params params;
	u8 level;

	mac802154_llsec_get_params(&sdata->sec, &params);

	if (!params.enabled && cb->secen_override && cb->secen)
		return -EINVAL;
	if (!params.enabled ||
	    (cb->secen_override && !cb->secen) ||
	    !params.out_level)
		return 0;
	if (cb->seclevel_override && !cb->seclevel)
		return -EINVAL;

	level = cb->seclevel_override ? cb->seclevel : params.out_level;

	hdr->fc.security_enabled = 1;
	hdr->sec.level = level;
	hdr->sec.key_id_mode = params.out_key.mode;
	if (params.out_key.mode == IEEE802154_SCF_KEY_SHORT_INDEX)
		hdr->sec.short_src = params.out_key.short_source;
	else if (params.out_key.mode == IEEE802154_SCF_KEY_HW_INDEX)
		hdr->sec.extended_src = params.out_key.extended_source;
	hdr->sec.key_id = params.out_key.id;

	return 0;
}

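/* wpan_dev header_ops .create: build a MAC header from full struct
 * ieee802154_addr source and destination addresses. The frame type, security
 * and ack-request settings come from the skb's mac_cb; the interface's own
 * addresses are used when no source address is given.
 */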
static int ieee802154_header_create(struct sk_buff *skb,
				    struct net_device *dev,
				    const struct ieee802154_addr *daddr,
				    const struct ieee802154_addr *saddr,
				    unsigned len)
{
	struct ieee802154_hdr hdr;
	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
	struct wpan_dev *wpan_dev = &sdata->wpan_dev;
	struct ieee802154_mac_cb *cb = mac_cb(skb);
	int hlen;

	if (!daddr)
		return -EINVAL;

	memset(&hdr.fc, 0, sizeof(hdr.fc));
	hdr.fc.type = cb->type;
	hdr.fc.security_enabled = cb->secen;
	hdr.fc.ack_request = cb->ackreq;
	hdr.seq = atomic_inc_return(&dev->ieee802154_ptr->dsn) & 0xFF;

	if (mac802154_set_header_security(sdata, &hdr, cb) < 0)
		return -EINVAL;

	if (!saddr) {
		if (wpan_dev->short_addr == cpu_to_le16(IEEE802154_ADDR_BROADCAST) ||
		    wpan_dev->short_addr == cpu_to_le16(IEEE802154_ADDR_UNDEF) ||
		    wpan_dev->pan_id == cpu_to_le16(IEEE802154_PANID_BROADCAST)) {
			hdr.source.mode = IEEE802154_ADDR_LONG;
			hdr.source.extended_addr = wpan_dev->extended_addr;
		} else {
			hdr.source.mode = IEEE802154_ADDR_SHORT;
			hdr.source.short_addr = wpan_dev->short_addr;
		}

		hdr.source.pan_id = wpan_dev->pan_id;
	} else {
		hdr.source = *(const struct ieee802154_addr *)saddr;
	}

	hdr.dest = *(const struct ieee802154_addr *)daddr;

	hlen = ieee802154_hdr_push(skb, &hdr);
	if (hlen < 0)
		return -EINVAL;

	skb_reset_mac_header(skb);
	skb->mac_len = hlen;

	if (len > ieee802154_max_payload(&hdr))
		return -EMSGSIZE;

	return hlen;
}

static const struct wpan_dev_header_ops ieee802154_header_ops = {
	.create = ieee802154_header_create,
};

/* This header_ops.create implementation assumes that source and destination
 * are passed as at most 8-byte arrays. To fit that into the 802.15.4 data
 * frame header we only use extended addressing and intra-PAN communication
 * here; the fc fields are mostly fallback handling. It provides
 * dev_hard_header() support for dgram sockets.
 */
static int mac802154_header_create(struct sk_buff *skb,
				   struct net_device *dev,
				   unsigned short type,
				   const void *daddr,
				   const void *saddr,
				   unsigned len)
{
	struct ieee802154_hdr hdr;
	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
	struct wpan_dev *wpan_dev = &sdata->wpan_dev;
	struct ieee802154_mac_cb cb = { };
	int hlen;

	if (!daddr)
		return -EINVAL;

	memset(&hdr.fc, 0, sizeof(hdr.fc));
	hdr.fc.type = IEEE802154_FC_TYPE_DATA;
	hdr.fc.ack_request = wpan_dev->ackreq;
	hdr.seq = atomic_inc_return(&dev->ieee802154_ptr->dsn) & 0xFF;

	/* TODO currently a workaround: pass a zeroed cb block so that the
	 * security parameters default to the MIB settings.
	 */
	if (mac802154_set_header_security(sdata, &hdr, &cb) < 0)
		return -EINVAL;

	hdr.dest.pan_id = wpan_dev->pan_id;
	hdr.dest.mode = IEEE802154_ADDR_LONG;
	ieee802154_be64_to_le64(&hdr.dest.extended_addr, daddr);

	hdr.source.pan_id = hdr.dest.pan_id;
	hdr.source.mode = IEEE802154_ADDR_LONG;

	if (!saddr)
		hdr.source.extended_addr = wpan_dev->extended_addr;
	else
		ieee802154_be64_to_le64(&hdr.source.extended_addr, saddr);

	hlen = ieee802154_hdr_push(skb, &hdr);
	if (hlen < 0)
		return -EINVAL;

	skb_reset_mac_header(skb);
	skb->mac_len = hlen;

	if (len > ieee802154_max_payload(&hdr))
		return -EMSGSIZE;

	return hlen;
}

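/* header_ops.parse: report the source hardware address of a received frame.
 * Only extended (64-bit) source addresses are returned; anything else yields
 * a zero length.
 */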
static int
mac802154_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
	struct ieee802154_hdr hdr;

	if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0) {
		pr_debug("malformed packet\n");
		return 0;
	}

	if (hdr.source.mode == IEEE802154_ADDR_LONG) {
		ieee802154_le64_to_be64(haddr, &hdr.source.extended_addr);
		return IEEE802154_EXTENDED_ADDR_LEN;
	}

	return 0;
}

static const struct header_ops mac802154_header_ops = {
	.create = mac802154_header_create,
	.parse = mac802154_header_parse,
};

static const struct net_device_ops mac802154_wpan_ops = {
	.ndo_open = mac802154_wpan_open,
	.ndo_stop = mac802154_slave_close,
	.ndo_start_xmit = ieee802154_subif_start_xmit,
	.ndo_do_ioctl = mac802154_wpan_ioctl,
	.ndo_set_mac_address = mac802154_wpan_mac_addr,
};

static const struct net_device_ops mac802154_monitor_ops = {
	.ndo_open = mac802154_wpan_open,
	.ndo_stop = mac802154_slave_close,
	.ndo_start_xmit = ieee802154_monitor_start_xmit,
};

static void mac802154_wpan_free(struct net_device *dev)
{
	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);

	mac802154_llsec_destroy(&sdata->sec);
}

static void ieee802154_if_setup(struct net_device *dev)
{
	dev->addr_len = IEEE802154_EXTENDED_ADDR_LEN;
	memset(dev->broadcast, 0xff, IEEE802154_EXTENDED_ADDR_LEN);

	/* hard_header_len would normally be IEEE802154_MIN_HEADER_LEN, but
	 * AF_PACKET will not send frames without any payload and ack frames
	 * have no payload, so subtract one so that a 3-byte frame can be
	 * sent. The xmit callback assumes at least a hard header with the
	 * two fc bytes and the sequence field set.
	 */
	dev->hard_header_len = IEEE802154_MIN_HEADER_LEN - 1;
	/* The auth_tag is the security tag; it occupies private payload room
	 * of the MAC frame, sitting between the payload and the FCS field.
	 */
	dev->needed_tailroom = IEEE802154_MAX_AUTH_TAG_LEN +
			       IEEE802154_FCS_LEN;
	/* The MTU is the payload size without the MAC header here. The
	 * header has a dynamic length with a minimum of hard_header_len, so
	 * set the MTU to the maximum payload, which is IEEE802154_MTU -
	 * IEEE802154_FCS_LEN - hard_header_len: the FCS is added by hardware
	 * or in ndo_start_xmit, and the minimum MAC header is accounted for
	 * inside the driver layer. Any MAC header bytes beyond
	 * hard_header_len count as payload.
	 */
	dev->mtu = IEEE802154_MTU - IEEE802154_FCS_LEN -
		   dev->hard_header_len;
	dev->tx_queue_len = 300;
	dev->flags = IFF_NOARP | IFF_BROADCAST;
}

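/* Initialize type-dependent state of a freshly allocated interface: random
 * sequence numbers, 802.15.4-2011 CSMA/retry defaults, netdev ops, header
 * ops and, for node interfaces, the llsec state.
 */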
static int
ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata,
		       enum nl802154_iftype type)
{
	struct wpan_dev *wpan_dev = &sdata->wpan_dev;
	int ret;
	u8 tmp;

	/* set some type-dependent values */
	sdata->wpan_dev.iftype = type;

	get_random_bytes(&tmp, sizeof(tmp));
	atomic_set(&wpan_dev->bsn, tmp);
	get_random_bytes(&tmp, sizeof(tmp));
	atomic_set(&wpan_dev->dsn, tmp);

	/* defaults per 802.15.4-2011 */
	wpan_dev->min_be = 3;
	wpan_dev->max_be = 5;
	wpan_dev->csma_retries = 4;
	wpan_dev->frame_retries = 3;

	wpan_dev->pan_id = cpu_to_le16(IEEE802154_PANID_BROADCAST);
	wpan_dev->short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST);

	switch (type) {
	case NL802154_IFTYPE_NODE:
		ieee802154_be64_to_le64(&wpan_dev->extended_addr,
					sdata->dev->dev_addr);

		sdata->dev->header_ops = &mac802154_header_ops;
		sdata->dev->needs_free_netdev = true;
		sdata->dev->priv_destructor = mac802154_wpan_free;
		sdata->dev->netdev_ops = &mac802154_wpan_ops;
		sdata->dev->ml_priv = &mac802154_mlme_wpan;
		wpan_dev->promiscuous_mode = false;
		wpan_dev->header_ops = &ieee802154_header_ops;

		mutex_init(&sdata->sec_mtx);

		mac802154_llsec_init(&sdata->sec);
		ret = mac802154_wpan_update_llsec(sdata->dev);
		if (ret < 0)
			return ret;

		break;
	case NL802154_IFTYPE_MONITOR:
		sdata->dev->needs_free_netdev = true;
		sdata->dev->netdev_ops = &mac802154_monitor_ops;
		wpan_dev->promiscuous_mode = true;
		break;
	default:
		BUG();
	}

	return 0;
}

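/* Allocate a new netdev for this PHY, derive its hardware address from the
 * requested extended address (or the PHY's permanent address), set it up
 * according to the interface type and register it.
 */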
struct net_device *
ieee802154_if_add(struct ieee802154_local *local, const char *name,
		  unsigned char name_assign_type, enum nl802154_iftype type,
		  __le64 extended_addr)
{
	struct net_device *ndev = NULL;
	struct ieee802154_sub_if_data *sdata = NULL;
	int ret = -ENOMEM;

	ASSERT_RTNL();

	ndev = alloc_netdev(sizeof(*sdata), name,
			    name_assign_type, ieee802154_if_setup);
	if (!ndev)
		return ERR_PTR(-ENOMEM);

	ndev->needed_headroom = local->hw.extra_tx_headroom +
				IEEE802154_MAX_HEADER_LEN;

	ret = dev_alloc_name(ndev, ndev->name);
	if (ret < 0)
		goto err;

	ieee802154_le64_to_be64(ndev->perm_addr,
				&local->hw.phy->perm_extended_addr);
	switch (type) {
	case NL802154_IFTYPE_NODE:
		ndev->type = ARPHRD_IEEE802154;
		if (ieee802154_is_valid_extended_unicast_addr(extended_addr))
			ieee802154_le64_to_be64(ndev->dev_addr, &extended_addr);
		else
			memcpy(ndev->dev_addr, ndev->perm_addr,
			       IEEE802154_EXTENDED_ADDR_LEN);
		break;
	case NL802154_IFTYPE_MONITOR:
		ndev->type = ARPHRD_IEEE802154_MONITOR;
		break;
	default:
		ret = -EINVAL;
		goto err;
	}

	/* TODO check this */
	SET_NETDEV_DEV(ndev, &local->phy->dev);
	dev_net_set(ndev, wpan_phy_net(local->hw.phy));
	sdata = netdev_priv(ndev);
	ndev->ieee802154_ptr = &sdata->wpan_dev;
	memcpy(sdata->name, ndev->name, IFNAMSIZ);
	sdata->dev = ndev;
	sdata->wpan_dev.wpan_phy = local->hw.phy;
	sdata->local = local;
	INIT_LIST_HEAD(&sdata->wpan_dev.list);

	/* setup type-dependent data */
	ret = ieee802154_setup_sdata(sdata, type);
	if (ret)
		goto err;

	ret = register_netdevice(ndev);
	if (ret < 0)
		goto err;

	mutex_lock(&local->iflist_mtx);
	list_add_tail_rcu(&sdata->list, &local->interfaces);
	mutex_unlock(&local->iflist_mtx);

	return ndev;

err:
	free_netdev(ndev);
	return ERR_PTR(ret);
}

void ieee802154_if_remove(struct ieee802154_sub_if_data *sdata)
{
	ASSERT_RTNL();

	mutex_lock(&sdata->local->iflist_mtx);
	list_del_rcu(&sdata->list);
	mutex_unlock(&sdata->local->iflist_mtx);

	synchronize_rcu();
	unregister_netdevice(sdata->dev);
}

void ieee802154_remove_interfaces(struct ieee802154_local *local)
{
	struct ieee802154_sub_if_data *sdata, *tmp;

	mutex_lock(&local->iflist_mtx);
	list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
		list_del(&sdata->list);

		unregister_netdevice(sdata->dev);
	}
	mutex_unlock(&local->iflist_mtx);
}

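/* netdev notifier: keep sdata->name in sync when one of our interfaces is
 * renamed. Interfaces belonging to other wpan_phy implementations are
 * ignored via the privid check.
 */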
static int netdev_notify(struct notifier_block *nb,
			 unsigned long state, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct ieee802154_sub_if_data *sdata;

	if (state != NETDEV_CHANGENAME)
		return NOTIFY_DONE;

	if (!dev->ieee802154_ptr || !dev->ieee802154_ptr->wpan_phy)
		return NOTIFY_DONE;

	if (dev->ieee802154_ptr->wpan_phy->privid != mac802154_wpan_phy_privid)
		return NOTIFY_DONE;

	sdata = IEEE802154_DEV_TO_SUB_IF(dev);
	memcpy(sdata->name, dev->name, IFNAMSIZ);

	return NOTIFY_OK;
}

static struct notifier_block mac802154_netdev_notifier = {
	.notifier_call = netdev_notify,
};

int ieee802154_iface_init(void)
{
	return register_netdevice_notifier(&mac802154_netdev_notifier);
}

void ieee802154_iface_exit(void)
{
	unregister_netdevice_notifier(&mac802154_netdev_notifier);
}