// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2007-2012 Siemens AG
 *
 * Written by:
 * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
 * Sergey Lapin <slapin@ossfans.org>
 * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
 * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
 */

#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/crc-ccitt.h>
#include <asm/unaligned.h>

#include <net/rtnetlink.h>
#include <net/ieee802154_netdev.h>
#include <net/mac802154.h>
#include <net/cfg802154.h>

#include "ieee802154_i.h"
#include "driver-ops.h"
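/*
 * Transmit worker used when the driver only implements the synchronous
 * (sleeping) xmit callback. It runs on the mac802154 workqueue and
 * handles the single frame stashed in local->tx_skb by ieee802154_tx().
 */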
void ieee802154_xmit_worker(struct work_struct *work)
{
	struct ieee802154_local *local =
		container_of(work, struct ieee802154_local, tx_work);
	struct sk_buff *skb = local->tx_skb;
	struct net_device *dev = skb->dev;
	int res;

	res = drv_xmit_sync(local, skb);
	if (res)
		goto err_tx;

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	ieee802154_xmit_complete(&local->hw, skb, false);

	return;

err_tx:
	/* Restart the netif queue on each sub_if_data object. */
	ieee802154_wake_queue(&local->hw);
	kfree_skb(skb);
	netdev_dbg(dev, "transmission failed\n");
}

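/*
 * Common transmit path for all interface types. Unless the transceiver
 * computes the checksum itself (IEEE802154_HW_TX_OMIT_CKSUM), the
 * 16-bit CCITT CRC is appended here as the frame check sequence. The
 * netif queues are stopped so that at most one frame is in flight at a
 * time; they are woken again once the transmission completes or fails.
 */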
static netdev_tx_t
ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	int ret;

	if (!(local->hw.flags & IEEE802154_HW_TX_OMIT_CKSUM)) {
		struct sk_buff *nskb;
		u16 crc;

		if (unlikely(skb_tailroom(skb) < IEEE802154_FCS_LEN)) {
			nskb = skb_copy_expand(skb, 0, IEEE802154_FCS_LEN,
					       GFP_ATOMIC);
			if (likely(nskb)) {
				consume_skb(skb);
				skb = nskb;
			} else {
				goto err_tx;
			}
		}

		crc = crc_ccitt(0, skb->data, skb->len);
		put_unaligned_le16(crc, skb_put(skb, 2));
	}

	/* Stop the netif queue on each sub_if_data object. */
	ieee802154_stop_queue(&local->hw);

	/* Prefer the async xmit path; fall back to the sync worker otherwise. */
	if (local->ops->xmit_async) {
		unsigned int len = skb->len;

		ret = drv_xmit_async(local, skb);
		if (ret) {
			ieee802154_wake_queue(&local->hw);
			goto err_tx;
		}

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += len;
	} else {
		local->tx_skb = skb;
		queue_work(local->workqueue, &local->tx_work);
	}

	return NETDEV_TX_OK;

err_tx:
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

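/* ndo_start_xmit for monitor interfaces: frames are injected as-is,
 * without llsec processing, straight into the common transmit path.
 */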
netdev_tx_t
ieee802154_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);

	skb->skb_iif = dev->ifindex;

	return ieee802154_tx(sdata->local, skb);
}

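/* ndo_start_xmit for node (WPAN) interfaces: frames pass through llsec
 * encryption before entering the common transmit path.
 */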
netdev_tx_t
ieee802154_subif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
	int rc;

	/* TODO: this should move into the wpan_dev_hard_header() and
	 * dev_hard_header() functions; otherwise wireshark shows a MAC
	 * header carrying security fields while the payload is not yet
	 * encrypted.
	 */
	rc = mac802154_llsec_encrypt(&sdata->sec, skb);
	if (rc) {
		netdev_warn(dev, "encryption failed: %i\n", rc);
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	skb->skb_iif = dev->ifindex;

	return ieee802154_tx(sdata->local, skb);
}