/*
 *   Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
 *   Copyright (c) 2017, I2SE GmbH
 *
 *   Permission to use, copy, modify, and/or distribute this software
 *   for any purpose with or without fee is hereby granted, provided
 *   that the above copyright notice and this permission notice appear
 *   in all copies.
 *
 *   THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 *   WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 *   WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
 *   THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
 *   CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 *   LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
 *   NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 *   CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*   This module implements the Qualcomm Atheros UART protocol for
 *   kernel-based UART devices; it is essentially an Ethernet-to-UART
 *   serial converter.
 */

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_net.h>
#include <linux/sched.h>
#include <linux/serdev.h>
#include <linux/skbuff.h>
#include <linux/types.h>

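/* qca_7k_common.h provides the QCA7000 framing helpers used below:
 * qcafrm_fsm_init_uart(), qcafrm_fsm_decode(), qcafrm_create_header(),
 * qcafrm_create_footer() and the QCAFRM_* constants.
 */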
#include "qca_7k_common.h"

#define QCAUART_DRV_VERSION "0.1.0"
#define QCAUART_DRV_NAME "qcauart"
#define QCAUART_TX_TIMEOUT (1 * HZ)

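/* Per-device driver state, stored as the netdev's private data. The
 * transmit buffer holds one serialized frame at a time; tx_head and
 * tx_left track the portion still to be pushed into the serdev.
 */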
struct qcauart {
	struct net_device *net_dev;
	spinlock_t lock;			/* transmit lock */
	struct work_struct tx_work;		/* Flushes transmit buffer   */

	struct serdev_device *serdev;
	struct qcafrm_handle frm_handle;
	struct sk_buff *rx_skb;

	unsigned char *tx_head;			/* pointer to next XMIT byte */
	int tx_left;				/* bytes left in XMIT queue  */
	unsigned char *tx_buffer;
};

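/* Feed each received byte into the QCA7000 framing state machine.
 * qcafrm_fsm_decode() returns a QCAFRM_* progress/error code while a
 * frame is incomplete, and the frame length once a complete frame has
 * been written into rx_skb.
 */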
static int
qca_tty_receive(struct serdev_device *serdev, const unsigned char *data,
		size_t count)
{
	struct qcauart *qca = serdev_device_get_drvdata(serdev);
	struct net_device *netdev = qca->net_dev;
	struct net_device_stats *n_stats = &netdev->stats;
	size_t i;

	if (!qca->rx_skb) {
		qca->rx_skb = netdev_alloc_skb_ip_align(netdev,
							netdev->mtu +
							VLAN_ETH_HLEN);
		if (!qca->rx_skb) {
			n_stats->rx_errors++;
			n_stats->rx_dropped++;
			return 0;
		}
	}

	for (i = 0; i < count; i++) {
		s32 retcode;

		retcode = qcafrm_fsm_decode(&qca->frm_handle,
					    qca->rx_skb->data,
					    skb_tailroom(qca->rx_skb),
					    data[i]);

		switch (retcode) {
		case QCAFRM_GATHER:
		case QCAFRM_NOHEAD:
			break;
		case QCAFRM_NOTAIL:
			netdev_dbg(netdev, "recv: no RX tail\n");
			n_stats->rx_errors++;
			n_stats->rx_dropped++;
			break;
		case QCAFRM_INVLEN:
			netdev_dbg(netdev, "recv: invalid RX length\n");
			n_stats->rx_errors++;
			n_stats->rx_dropped++;
			break;
		default:
			n_stats->rx_packets++;
			n_stats->rx_bytes += retcode;
			skb_put(qca->rx_skb, retcode);
			qca->rx_skb->protocol = eth_type_trans(
						qca->rx_skb, qca->rx_skb->dev);
			skb_checksum_none_assert(qca->rx_skb);
			netif_rx_ni(qca->rx_skb);
			qca->rx_skb = netdev_alloc_skb_ip_align(netdev,
								netdev->mtu +
								VLAN_ETH_HLEN);
			if (!qca->rx_skb) {
				netdev_dbg(netdev, "recv: out of RX resources\n");
				n_stats->rx_errors++;
				return i;
			}
		}
	}

	return i;
}

/* Write out any remaining transmit buffer. Scheduled when tty is writable */
static void qcauart_transmit(struct work_struct *work)
{
	struct qcauart *qca = container_of(work, struct qcauart, tx_work);
	struct net_device_stats *n_stats = &qca->net_dev->stats;
	int written;

	spin_lock_bh(&qca->lock);

	/* First make sure we're connected. */
	if (!netif_running(qca->net_dev)) {
		spin_unlock_bh(&qca->lock);
		return;
	}

	if (qca->tx_left <= 0) {
		/* The serial buffer is almost free again, so we can start
		 * transmitting another packet.
		 */
		n_stats->tx_packets++;
		spin_unlock_bh(&qca->lock);
		netif_wake_queue(qca->net_dev);
		return;
	}

	written = serdev_device_write_buf(qca->serdev, qca->tx_head,
					  qca->tx_left);
	if (written > 0) {
		qca->tx_left -= written;
		qca->tx_head += written;
	}
	spin_unlock_bh(&qca->lock);
}

/* Called by the driver when there's room for more data.
 * Schedule the transmit.
 */
static void qca_tty_wakeup(struct serdev_device *serdev)
{
	struct qcauart *qca = serdev_device_get_drvdata(serdev);

	schedule_work(&qca->tx_work);
}

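/* serdev callbacks: receive_buf pushes RX bytes into the framing FSM,
 * write_wakeup reschedules transmission of a partially written frame.
 */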
static const struct serdev_device_ops qca_serdev_ops = {
	.receive_buf = qca_tty_receive,
	.write_wakeup = qca_tty_wakeup,
};

static int qcauart_netdev_open(struct net_device *dev)
{
	struct qcauart *qca = netdev_priv(dev);

	netif_start_queue(qca->net_dev);

	return 0;
}

static int qcauart_netdev_close(struct net_device *dev)
{
	struct qcauart *qca = netdev_priv(dev);

	netif_stop_queue(dev);
	flush_work(&qca->tx_work);

	spin_lock_bh(&qca->lock);
	qca->tx_left = 0;
	spin_unlock_bh(&qca->lock);

	return 0;
}

static netdev_tx_t
qcauart_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device_stats *n_stats = &dev->stats;
	struct qcauart *qca = netdev_priv(dev);
	u8 pad_len = 0;
	int written;
	u8 *pos;

	spin_lock(&qca->lock);

	WARN_ON(qca->tx_left);

	if (!netif_running(dev)) {
		spin_unlock(&qca->lock);
		netdev_warn(qca->net_dev, "xmit: iface is down\n");
		goto out;
	}

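	/* Serialize the frame into tx_buffer: framing header, Ethernet
	 * payload zero-padded up to QCAFRM_MIN_LEN, then the footer.
	 */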
	pos = qca->tx_buffer;

	if (skb->len < QCAFRM_MIN_LEN)
		pad_len = QCAFRM_MIN_LEN - skb->len;

	pos += qcafrm_create_header(pos, skb->len + pad_len);

	memcpy(pos, skb->data, skb->len);
	pos += skb->len;

	if (pad_len) {
		memset(pos, 0, pad_len);
		pos += pad_len;
	}

	pos += qcafrm_create_footer(pos);

	netif_stop_queue(qca->net_dev);

	written = serdev_device_write_buf(qca->serdev, qca->tx_buffer,
					  pos - qca->tx_buffer);
	if (written > 0) {
		qca->tx_left = (pos - qca->tx_buffer) - written;
		qca->tx_head = qca->tx_buffer + written;
		n_stats->tx_bytes += written;
	}
	spin_unlock(&qca->lock);

	netif_trans_update(dev);
out:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static void qcauart_netdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct qcauart *qca = netdev_priv(dev);

	netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n",
		    jiffies, dev_trans_start(dev));
	dev->stats.tx_errors++;
	dev->stats.tx_dropped++;
}

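/* Allocate the worst-case TX frame (header + maximum payload + footer)
 * once per device, plus the first RX skb sized for MTU + VLAN header.
 */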
static int qcauart_netdev_init(struct net_device *dev)
{
	struct qcauart *qca = netdev_priv(dev);
	size_t len;

	/* Finish setting up the device info. */
	dev->mtu = QCAFRM_MAX_MTU;
	dev->type = ARPHRD_ETHER;

	len = QCAFRM_HEADER_LEN + QCAFRM_MAX_LEN + QCAFRM_FOOTER_LEN;
	qca->tx_buffer = devm_kmalloc(&qca->serdev->dev, len, GFP_KERNEL);
	if (!qca->tx_buffer)
		return -ENOMEM;

	qca->rx_skb = netdev_alloc_skb_ip_align(qca->net_dev,
						qca->net_dev->mtu +
						VLAN_ETH_HLEN);
	if (!qca->rx_skb)
		return -ENOBUFS;

	return 0;
}

static void qcauart_netdev_uninit(struct net_device *dev)
{
	struct qcauart *qca = netdev_priv(dev);

	dev_kfree_skb(qca->rx_skb);
}

static const struct net_device_ops qcauart_netdev_ops = {
	.ndo_init = qcauart_netdev_init,
	.ndo_uninit = qcauart_netdev_uninit,
	.ndo_open = qcauart_netdev_open,
	.ndo_stop = qcauart_netdev_close,
	.ndo_start_xmit = qcauart_netdev_xmit,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_tx_timeout = qcauart_netdev_tx_timeout,
	.ndo_validate_addr = eth_validate_addr,
};

static void qcauart_netdev_setup(struct net_device *dev)
{
	dev->netdev_ops = &qcauart_netdev_ops;
	dev->watchdog_timeo = QCAUART_TX_TIMEOUT;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->tx_queue_len = 100;

	/* MTU range: 46 - 1500 */
	dev->min_mtu = QCAFRM_MIN_MTU;
	dev->max_mtu = QCAFRM_MAX_MTU;
}

static const struct of_device_id qca_uart_of_match[] = {
	{
	 .compatible = "qca,qca7000",
	},
	{}
};
MODULE_DEVICE_TABLE(of, qca_uart_of_match);
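
/* A minimal device tree sketch for this binding; the parent UART node
 * and its label are illustrative. Only the compatible string and the
 * optional "current-speed" property are consumed by this driver, and a
 * fixed MAC may be given via the standard "local-mac-address" property
 * picked up by of_get_mac_address():
 *
 *	&uart1 {
 *		status = "okay";
 *
 *		qca7000 {
 *			compatible = "qca,qca7000";
 *			current-speed = <115200>;
 *		};
 *	};
 */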

static int qca_uart_probe(struct serdev_device *serdev)
{
	struct net_device *qcauart_dev = alloc_etherdev(sizeof(struct qcauart));
	struct qcauart *qca;
	const char *mac;
	u32 speed = 115200;
	int ret;

	if (!qcauart_dev)
		return -ENOMEM;

	qcauart_netdev_setup(qcauart_dev);
	SET_NETDEV_DEV(qcauart_dev, &serdev->dev);

	qca = netdev_priv(qcauart_dev);
	if (!qca) {
		pr_err("qca_uart: Failed to retrieve private structure\n");
		ret = -ENOMEM;
		goto free;
	}
	qca->net_dev = qcauart_dev;
	qca->serdev = serdev;
	qcafrm_fsm_init_uart(&qca->frm_handle);

	spin_lock_init(&qca->lock);
	INIT_WORK(&qca->tx_work, qcauart_transmit);

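	/* Both device tree properties are optional: "current-speed"
	 * overrides the 115200 default, and a random MAC is generated
	 * below if the device tree does not provide a valid one.
	 */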
	of_property_read_u32(serdev->dev.of_node, "current-speed", &speed);

	mac = of_get_mac_address(serdev->dev.of_node);

	if (!IS_ERR(mac))
		ether_addr_copy(qca->net_dev->dev_addr, mac);

	if (!is_valid_ether_addr(qca->net_dev->dev_addr)) {
		eth_hw_addr_random(qca->net_dev);
		dev_info(&serdev->dev, "Using random MAC address: %pM\n",
			 qca->net_dev->dev_addr);
	}

	netif_carrier_on(qca->net_dev);
	serdev_device_set_drvdata(serdev, qca);
	serdev_device_set_client_ops(serdev, &qca_serdev_ops);

	ret = serdev_device_open(serdev);
	if (ret) {
		dev_err(&serdev->dev, "Unable to open device %s\n",
			qcauart_dev->name);
		goto free;
	}

	speed = serdev_device_set_baudrate(serdev, speed);
	dev_info(&serdev->dev, "Using baudrate: %u\n", speed);

	serdev_device_set_flow_control(serdev, false);

	ret = register_netdev(qcauart_dev);
	if (ret) {
		dev_err(&serdev->dev, "Unable to register net device %s\n",
			qcauart_dev->name);
		serdev_device_close(serdev);
		cancel_work_sync(&qca->tx_work);
		goto free;
	}

	return 0;

free:
	free_netdev(qcauart_dev);
	return ret;
}

static void qca_uart_remove(struct serdev_device *serdev)
{
	struct qcauart *qca = serdev_device_get_drvdata(serdev);

	unregister_netdev(qca->net_dev);

	/* Flush any pending characters in the driver. */
	serdev_device_close(serdev);
	cancel_work_sync(&qca->tx_work);

	free_netdev(qca->net_dev);
}

static struct serdev_device_driver qca_uart_driver = {
	.probe = qca_uart_probe,
	.remove = qca_uart_remove,
	.driver = {
		.name = QCAUART_DRV_NAME,
		.of_match_table = of_match_ptr(qca_uart_of_match),
	},
};

module_serdev_device_driver(qca_uart_driver);

MODULE_DESCRIPTION("Qualcomm Atheros QCA7000 UART Driver");
MODULE_AUTHOR("Qualcomm Atheros Communications");
MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(QCAUART_DRV_VERSION);