// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include "hnae.h"

#define cls_to_ae_dev(dev) container_of(dev, struct hnae_ae_dev, cls_dev)

static struct class *hnae_class;

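/* Add/remove a handle on an AE device's handle_list. Writers serialize on
 * the device's spinlock; the RCU list primitives keep lockless readers safe.
 */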
static void
hnae_list_add(spinlock_t *lock, struct list_head *node, struct list_head *head)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add_tail_rcu(node, head);
	spin_unlock_irqrestore(lock, flags);
}

static void hnae_list_del(spinlock_t *lock, struct list_head *node)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_del_rcu(node);
	spin_unlock_irqrestore(lock, flags);
}

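/* Allocate a (possibly multi-page) buffer for an rx descriptor and record it
 * in the control block; the buffer is not DMA-mapped here.
 */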
static int hnae_alloc_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	unsigned int order = hnae_page_order(ring);
	struct page *p = dev_alloc_pages(order);

	if (!p)
		return -ENOMEM;

	cb->priv = p;
	cb->page_offset = 0;
	cb->reuse_flag = 0;
	cb->buf = page_address(p);
	cb->length = hnae_page_size(ring);
	cb->type = DESC_TYPE_PAGE;

	return 0;
}

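/* Release the buffer referenced by a control block: an skb for tx
 * descriptors, a page reference for rx descriptors.
 */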
static void hnae_free_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	if (unlikely(!cb->priv))
		return;

	if (cb->type == DESC_TYPE_SKB)
		dev_kfree_skb_any((struct sk_buff *)cb->priv);
	else if (unlikely(is_rx_ring(ring)))
		put_page((struct page *)cb->priv);

	cb->priv = NULL;
}

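/* DMA-map the page buffer held in @cb for this ring's direction. */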
static int hnae_map_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
			       cb->length, ring_to_dma_dir(ring));

	if (dma_mapping_error(ring_to_dev(ring), cb->dma))
		return -EIO;

	return 0;
}

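/* Undo the DMA mapping in @cb, matching how it was created: a single
 * mapping for skb data, a page mapping otherwise.
 */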
static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	if (cb->type == DESC_TYPE_SKB)
		dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
				 ring_to_dma_dir(ring));
	else if (cb->length)
		dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
			       ring_to_dma_dir(ring));
}

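/* Default buffer management ops, used when the caller of hnae_get_handle()
 * does not supply its own.
 */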
static struct hnae_buf_ops hnae_bops = {
	.alloc_buffer = hnae_alloc_buffer,
	.free_buffer = hnae_free_buffer,
	.map_buffer = hnae_map_buffer,
	.unmap_buffer = hnae_unmap_buffer,
};

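/* Match an AE class device against the fwnode handle the caller asked for,
 * whether the device was described by DT or by ACPI.
 */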
static int __ae_match(struct device *dev, const void *data)
{
	struct hnae_ae_dev *hdev = cls_to_ae_dev(dev);

	if (dev_of_node(hdev->dev))
		return (data == &hdev->dev->of_node->fwnode);
	else if (is_acpi_node(hdev->dev->fwnode))
		return (data == hdev->dev->fwnode);

	dev_err(dev, "__ae_match cannot read cfg data from OF or acpi\n");
	return 0;
}

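/* Look up a registered AE device by fwnode. On a match, class_find_device()
 * takes a reference on the class device; callers drop it with put_device().
 */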
static struct hnae_ae_dev *find_ae(const struct fwnode_handle *fwnode)
{
	struct device *dev;

	WARN_ON(!fwnode);

	dev = class_find_device(hnae_class, NULL, fwnode, __ae_match);

	return dev ? cls_to_ae_dev(dev) : NULL;
}

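/* Detach and free every buffer attached to the ring's descriptors. */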
static void hnae_free_buffers(struct hnae_ring *ring)
{
	int i;

	for (i = 0; i < ring->desc_num; i++)
		hnae_free_buffer_detach(ring, i);
}

/* Allocate memory for raw packets and map it for DMA */
static int hnae_alloc_buffers(struct hnae_ring *ring)
{
	int i, j, ret;

	for (i = 0; i < ring->desc_num; i++) {
		ret = hnae_alloc_buffer_attach(ring, i);
		if (ret)
			goto out_buffer_fail;
	}

	return 0;

out_buffer_fail:
	for (j = i - 1; j >= 0; j--)
		hnae_free_buffer_detach(ring, j);
	return ret;
}

/* unmap and free the descriptor array */
static void hnae_free_desc(struct hnae_ring *ring)
{
	dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
			 ring->desc_num * sizeof(ring->desc[0]),
			 ring_to_dma_dir(ring));
	ring->desc_dma_addr = 0;
	kfree(ring->desc);
	ring->desc = NULL;
}

/* alloc desc, without buffer attached */
static int hnae_alloc_desc(struct hnae_ring *ring)
{
	int size = ring->desc_num * sizeof(ring->desc[0]);

	ring->desc = kzalloc(size, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	ring->desc_dma_addr = dma_map_single(ring_to_dev(ring),
		ring->desc, size, ring_to_dma_dir(ring));
	if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
		ring->desc_dma_addr = 0;
		kfree(ring->desc);
		ring->desc = NULL;
		return -ENOMEM;
	}

	return 0;
}

/* fini ring; for an rx ring, also free the attached buffers */
static void hnae_fini_ring(struct hnae_ring *ring)
{
	if (is_rx_ring(ring))
		hnae_free_buffers(ring);

	hnae_free_desc(ring);
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
}

/* init ring; for an rx ring, also allocate and attach buffers */
static int
hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags)
{
	int ret;

	if (ring->desc_num <= 0 || ring->buf_size <= 0)
		return -EINVAL;

	ring->q = q;
	ring->flags = flags;
	ring->coal_param = q->handle->coal_param;
	assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr);

	/* no matter whether it is a tx or an rx ring, ntc and ntu start from 0 */
	assert(ring->next_to_use == 0);
	assert(ring->next_to_clean == 0);

	ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
				GFP_KERNEL);
	if (!ring->desc_cb) {
		ret = -ENOMEM;
		goto out;
	}

	ret = hnae_alloc_desc(ring);
	if (ret)
		goto out_with_desc_cb;

	if (is_rx_ring(ring)) {
		ret = hnae_alloc_buffers(ring);
		if (ret)
			goto out_with_desc;
	}

	return 0;

out_with_desc:
	hnae_free_desc(ring);
out_with_desc_cb:
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
out:
	return ret;
}

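/* Set up both rings of a queue, then let the AE driver do any per-queue
 * hardware initialization.
 */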
static int hnae_init_queue(struct hnae_handle *h, struct hnae_queue *q,
			   struct hnae_ae_dev *dev)
{
	int ret;

	q->dev = dev;
	q->handle = h;

	ret = hnae_init_ring(q, &q->tx_ring, q->tx_ring.flags | RINGF_DIR);
	if (ret)
		goto out;

	ret = hnae_init_ring(q, &q->rx_ring, q->rx_ring.flags & ~RINGF_DIR);
	if (ret)
		goto out_with_tx_ring;

	if (dev->ops->init_queue)
		dev->ops->init_queue(q);

	return 0;

out_with_tx_ring:
	hnae_fini_ring(&q->tx_ring);
out:
	return ret;
}

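/* Tear down a queue: per-queue hardware fini first, then both rings. */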
static void hnae_fini_queue(struct hnae_queue *q)
{
	if (q->dev->ops->fini_queue)
		q->dev->ops->fini_queue(q);

	hnae_fini_ring(&q->tx_ring);
	hnae_fini_ring(&q->rx_ring);
}

/*
 * ae_chain - define ae chain head
 */
static RAW_NOTIFIER_HEAD(ae_chain);

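/* Notify interested drivers when an AE engine registers with the framework. */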
int hnae_register_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&ae_chain, nb);
}
EXPORT_SYMBOL(hnae_register_notifier);

void hnae_unregister_notifier(struct notifier_block *nb)
{
	if (raw_notifier_chain_unregister(&ae_chain, nb))
		pr_err("notifier chain unregister fail\n");
}
EXPORT_SYMBOL(hnae_unregister_notifier);

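/* Re-create all queues of a handle around a hardware reset. */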
int hnae_reinit_handle(struct hnae_handle *handle)
{
	int i, j;
	int ret;

	for (i = 0; i < handle->q_num; i++) /* free rings */
		hnae_fini_queue(handle->qs[i]);

	if (handle->dev->ops->reset)
		handle->dev->ops->reset(handle);

	for (i = 0; i < handle->q_num; i++) { /* reinit rings */
		ret = hnae_init_queue(handle, handle->qs[i], handle->dev);
		if (ret)
			goto out_when_init_queue;
	}
	return 0;
out_when_init_queue:
	for (j = i - 1; j >= 0; j--)
		hnae_fini_queue(handle->qs[j]);
	return ret;
}
EXPORT_SYMBOL(hnae_reinit_handle);

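/*
 * Typical life cycle from an ENET driver (a sketch; the caller-side variable
 * names are illustrative only):
 *
 *	handle = hnae_get_handle(&pdev->dev, fwnode, port_id, NULL);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...use handle->qs[i] and its tx_ring/rx_ring...
 *	hnae_put_handle(handle);
 */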
/**
 * hnae_get_handle - get a handle from the AE
 * @owner_dev: the dev that will use this handle
 * @fwnode: the fwnode of the AE device to be used
 * @port_id: the id of the port this handle is bound to
 * @bops: the callbacks for buffer management
 *
 * Return: handle ptr or ERR_PTR
 */
struct hnae_handle *hnae_get_handle(struct device *owner_dev,
				    const struct fwnode_handle *fwnode,
				    u32 port_id,
				    struct hnae_buf_ops *bops)
{
	struct hnae_ae_dev *dev;
	struct hnae_handle *handle;
	int i, j;
	int ret;

	dev = find_ae(fwnode);
	if (!dev)
		return ERR_PTR(-ENODEV);

	handle = dev->ops->get_handle(dev, port_id);
	if (IS_ERR(handle)) {
		put_device(&dev->cls_dev);
		return handle;
	}

	handle->dev = dev;
	handle->owner_dev = owner_dev;
	handle->bops = bops ? bops : &hnae_bops;
	handle->eport_id = port_id;

	for (i = 0; i < handle->q_num; i++) {
		ret = hnae_init_queue(handle, handle->qs[i], dev);
		if (ret)
			goto out_when_init_queue;
	}

	__module_get(dev->owner);

	hnae_list_add(&dev->lock, &handle->node, &dev->handle_list);

	return handle;

out_when_init_queue:
	for (j = i - 1; j >= 0; j--)
		hnae_fini_queue(handle->qs[j]);

	put_device(&dev->cls_dev);

	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(hnae_get_handle);

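/* Release a handle obtained from hnae_get_handle(): tear down its queues,
 * reset the hardware, drop the handle from the AE device's list, and release
 * the module and device references taken at get time.
 */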
void hnae_put_handle(struct hnae_handle *h)
{
	struct hnae_ae_dev *dev = h->dev;
	int i;

	for (i = 0; i < h->q_num; i++)
		hnae_fini_queue(h->qs[i]);

	if (h->dev->ops->reset)
		h->dev->ops->reset(h);

	hnae_list_del(&dev->lock, &h->node);

	if (dev->ops->put_handle)
		dev->ops->put_handle(h);

	module_put(dev->owner);

	put_device(&dev->cls_dev);
}
EXPORT_SYMBOL(hnae_put_handle);

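/* The ae_dev is owned and freed by the AE driver itself; an empty release
 * callback just keeps the driver core from warning that one is missing.
 */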
static void hnae_release(struct device *dev)
{
}

/**
 * hnae_ae_register - register an AE engine to the hnae framework
 * @hdev: the hnae ae engine device
 * @owner: the module that provides this dev
 * NOTE: duplicated names are not checked
 */
int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner)
{
	static atomic_t id = ATOMIC_INIT(-1);
	int ret;

	if (!hdev->dev)
		return -ENODEV;

	if (!hdev->ops || !hdev->ops->get_handle ||
	    !hdev->ops->toggle_ring_irq ||
	    !hdev->ops->get_status || !hdev->ops->adjust_link)
		return -EINVAL;

	hdev->owner = owner;
	hdev->id = (int)atomic_inc_return(&id);
	hdev->cls_dev.parent = hdev->dev;
	hdev->cls_dev.class = hnae_class;
	hdev->cls_dev.release = hnae_release;
	(void)dev_set_name(&hdev->cls_dev, "hnae%d", hdev->id);
	ret = device_register(&hdev->cls_dev);
	if (ret) {
		put_device(&hdev->cls_dev);
		return ret;
	}

	__module_get(THIS_MODULE);

	INIT_LIST_HEAD(&hdev->handle_list);
	spin_lock_init(&hdev->lock);

	ret = raw_notifier_call_chain(&ae_chain, HNAE_AE_REGISTER, NULL);
	if (ret)
		dev_dbg(hdev->dev,
			"no notifier registered for AE: %s\n", hdev->name);

	return 0;
}
EXPORT_SYMBOL(hnae_ae_register);

/**
 * hnae_ae_unregister - unregisters a HNAE AE engine
 * @hdev: the device to unregister
 */
void hnae_ae_unregister(struct hnae_ae_dev *hdev)
{
	device_unregister(&hdev->cls_dev);
	module_put(THIS_MODULE);
}
EXPORT_SYMBOL(hnae_ae_unregister);

static int __init hnae_init(void)
{
	hnae_class = class_create(THIS_MODULE, "hnae");
	return PTR_ERR_OR_ZERO(hnae_class);
}

static void __exit hnae_exit(void)
{
	class_destroy(hnae_class);
}

subsys_initcall(hnae_init);
module_exit(hnae_exit);

MODULE_AUTHOR("Hisilicon, Inc.");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Hisilicon Network Acceleration Engine Framework");

/* vi: set tw=78 noet: */