// SPDX-License-Identifier: GPL-2.0-only
/*
 * Keystone Queue Manager subsystem driver
 *
 * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
 * Authors:	Sandeep Nair <sandeep_n@ti.com>
 *		Cyril Chemparathy <cyril@ti.com>
 *		Santosh Shilimkar <santosh.shilimkar@ti.com>
 */

#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/soc/ti/knav_qmss.h>

#include "knav_qmss.h"

static struct knav_device *kdev;
static DEFINE_MUTEX(knav_dev_lock);
#define knav_dev_lock_held() \
	lockdep_is_held(&knav_dev_lock)

/* Queue manager register indices in DTS */
#define KNAV_QUEUE_PEEK_REG_INDEX	0
#define KNAV_QUEUE_STATUS_REG_INDEX	1
#define KNAV_QUEUE_CONFIG_REG_INDEX	2
#define KNAV_QUEUE_REGION_REG_INDEX	3
#define KNAV_QUEUE_PUSH_REG_INDEX	4
#define KNAV_QUEUE_POP_REG_INDEX	5
/* Queue manager register indices in DTS for QMSS in K2G NAVSS.
 * There are no status or vbusm push registers on this version
 * of QMSS. The push registers are the same as the pop registers,
 * so all indices above 1 are redefined.
 */
#define KNAV_L_QUEUE_CONFIG_REG_INDEX	1
#define KNAV_L_QUEUE_REGION_REG_INDEX	2
#define KNAV_L_QUEUE_PUSH_REG_INDEX	3

/* PDSP register indices in DTS */
#define KNAV_QUEUE_PDSP_IRAM_REG_INDEX	0
#define KNAV_QUEUE_PDSP_REGS_REG_INDEX	1
#define KNAV_QUEUE_PDSP_INTD_REG_INDEX	2
#define KNAV_QUEUE_PDSP_CMD_REG_INDEX	3

#define knav_queue_idx_to_inst(kdev, idx)			\
	(kdev->instances + (idx << kdev->inst_shift))

#define for_each_handle_rcu(qh, inst)				\
	list_for_each_entry_rcu(qh, &inst->handles, list,	\
				knav_dev_lock_held())

#define for_each_instance(idx, inst, kdev)		\
	for (idx = 0, inst = kdev->instances;		\
	     idx < (kdev)->num_queues_in_use;		\
	     idx++, inst = knav_queue_idx_to_inst(kdev, idx))

/* All firmware file names end up here. List the firmware file names below.
 * Newest followed by older ones. Search is done from start of the array
 * until a firmware file is found.
 */
const char *knav_acc_firmwares[] = {"ks2_qmss_pdsp_acc48.bin"};

static bool device_ready;
bool knav_qmss_device_ready(void)
{
	return device_ready;
}
EXPORT_SYMBOL_GPL(knav_qmss_device_ready);
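
/*
 * Example (illustrative sketch, not part of this driver): client drivers
 * typically gate their own probe on QMSS readiness and defer until this
 * device has finished probing:
 *
 *	if (!knav_qmss_device_ready())
 *		return -EPROBE_DEFER;
 */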

/**
 * knav_queue_notify: qmss queue notifier call
 *
 * @inst: qmss queue instance like accumulator
 */
void knav_queue_notify(struct knav_queue_inst *inst)
{
	struct knav_queue *qh;

	if (!inst)
		return;

	rcu_read_lock();
	for_each_handle_rcu(qh, inst) {
		if (atomic_read(&qh->notifier_enabled) <= 0)
			continue;
		if (WARN_ON(!qh->notifier_fn))
			continue;
		this_cpu_inc(qh->stats->notifies);
		qh->notifier_fn(qh->notifier_fn_arg);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(knav_queue_notify);

static irqreturn_t knav_queue_int_handler(int irq, void *_instdata)
{
	struct knav_queue_inst *inst = _instdata;

	knav_queue_notify(inst);
	return IRQ_HANDLED;
}

static int knav_queue_setup_irq(struct knav_range_info *range,
				struct knav_queue_inst *inst)
{
	unsigned queue = inst->id - range->queue_base;
	int ret = 0, irq;

	if (range->flags & RANGE_HAS_IRQ) {
		irq = range->irqs[queue].irq;
		ret = request_irq(irq, knav_queue_int_handler, 0,
				  inst->irq_name, inst);
		if (ret)
			return ret;
		disable_irq(irq);
		if (range->irqs[queue].cpu_mask) {
			ret = irq_set_affinity_hint(irq, range->irqs[queue].cpu_mask);
			if (ret) {
				dev_warn(range->kdev->dev,
					 "Failed to set IRQ affinity\n");
				return ret;
			}
		}
	}
	return ret;
}

static void knav_queue_free_irq(struct knav_queue_inst *inst)
{
	struct knav_range_info *range = inst->range;
	unsigned queue = inst->id - inst->range->queue_base;
	int irq;

	if (range->flags & RANGE_HAS_IRQ) {
		irq = range->irqs[queue].irq;
		irq_set_affinity_hint(irq, NULL);
		free_irq(irq, inst);
	}
}

static inline bool knav_queue_is_busy(struct knav_queue_inst *inst)
{
	return !list_empty(&inst->handles);
}

static inline bool knav_queue_is_reserved(struct knav_queue_inst *inst)
{
	return inst->range->flags & RANGE_RESERVED;
}

static inline bool knav_queue_is_shared(struct knav_queue_inst *inst)
{
	struct knav_queue *tmp;

	rcu_read_lock();
	for_each_handle_rcu(tmp, inst) {
		if (tmp->flags & KNAV_QUEUE_SHARED) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();
	return false;
}

static inline bool knav_queue_match_type(struct knav_queue_inst *inst,
					 unsigned type)
{
	if ((type == KNAV_QUEUE_QPEND) &&
	    (inst->range->flags & RANGE_HAS_IRQ)) {
		return true;
	} else if ((type == KNAV_QUEUE_ACC) &&
		   (inst->range->flags & RANGE_HAS_ACCUMULATOR)) {
		return true;
	} else if ((type == KNAV_QUEUE_GP) &&
		   !(inst->range->flags &
		     (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ))) {
		return true;
	}
	return false;
}

static inline struct knav_queue_inst *
knav_queue_match_id_to_inst(struct knav_device *kdev, unsigned id)
{
	struct knav_queue_inst *inst;
	int idx;

	for_each_instance(idx, inst, kdev) {
		if (inst->id == id)
			return inst;
	}
	return NULL;
}

static inline struct knav_queue_inst *knav_queue_find_by_id(int id)
{
	if (kdev->base_id <= id &&
	    kdev->base_id + kdev->num_queues > id) {
		id -= kdev->base_id;
		return knav_queue_match_id_to_inst(kdev, id);
	}
	return NULL;
}

static struct knav_queue *__knav_queue_open(struct knav_queue_inst *inst,
					    const char *name, unsigned flags)
{
	struct knav_queue *qh;
	unsigned id;
	int ret = 0;

	qh = devm_kzalloc(inst->kdev->dev, sizeof(*qh), GFP_KERNEL);
	if (!qh)
		return ERR_PTR(-ENOMEM);

	qh->stats = alloc_percpu(struct knav_queue_stats);
	if (!qh->stats) {
		ret = -ENOMEM;
		goto err;
	}

	qh->flags = flags;
	qh->inst = inst;
	id = inst->id - inst->qmgr->start_queue;
	qh->reg_push = &inst->qmgr->reg_push[id];
	qh->reg_pop = &inst->qmgr->reg_pop[id];
	qh->reg_peek = &inst->qmgr->reg_peek[id];

	/* first opener? */
	if (!knav_queue_is_busy(inst)) {
		struct knav_range_info *range = inst->range;

		inst->name = kstrndup(name, KNAV_NAME_SIZE - 1, GFP_KERNEL);
		if (range->ops && range->ops->open_queue)
			ret = range->ops->open_queue(range, inst, flags);

		if (ret)
			goto err;
	}
	list_add_tail_rcu(&qh->list, &inst->handles);
	return qh;

err:
	if (qh->stats)
		free_percpu(qh->stats);
	devm_kfree(inst->kdev->dev, qh);
	return ERR_PTR(ret);
}

static struct knav_queue *
knav_queue_open_by_id(const char *name, unsigned id, unsigned flags)
{
	struct knav_queue_inst *inst;
	struct knav_queue *qh;

	mutex_lock(&knav_dev_lock);

	qh = ERR_PTR(-ENODEV);
	inst = knav_queue_find_by_id(id);
	if (!inst)
		goto unlock_ret;

	qh = ERR_PTR(-EEXIST);
	if (!(flags & KNAV_QUEUE_SHARED) && knav_queue_is_busy(inst))
		goto unlock_ret;

	qh = ERR_PTR(-EBUSY);
	if ((flags & KNAV_QUEUE_SHARED) &&
	    (knav_queue_is_busy(inst) && !knav_queue_is_shared(inst)))
		goto unlock_ret;

	qh = __knav_queue_open(inst, name, flags);

unlock_ret:
	mutex_unlock(&knav_dev_lock);

	return qh;
}

static struct knav_queue *knav_queue_open_by_type(const char *name,
						  unsigned type, unsigned flags)
{
	struct knav_queue_inst *inst;
	struct knav_queue *qh = ERR_PTR(-EINVAL);
	int idx;

	mutex_lock(&knav_dev_lock);

	for_each_instance(idx, inst, kdev) {
		if (knav_queue_is_reserved(inst))
			continue;
		if (!knav_queue_match_type(inst, type))
			continue;
		if (knav_queue_is_busy(inst))
			continue;
		qh = __knav_queue_open(inst, name, flags);
		goto unlock_ret;
	}

unlock_ret:
	mutex_unlock(&knav_dev_lock);
	return qh;
}

static void knav_queue_set_notify(struct knav_queue_inst *inst, bool enabled)
{
	struct knav_range_info *range = inst->range;

	if (range->ops && range->ops->set_notify)
		range->ops->set_notify(range, inst, enabled);
}

static int knav_queue_enable_notifier(struct knav_queue *qh)
{
	struct knav_queue_inst *inst = qh->inst;
	bool first;

	if (WARN_ON(!qh->notifier_fn))
		return -EINVAL;

	/* Adjust the per handle notifier count */
	first = (atomic_inc_return(&qh->notifier_enabled) == 1);
	if (!first)
		return 0; /* nothing to do */

	/* Now adjust the per instance notifier count */
	first = (atomic_inc_return(&inst->num_notifiers) == 1);
	if (first)
		knav_queue_set_notify(inst, true);

	return 0;
}

static int knav_queue_disable_notifier(struct knav_queue *qh)
{
	struct knav_queue_inst *inst = qh->inst;
	bool last;

	last = (atomic_dec_return(&qh->notifier_enabled) == 0);
	if (!last)
		return 0; /* nothing to do */

	last = (atomic_dec_return(&inst->num_notifiers) == 0);
	if (last)
		knav_queue_set_notify(inst, false);

	return 0;
}

static int knav_queue_set_notifier(struct knav_queue *qh,
				   struct knav_queue_notify_config *cfg)
{
	knav_queue_notify_fn old_fn = qh->notifier_fn;

	if (!cfg)
		return -EINVAL;

	if (!(qh->inst->range->flags & (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ)))
		return -ENOTSUPP;

	if (!cfg->fn && old_fn)
		knav_queue_disable_notifier(qh);

	qh->notifier_fn = cfg->fn;
	qh->notifier_fn_arg = cfg->fn_arg;

	if (cfg->fn && !old_fn)
		knav_queue_enable_notifier(qh);

	return 0;
}

static int knav_gp_set_notify(struct knav_range_info *range,
			      struct knav_queue_inst *inst,
			      bool enabled)
{
	unsigned queue;

	if (range->flags & RANGE_HAS_IRQ) {
		queue = inst->id - range->queue_base;
		if (enabled)
			enable_irq(range->irqs[queue].irq);
		else
			disable_irq_nosync(range->irqs[queue].irq);
	}
	return 0;
}

static int knav_gp_open_queue(struct knav_range_info *range,
			      struct knav_queue_inst *inst, unsigned flags)
{
	return knav_queue_setup_irq(range, inst);
}

static int knav_gp_close_queue(struct knav_range_info *range,
			       struct knav_queue_inst *inst)
{
	knav_queue_free_irq(inst);
	return 0;
}

static struct knav_range_ops knav_gp_range_ops = {
	.set_notify	= knav_gp_set_notify,
	.open_queue	= knav_gp_open_queue,
	.close_queue	= knav_gp_close_queue,
};


static int knav_queue_get_count(void *qhandle)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_inst *inst = qh->inst;

	return readl_relaxed(&qh->reg_peek[0].entry_count) +
		atomic_read(&inst->desc_count);
}

static void knav_queue_debug_show_instance(struct seq_file *s,
					   struct knav_queue_inst *inst)
{
	struct knav_device *kdev = inst->kdev;
	struct knav_queue *qh;
	int cpu = 0;
	int pushes = 0;
	int pops = 0;
	int push_errors = 0;
	int pop_errors = 0;
	int notifies = 0;

	if (!knav_queue_is_busy(inst))
		return;

	seq_printf(s, "\tqueue id %d (%s)\n",
		   kdev->base_id + inst->id, inst->name);
	for_each_handle_rcu(qh, inst) {
		for_each_possible_cpu(cpu) {
			pushes += per_cpu_ptr(qh->stats, cpu)->pushes;
			pops += per_cpu_ptr(qh->stats, cpu)->pops;
			push_errors += per_cpu_ptr(qh->stats, cpu)->push_errors;
			pop_errors += per_cpu_ptr(qh->stats, cpu)->pop_errors;
			notifies += per_cpu_ptr(qh->stats, cpu)->notifies;
		}

		seq_printf(s, "\t\thandle %p: pushes %8d, pops %8d, count %8d, notifies %8d, push errors %8d, pop errors %8d\n",
			   qh,
			   pushes,
			   pops,
			   knav_queue_get_count(qh),
			   notifies,
			   push_errors,
			   pop_errors);
	}
}

static int knav_queue_debug_show(struct seq_file *s, void *v)
{
	struct knav_queue_inst *inst;
	int idx;

	mutex_lock(&knav_dev_lock);
	seq_printf(s, "%s: %u-%u\n",
		   dev_name(kdev->dev), kdev->base_id,
		   kdev->base_id + kdev->num_queues - 1);
	for_each_instance(idx, inst, kdev)
		knav_queue_debug_show_instance(s, inst);
	mutex_unlock(&knav_dev_lock);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(knav_queue_debug);

static inline int knav_queue_pdsp_wait(u32 * __iomem addr, unsigned timeout,
				       u32 flags)
{
	unsigned long end;
	u32 val = 0;

	end = jiffies + msecs_to_jiffies(timeout);
	while (time_after(end, jiffies)) {
		val = readl_relaxed(addr);
		if (flags)
			val &= flags;
		if (!val)
			break;
		cpu_relax();
	}
	return val ? -ETIMEDOUT : 0;
}


static int knav_queue_flush(struct knav_queue *qh)
{
	struct knav_queue_inst *inst = qh->inst;
	unsigned id = inst->id - inst->qmgr->start_queue;

	atomic_set(&inst->desc_count, 0);
	writel_relaxed(0, &inst->qmgr->reg_push[id].ptr_size_thresh);
	return 0;
}

/**
 * knav_queue_open() - open a hardware queue
 * @name	- name to give the queue handle
 * @id		- desired queue number if any or specifies the type
 *		  of queue
 * @flags	- the following flags are applicable to queues:
 *	KNAV_QUEUE_SHARED - allow the queue to be shared. Queues are
 *			    exclusive by default.
 *			    Subsequent attempts to open a shared queue should
 *			    also have this flag.
 *
 * Returns a handle to the open hardware queue if successful. Use IS_ERR()
 * to check the returned value for error codes.
 */
void *knav_queue_open(const char *name, unsigned id,
		      unsigned flags)
{
	struct knav_queue *qh = ERR_PTR(-EINVAL);

	switch (id) {
	case KNAV_QUEUE_QPEND:
	case KNAV_QUEUE_ACC:
	case KNAV_QUEUE_GP:
		qh = knav_queue_open_by_type(name, id, flags);
		break;

	default:
		qh = knav_queue_open_by_id(name, id, flags);
		break;
	}
	return qh;
}
EXPORT_SYMBOL_GPL(knav_queue_open);
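
/*
 * Example (illustrative sketch, not part of this driver): a client can ask
 * for any free general-purpose queue, or claim a fixed queue number known
 * from its device tree data. The name and queue number below are made up
 * for illustration:
 *
 *	void *q;
 *
 *	q = knav_queue_open("my-gp-queue", KNAV_QUEUE_GP, 0);
 *	if (IS_ERR(q))
 *		return PTR_ERR(q);
 *
 *	// or: open queue 8704 and allow other users to share it
 *	q = knav_queue_open("my-shared-queue", 8704, KNAV_QUEUE_SHARED);
 */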

/**
 * knav_queue_close() - close a hardware queue handle
 * @qh		- handle to close
 */
void knav_queue_close(void *qhandle)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_inst *inst = qh->inst;

	while (atomic_read(&qh->notifier_enabled) > 0)
		knav_queue_disable_notifier(qh);

	mutex_lock(&knav_dev_lock);
	list_del_rcu(&qh->list);
	mutex_unlock(&knav_dev_lock);
	synchronize_rcu();
	if (!knav_queue_is_busy(inst)) {
		struct knav_range_info *range = inst->range;

		if (range->ops && range->ops->close_queue)
			range->ops->close_queue(range, inst);
	}
	free_percpu(qh->stats);
	devm_kfree(inst->kdev->dev, qh);
}
EXPORT_SYMBOL_GPL(knav_queue_close);

/**
 * knav_queue_device_control() - Perform control operations on a queue
 * @qh		- queue handle
 * @cmd		- control commands
 * @arg		- command argument
 *
 * Returns 0 on success, errno otherwise.
 */
int knav_queue_device_control(void *qhandle, enum knav_queue_ctrl_cmd cmd,
			      unsigned long arg)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_notify_config *cfg;
	int ret;

	switch ((int)cmd) {
	case KNAV_QUEUE_GET_ID:
		ret = qh->inst->kdev->base_id + qh->inst->id;
		break;

	case KNAV_QUEUE_FLUSH:
		ret = knav_queue_flush(qh);
		break;

	case KNAV_QUEUE_SET_NOTIFIER:
		cfg = (void *)arg;
		ret = knav_queue_set_notifier(qh, cfg);
		break;

	case KNAV_QUEUE_ENABLE_NOTIFY:
		ret = knav_queue_enable_notifier(qh);
		break;

	case KNAV_QUEUE_DISABLE_NOTIFY:
		ret = knav_queue_disable_notifier(qh);
		break;

	case KNAV_QUEUE_GET_COUNT:
		ret = knav_queue_get_count(qh);
		break;

	default:
		ret = -ENOTSUPP;
		break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(knav_queue_device_control);
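
/*
 * Example (illustrative sketch, not part of this driver): registering a
 * completion callback on an interruptible queue via KNAV_QUEUE_SET_NOTIFIER.
 * The callback name and its argument are hypothetical:
 *
 *	static void my_queue_complete(void *arg)
 *	{
 *		// pop finished descriptors here, or schedule NAPI/work
 *	}
 *
 *	struct knav_queue_notify_config cfg = {
 *		.fn	= my_queue_complete,
 *		.fn_arg	= my_ctx,
 *	};
 *
 *	ret = knav_queue_device_control(q, KNAV_QUEUE_SET_NOTIFIER,
 *					(unsigned long)&cfg);
 */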


/**
 * knav_queue_push() - push data (or descriptor) to the tail of a queue
 * @qh		- hardware queue handle
 * @dma		- DMA address of the data (or descriptor) to push
 * @size	- size of data to push
 * @flags	- can be used to pass additional information
 *
 * Returns 0 on success, errno otherwise.
 */
int knav_queue_push(void *qhandle, dma_addr_t dma,
		    unsigned size, unsigned flags)
{
	struct knav_queue *qh = qhandle;
	u32 val;

	val = (u32)dma | ((size / 16) - 1);
	writel_relaxed(val, &qh->reg_push[0].ptr_size_thresh);

	this_cpu_inc(qh->stats->pushes);
	return 0;
}
EXPORT_SYMBOL_GPL(knav_queue_push);

/**
 * knav_queue_pop() - pop data (or descriptor) from the head of a queue
 * @qh		- hardware queue handle
 * @size	- (optional) size of the data popped.
 *
 * Returns a DMA address on success, 0 on failure.
 */
dma_addr_t knav_queue_pop(void *qhandle, unsigned *size)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_inst *inst = qh->inst;
	dma_addr_t dma;
	u32 val, idx;

	/* are we accumulated? */
	if (inst->descs) {
		if (unlikely(atomic_dec_return(&inst->desc_count) < 0)) {
			atomic_inc(&inst->desc_count);
			return 0;
		}
		idx = atomic_inc_return(&inst->desc_head);
		idx &= ACC_DESCS_MASK;
		val = inst->descs[idx];
	} else {
		val = readl_relaxed(&qh->reg_pop[0].ptr_size_thresh);
		if (unlikely(!val))
			return 0;
	}

	dma = val & DESC_PTR_MASK;
	if (size)
		*size = ((val & DESC_SIZE_MASK) + 1) * 16;

	this_cpu_inc(qh->stats->pops);
	return dma;
}
EXPORT_SYMBOL_GPL(knav_queue_pop);
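
/*
 * Example (illustrative sketch, not part of this driver): a minimal push/pop
 * round trip. Note that the hardware word packs the element size into the
 * low bits, so @dma must be 16-byte aligned and @size a multiple of 16:
 *
 *	dma_addr_t desc_dma;	// 16-byte aligned, mapped for the device
 *	unsigned sz;
 *
 *	knav_queue_push(q, desc_dma, 64, 0);
 *	...
 *	desc_dma = knav_queue_pop(q, &sz);
 *	if (!desc_dma)
 *		return -ENOENT;	// queue was empty
 */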

/* carve out descriptors and push into queue */
static void kdesc_fill_pool(struct knav_pool *pool)
{
	struct knav_region *region;
	int i;

	region = pool->region;
	pool->desc_size = region->desc_size;
	for (i = 0; i < pool->num_desc; i++) {
		int index = pool->region_offset + i;
		dma_addr_t dma_addr;
		unsigned dma_size;
		dma_addr = region->dma_start + (region->desc_size * index);
		dma_size = ALIGN(pool->desc_size, SMP_CACHE_BYTES);
		dma_sync_single_for_device(pool->dev, dma_addr, dma_size,
					   DMA_TO_DEVICE);
		knav_queue_push(pool->queue, dma_addr, dma_size, 0);
	}
}

/* pop out descriptors and close the queue */
static void kdesc_empty_pool(struct knav_pool *pool)
{
	dma_addr_t dma;
	unsigned size;
	void *desc;
	int i;

	if (!pool->queue)
		return;

	for (i = 0;; i++) {
		dma = knav_queue_pop(pool->queue, &size);
		if (!dma)
			break;
		desc = knav_pool_desc_dma_to_virt(pool, dma);
		if (!desc) {
			dev_dbg(pool->kdev->dev,
				"couldn't unmap desc, continuing\n");
			continue;
		}
	}
	WARN_ON(i != pool->num_desc);
	knav_queue_close(pool->queue);
}


/* Get the DMA address of a descriptor */
dma_addr_t knav_pool_desc_virt_to_dma(void *ph, void *virt)
{
	struct knav_pool *pool = ph;

	return pool->region->dma_start + (virt - pool->region->virt_start);
}
EXPORT_SYMBOL_GPL(knav_pool_desc_virt_to_dma);

void *knav_pool_desc_dma_to_virt(void *ph, dma_addr_t dma)
{
	struct knav_pool *pool = ph;

	return pool->region->virt_start + (dma - pool->region->dma_start);
}
EXPORT_SYMBOL_GPL(knav_pool_desc_dma_to_virt);

/**
 * knav_pool_create() - Create a pool of descriptors
 * @name	- name to give the pool handle
 * @num_desc	- number of descriptors in the pool
 * @region_id	- QMSS region id from which the descriptors are to be
 *		  allocated.
 *
 * Returns a pool handle on success.
 * Use IS_ERR_OR_NULL() to identify error values on return.
 */
void *knav_pool_create(const char *name,
		       int num_desc, int region_id)
{
	struct knav_region *reg_itr, *region = NULL;
	struct knav_pool *pool, *pi;
	struct list_head *node;
	unsigned last_offset;
	bool slot_found;
	int ret;

	if (!kdev)
		return ERR_PTR(-EPROBE_DEFER);

	if (!kdev->dev)
		return ERR_PTR(-ENODEV);

	pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
	if (!pool) {
		dev_err(kdev->dev, "out of memory allocating pool\n");
		return ERR_PTR(-ENOMEM);
	}

	for_each_region(kdev, reg_itr) {
		if (reg_itr->id != region_id)
			continue;
		region = reg_itr;
		break;
	}

	if (!region) {
		dev_err(kdev->dev, "region-id(%d) not found\n", region_id);
		ret = -EINVAL;
		goto err;
	}

	pool->queue = knav_queue_open(name, KNAV_QUEUE_GP, 0);
	if (IS_ERR_OR_NULL(pool->queue)) {
		dev_err(kdev->dev,
			"failed to open queue for pool(%s), error %ld\n",
			name, PTR_ERR(pool->queue));
		ret = PTR_ERR(pool->queue);
		goto err;
	}

	pool->name = kstrndup(name, KNAV_NAME_SIZE - 1, GFP_KERNEL);
	pool->kdev = kdev;
	pool->dev = kdev->dev;

	mutex_lock(&knav_dev_lock);

	if (num_desc > (region->num_desc - region->used_desc)) {
		dev_err(kdev->dev, "out of descs in region(%d) for pool(%s)\n",
			region_id, name);
		ret = -ENOMEM;
		goto err_unlock;
	}

	/* The region maintains a list of pools sorted by region offset;
	 * use the first free slot which is large enough to accommodate
	 * the request.
	 */
	last_offset = 0;
	slot_found = false;
	node = &region->pools;
	list_for_each_entry(pi, &region->pools, region_inst) {
		if ((pi->region_offset - last_offset) >= num_desc) {
			slot_found = true;
			break;
		}
		last_offset = pi->region_offset + pi->num_desc;
	}
	node = &pi->region_inst;

	if (slot_found) {
		pool->region = region;
		pool->num_desc = num_desc;
		pool->region_offset = last_offset;
		region->used_desc += num_desc;
		list_add_tail(&pool->list, &kdev->pools);
		list_add_tail(&pool->region_inst, node);
	} else {
		dev_err(kdev->dev, "pool(%s) create failed: fragmented desc pool in region(%d)\n",
			name, region_id);
		ret = -ENOMEM;
		goto err_unlock;
	}

	mutex_unlock(&knav_dev_lock);
	kdesc_fill_pool(pool);
	return pool;

err_unlock:
	mutex_unlock(&knav_dev_lock);
err:
	kfree(pool->name);
	devm_kfree(kdev->dev, pool);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(knav_pool_create);
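
/*
 * Example (illustrative sketch, not part of this driver): carving a pool of
 * 512 descriptors out of a region. The name and region id are made up for
 * illustration; real values come from the client's device tree data:
 *
 *	void *pool;
 *
 *	pool = knav_pool_create("my-rx-pool", 512, 12);
 *	if (IS_ERR_OR_NULL(pool))
 *		return PTR_ERR(pool);
 */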

/**
 * knav_pool_destroy() - Free a pool of descriptors
 * @pool	- pool handle
 */
void knav_pool_destroy(void *ph)
{
	struct knav_pool *pool = ph;

	if (!pool)
		return;

	if (!pool->region)
		return;

	kdesc_empty_pool(pool);
	mutex_lock(&knav_dev_lock);

	pool->region->used_desc -= pool->num_desc;
	list_del(&pool->region_inst);
	list_del(&pool->list);

	mutex_unlock(&knav_dev_lock);
	kfree(pool->name);
	devm_kfree(kdev->dev, pool);
}
EXPORT_SYMBOL_GPL(knav_pool_destroy);


/**
 * knav_pool_desc_get() - Get a descriptor from the pool
 * @pool	- pool handle
 *
 * Returns descriptor from the pool.
 */
void *knav_pool_desc_get(void *ph)
{
	struct knav_pool *pool = ph;
	dma_addr_t dma;
	unsigned size;
	void *data;

	dma = knav_queue_pop(pool->queue, &size);
	if (unlikely(!dma))
		return ERR_PTR(-ENOMEM);
	data = knav_pool_desc_dma_to_virt(pool, dma);
	return data;
}
EXPORT_SYMBOL_GPL(knav_pool_desc_get);

/**
 * knav_pool_desc_put() - return a descriptor to the pool
 * @pool	- pool handle
 */
void knav_pool_desc_put(void *ph, void *desc)
{
	struct knav_pool *pool = ph;
	dma_addr_t dma;

	dma = knav_pool_desc_virt_to_dma(pool, desc);
	knav_queue_push(pool->queue, dma, pool->region->desc_size, 0);
}
EXPORT_SYMBOL_GPL(knav_pool_desc_put);
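
/*
 * Example (illustrative sketch, not part of this driver): the usual
 * get/fill/put cycle on a pool, with error handling elided:
 *
 *	void *desc;
 *
 *	desc = knav_pool_desc_get(pool);
 *	if (IS_ERR(desc))
 *		return PTR_ERR(desc);
 *	// ... fill in the descriptor fields ...
 *	knav_pool_desc_put(pool, desc);	// hand it back when done
 */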

/**
 * knav_pool_desc_map() - Map descriptor for DMA transfer
 * @pool	- pool handle
 * @desc	- address of descriptor to map
 * @size	- size of descriptor to map
 * @dma		- DMA address return pointer
 * @dma_sz	- adjusted (cache-aligned) size return pointer
 *
 * Returns 0 on success, errno otherwise.
 */
int knav_pool_desc_map(void *ph, void *desc, unsigned size,
		       dma_addr_t *dma, unsigned *dma_sz)
{
	struct knav_pool *pool = ph;

	*dma = knav_pool_desc_virt_to_dma(pool, desc);
	size = min(size, pool->region->desc_size);
	size = ALIGN(size, SMP_CACHE_BYTES);
	*dma_sz = size;
	dma_sync_single_for_device(pool->dev, *dma, size, DMA_TO_DEVICE);

	/* Ensure the descriptor reaches memory before the device sees it */
	__iowmb();

	return 0;
}
EXPORT_SYMBOL_GPL(knav_pool_desc_map);

/**
 * knav_pool_desc_unmap() - Unmap descriptor after DMA transfer
 * @pool	- pool handle
 * @dma		- DMA address of descriptor to unmap
 * @dma_sz	- size of descriptor to unmap
 *
 * Returns the descriptor address on success. Use IS_ERR_OR_NULL() to
 * identify error values on return.
 */
void *knav_pool_desc_unmap(void *ph, dma_addr_t dma, unsigned dma_sz)
{
	struct knav_pool *pool = ph;
	unsigned desc_sz;
	void *desc;

	desc_sz = min(dma_sz, pool->region->desc_size);
	desc = knav_pool_desc_dma_to_virt(pool, dma);
	dma_sync_single_for_cpu(pool->dev, dma, desc_sz, DMA_FROM_DEVICE);
	prefetch(desc);
	return desc;
}
EXPORT_SYMBOL_GPL(knav_pool_desc_unmap);
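
/*
 * Example (illustrative sketch, not part of this driver): pairing map and
 * unmap around the hardware queues for a transmit/completion round trip.
 * The queue handles are hypothetical:
 *
 *	dma_addr_t dma;
 *	unsigned dma_sz;
 *
 *	knav_pool_desc_map(pool, desc, sizeof(*desc), &dma, &dma_sz);
 *	knav_queue_push(tx_queue, dma, dma_sz, 0);
 *	...
 *	dma = knav_queue_pop(done_queue, &dma_sz);
 *	desc = knav_pool_desc_unmap(pool, dma, dma_sz);
 */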

/**
 * knav_pool_count() - Get the number of descriptors in pool.
 * @pool	- pool handle
 * Returns number of elements in the pool.
 */
int knav_pool_count(void *ph)
{
	struct knav_pool *pool = ph;

	return knav_queue_get_count(pool->queue);
}
EXPORT_SYMBOL_GPL(knav_pool_count);

static void knav_queue_setup_region(struct knav_device *kdev,
				    struct knav_region *region)
{
	unsigned hw_num_desc, hw_desc_size, size;
	struct knav_reg_region __iomem *regs;
	struct knav_qmgr_info *qmgr;
	struct knav_pool *pool;
	int id = region->id;
	struct page *page;

	/* unused region? */
	if (!region->num_desc) {
		dev_warn(kdev->dev, "unused region %s\n", region->name);
		return;
	}

	/* get hardware descriptor value */
	hw_num_desc = ilog2(region->num_desc - 1) + 1;

	/* did we force fit ourselves into nothingness? */
	if (region->num_desc < 32) {
		region->num_desc = 0;
		dev_warn(kdev->dev, "too few descriptors in region %s\n",
			 region->name);
		return;
	}

	size = region->num_desc * region->desc_size;
	region->virt_start = alloc_pages_exact(size, GFP_KERNEL | GFP_DMA |
					       GFP_DMA32);
	if (!region->virt_start) {
		region->num_desc = 0;
		dev_err(kdev->dev, "memory alloc failed for region %s\n",
			region->name);
		return;
	}
	region->virt_end = region->virt_start + size;
	page = virt_to_page(region->virt_start);

	region->dma_start = dma_map_page(kdev->dev, page, 0, size,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(kdev->dev, region->dma_start)) {
		dev_err(kdev->dev, "dma map failed for region %s\n",
			region->name);
		goto fail;
	}
	region->dma_end = region->dma_start + size;

	pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
	if (!pool) {
		dev_err(kdev->dev, "out of memory allocating dummy pool\n");
		goto fail;
	}
	pool->num_desc = 0;
	pool->region_offset = region->num_desc;
	list_add(&pool->region_inst, &region->pools);

	dev_dbg(kdev->dev,
		"region %s (%d): size:%d, link:%d@%d, dma:%pad-%pad, virt:%p-%p\n",
		region->name, id, region->desc_size, region->num_desc,
		region->link_index, &region->dma_start, &region->dma_end,
		region->virt_start, region->virt_end);

	hw_desc_size = (region->desc_size / 16) - 1;
	hw_num_desc -= 5;

	for_each_qmgr(kdev, qmgr) {
		regs = qmgr->reg_region + id;
		writel_relaxed((u32)region->dma_start, &regs->base);
		writel_relaxed(region->link_index, &regs->start_index);
		writel_relaxed(hw_desc_size << 16 | hw_num_desc,
			       &regs->size_count);
	}
	return;

fail:
	if (region->dma_start)
		dma_unmap_page(kdev->dev, region->dma_start, size,
			       DMA_BIDIRECTIONAL);
	if (region->virt_start)
		free_pages_exact(region->virt_start, size);
	region->num_desc = 0;
	return;
}

static const char *knav_queue_find_name(struct device_node *node)
{
	const char *name;

	if (of_property_read_string(node, "label", &name) < 0)
		name = node->name;
	if (!name)
		name = "unknown";
	return name;
}

static int knav_queue_setup_regions(struct knav_device *kdev,
				    struct device_node *regions)
{
	struct device *dev = kdev->dev;
	struct knav_region *region;
	struct device_node *child;
	u32 temp[2];
	int ret;

	for_each_child_of_node(regions, child) {
		region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL);
		if (!region) {
			dev_err(dev, "out of memory allocating region\n");
			return -ENOMEM;
		}

		region->name = knav_queue_find_name(child);
		of_property_read_u32(child, "id", &region->id);
		ret = of_property_read_u32_array(child, "region-spec", temp, 2);
		if (!ret) {
			region->num_desc = temp[0];
			region->desc_size = temp[1];
		} else {
			dev_err(dev, "invalid region info %s\n", region->name);
			devm_kfree(dev, region);
			continue;
		}

		if (!of_get_property(child, "link-index", NULL)) {
			dev_err(dev, "No link info for %s\n", region->name);
			devm_kfree(dev, region);
			continue;
		}
		ret = of_property_read_u32(child, "link-index",
					   &region->link_index);
		if (ret) {
			dev_err(dev, "link index not found for %s\n",
				region->name);
			devm_kfree(dev, region);
			continue;
		}

		INIT_LIST_HEAD(&region->pools);
		list_add_tail(&region->list, &kdev->regions);
	}
	if (list_empty(&kdev->regions)) {
		dev_err(dev, "no valid region information found\n");
		return -ENODEV;
	}

	/* Next, we run through the regions and set things up */
	for_each_region(kdev, region)
		knav_queue_setup_region(kdev, region);

	return 0;
}

static int knav_get_link_ram(struct knav_device *kdev,
			     const char *name,
			     struct knav_link_ram_block *block)
{
	struct platform_device *pdev = to_platform_device(kdev->dev);
	struct device_node *node = pdev->dev.of_node;
	u32 temp[2];

	/*
	 * Note: link ram resources are specified in "entry" sized units. In
	 * reality, although entries are ~40 bits in hardware, we treat them
	 * as 64-bit entities here.
	 *
	 * For example, to specify the internal link ram for Keystone-I class
	 * devices, we would set the linkram0 resource to 0x80000-0x83fff.
	 *
	 * This gets a bit weird when other link rams are used. For example,
	 * if the range specified is 0x0c000000-0x0c003fff (i.e., 16K entries
	 * in MSMC SRAM), the actual memory used is 0x0c000000-0x0c020000,
	 * which accounts for 64 bits per entry, for 16K entries.
	 */
	if (!of_property_read_u32_array(node, name, temp, 2)) {
		if (temp[0]) {
			/*
			 * queue_base specified => using internal or onchip
			 * link ram WARNING - we do not "reserve" this block
			 */
			block->dma = (dma_addr_t)temp[0];
			block->virt = NULL;
			block->size = temp[1];
		} else {
			block->size = temp[1];
			/* queue_base not specified => allocate requested size */
			block->virt = dmam_alloc_coherent(kdev->dev,
							  8 * block->size, &block->dma,
							  GFP_KERNEL);
			if (!block->virt) {
				dev_err(kdev->dev, "failed to alloc linkram\n");
				return -ENOMEM;
			}
		}
	} else {
		return -ENODEV;
	}
	return 0;
}
1180*4882a593Smuzhiyun
static int knav_queue_setup_link_ram(struct knav_device *kdev)
{
	struct knav_link_ram_block *block;
	struct knav_qmgr_info *qmgr;

	for_each_qmgr(kdev, qmgr) {
		block = &kdev->link_rams[0];
		dev_dbg(kdev->dev, "linkram0: dma:%pad, virt:%p, size:%x\n",
			&block->dma, block->virt, block->size);
		writel_relaxed((u32)block->dma,
			       &qmgr->reg_config->link_ram_base0);
		if (kdev->version == QMSS_66AK2G)
			writel_relaxed(block->size,
				       &qmgr->reg_config->link_ram_size0);
		else
			writel_relaxed(block->size - 1,
				       &qmgr->reg_config->link_ram_size0);
		block++;
		if (!block->size)
			continue;

		dev_dbg(kdev->dev, "linkram1: dma:%pad, virt:%p, size:%x\n",
			&block->dma, block->virt, block->size);
		writel_relaxed(block->dma, &qmgr->reg_config->link_ram_base1);
	}

	return 0;
}

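/*
 * Create one queue range from a "queue-pools" child node: parse the
 * "qrange" property, map any per-range interrupts (including an optional
 * CPU affinity mask carried in the third interrupt specifier cell),
 * select accumulator vs. general-purpose range ops, and prime each
 * queue's threshold before adding the range to the device list.
 */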
static int knav_setup_queue_range(struct knav_device *kdev,
				  struct device_node *node)
{
	struct device *dev = kdev->dev;
	struct knav_range_info *range;
	struct knav_qmgr_info *qmgr;
	u32 temp[2], start, end, id, index;
	int ret, i;

	range = devm_kzalloc(dev, sizeof(*range), GFP_KERNEL);
	if (!range) {
		dev_err(dev, "out of memory allocating range\n");
		return -ENOMEM;
	}

	range->kdev = kdev;
	range->name = knav_queue_find_name(node);
	ret = of_property_read_u32_array(node, "qrange", temp, 2);
	if (!ret) {
		range->queue_base = temp[0] - kdev->base_id;
		range->num_queues = temp[1];
	} else {
		dev_err(dev, "invalid queue range %s\n", range->name);
		devm_kfree(dev, range);
		return -EINVAL;
	}

	for (i = 0; i < RANGE_MAX_IRQS; i++) {
		struct of_phandle_args oirq;

		if (of_irq_parse_one(node, i, &oirq))
			break;

		range->irqs[i].irq = irq_create_of_mapping(&oirq);
		if (range->irqs[i].irq == IRQ_NONE)
			break;

		range->num_irqs++;

		if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3) {
			unsigned long mask;
			int bit;

			range->irqs[i].cpu_mask = devm_kzalloc(dev,
							       cpumask_size(),
							       GFP_KERNEL);
			if (!range->irqs[i].cpu_mask)
				return -ENOMEM;

			mask = (oirq.args[2] & 0x0000ff00) >> 8;
			for_each_set_bit(bit, &mask, BITS_PER_LONG)
				cpumask_set_cpu(bit, range->irqs[i].cpu_mask);
		}
	}

	range->num_irqs = min(range->num_irqs, range->num_queues);
	if (range->num_irqs)
		range->flags |= RANGE_HAS_IRQ;

	if (of_get_property(node, "qalloc-by-id", NULL))
		range->flags |= RANGE_RESERVED;

	if (of_get_property(node, "accumulator", NULL)) {
		ret = knav_init_acc_range(kdev, node, range);
		if (ret < 0) {
			devm_kfree(dev, range);
			return ret;
		}
	} else {
		range->ops = &knav_gp_range_ops;
	}

	/* set threshold to 1, and flush out the queues */
	for_each_qmgr(kdev, qmgr) {
		start = max(qmgr->start_queue, range->queue_base);
		end = min(qmgr->start_queue + qmgr->num_queues,
			  range->queue_base + range->num_queues);
		for (id = start; id < end; id++) {
			index = id - qmgr->start_queue;
			writel_relaxed(THRESH_GTE | 1,
				       &qmgr->reg_peek[index].ptr_size_thresh);
			writel_relaxed(0,
				       &qmgr->reg_push[index].ptr_size_thresh);
		}
	}

	list_add_tail(&range->list, &kdev->queue_ranges);
	dev_dbg(dev, "added range %s: %d-%d, %d irqs%s%s%s\n",
		range->name, range->queue_base,
		range->queue_base + range->num_queues - 1,
		range->num_irqs,
		(range->flags & RANGE_HAS_IRQ) ? ", has irq" : "",
		(range->flags & RANGE_RESERVED) ? ", reserved" : "",
		(range->flags & RANGE_HAS_ACCUMULATOR) ? ", acc" : "");
	kdev->num_queues_in_use += range->num_queues;
	return 0;
}

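/*
 * Walk the two-level "queue-pools" hierarchy (pool-type nodes containing
 * range nodes) and set up every range found. Individual range failures
 * are tolerated; we only fail if no range at all could be set up.
 */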
static int knav_setup_queue_pools(struct knav_device *kdev,
				  struct device_node *queue_pools)
{
	struct device_node *type, *range;
	int ret;

	for_each_child_of_node(queue_pools, type) {
		for_each_child_of_node(type, range) {
			ret = knav_setup_queue_range(kdev, range);
			/* return value ignored, we init the rest... */
		}
	}

	/* ... and barf if they all failed! */
	if (list_empty(&kdev->queue_ranges)) {
		dev_err(kdev->dev, "no valid queue range found\n");
		return -ENODEV;
	}
	return 0;
}

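/*
 * Teardown helpers: knav_free_queue_range() invokes the range ops'
 * free hook and then unlinks and frees the range itself;
 * knav_free_queue_ranges() drains the whole range list; and
 * knav_queue_free_regions() destroys each region's pools and releases
 * its descriptor memory.
 */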
static void knav_free_queue_range(struct knav_device *kdev,
				  struct knav_range_info *range)
{
	if (range->ops && range->ops->free_range)
		range->ops->free_range(range);
	list_del(&range->list);
	devm_kfree(kdev->dev, range);
}

static void knav_free_queue_ranges(struct knav_device *kdev)
{
	struct knav_range_info *range;

	for (;;) {
		range = first_queue_range(kdev);
		if (!range)
			break;
		knav_free_queue_range(kdev, range);
	}
}

static void knav_queue_free_regions(struct knav_device *kdev)
{
	struct knav_region *region;
	struct knav_pool *pool, *tmp;
	unsigned size;

	for (;;) {
		region = first_region(kdev);
		if (!region)
			break;
		list_for_each_entry_safe(pool, tmp, &region->pools, region_inst)
			knav_pool_destroy(pool);

		size = region->virt_end - region->virt_start;
		if (size)
			free_pages_exact(region->virt_start, size);
		list_del(&region->list);
		devm_kfree(kdev->dev, region);
	}
}

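/*
 * Translate register index "index" of a DT node into a resource and
 * ioremap it. Returns an ERR_PTR() on failure so callers can test the
 * result with IS_ERR().
 */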
static void __iomem *knav_queue_map_reg(struct knav_device *kdev,
					struct device_node *node, int index)
{
	struct resource res;
	void __iomem *regs;
	int ret;

	ret = of_address_to_resource(node, index, &res);
	if (ret) {
		dev_err(kdev->dev, "Can't translate of node(%pOFn) address for index(%d)\n",
			node, index);
		return ERR_PTR(ret);
	}

	regs = devm_ioremap_resource(kdev->dev, &res);
	if (IS_ERR(regs))
		dev_err(kdev->dev, "Failed to map register base for index(%d) node(%pOFn)\n",
			index, node);
	return regs;
}

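/*
 * Instantiate one knav_qmgr_info per "qmgrs" child node: read the
 * "managed-queues" range and map the peek/status/config/region/push/pop
 * register regions. On QMSS_66AK2G there are no status or pop regions
 * (pop shares the push registers, see below) and the remaining register
 * indices shift down. A child that fails to parse or map is skipped
 * rather than failing the whole init.
 */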
static int knav_queue_init_qmgrs(struct knav_device *kdev,
				 struct device_node *qmgrs)
{
	struct device *dev = kdev->dev;
	struct knav_qmgr_info *qmgr;
	struct device_node *child;
	u32 temp[2];
	int ret;

	for_each_child_of_node(qmgrs, child) {
		qmgr = devm_kzalloc(dev, sizeof(*qmgr), GFP_KERNEL);
		if (!qmgr) {
			dev_err(dev, "out of memory allocating qmgr\n");
			return -ENOMEM;
		}

		ret = of_property_read_u32_array(child, "managed-queues",
						 temp, 2);
		if (!ret) {
			qmgr->start_queue = temp[0];
			qmgr->num_queues = temp[1];
		} else {
			dev_err(dev, "invalid qmgr queue range\n");
			devm_kfree(dev, qmgr);
			continue;
		}

		dev_info(dev, "qmgr start queue %d, number of queues %d\n",
			 qmgr->start_queue, qmgr->num_queues);

		qmgr->reg_peek =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PEEK_REG_INDEX);

		if (kdev->version == QMSS) {
			qmgr->reg_status =
				knav_queue_map_reg(kdev, child,
						   KNAV_QUEUE_STATUS_REG_INDEX);
		}

		qmgr->reg_config =
			knav_queue_map_reg(kdev, child,
					   (kdev->version == QMSS_66AK2G) ?
					   KNAV_L_QUEUE_CONFIG_REG_INDEX :
					   KNAV_QUEUE_CONFIG_REG_INDEX);
		qmgr->reg_region =
			knav_queue_map_reg(kdev, child,
					   (kdev->version == QMSS_66AK2G) ?
					   KNAV_L_QUEUE_REGION_REG_INDEX :
					   KNAV_QUEUE_REGION_REG_INDEX);

		qmgr->reg_push =
			knav_queue_map_reg(kdev, child,
					   (kdev->version == QMSS_66AK2G) ?
					   KNAV_L_QUEUE_PUSH_REG_INDEX :
					   KNAV_QUEUE_PUSH_REG_INDEX);

		if (kdev->version == QMSS) {
			qmgr->reg_pop =
				knav_queue_map_reg(kdev, child,
						   KNAV_QUEUE_POP_REG_INDEX);
		}

		if (IS_ERR(qmgr->reg_peek) ||
		    ((kdev->version == QMSS) &&
		     (IS_ERR(qmgr->reg_status) || IS_ERR(qmgr->reg_pop))) ||
		    IS_ERR(qmgr->reg_config) || IS_ERR(qmgr->reg_region) ||
		    IS_ERR(qmgr->reg_push)) {
			dev_err(dev, "failed to map qmgr regs\n");
			if (kdev->version == QMSS) {
				if (!IS_ERR(qmgr->reg_status))
					devm_iounmap(dev, qmgr->reg_status);
				if (!IS_ERR(qmgr->reg_pop))
					devm_iounmap(dev, qmgr->reg_pop);
			}
			if (!IS_ERR(qmgr->reg_peek))
				devm_iounmap(dev, qmgr->reg_peek);
			if (!IS_ERR(qmgr->reg_config))
				devm_iounmap(dev, qmgr->reg_config);
			if (!IS_ERR(qmgr->reg_region))
				devm_iounmap(dev, qmgr->reg_region);
			if (!IS_ERR(qmgr->reg_push))
				devm_iounmap(dev, qmgr->reg_push);
			devm_kfree(dev, qmgr);
			continue;
		}

		/* Use the same push register for pop as well */
		if (kdev->version == QMSS_66AK2G)
			qmgr->reg_pop = qmgr->reg_push;

		list_add_tail(&qmgr->list, &kdev->qmgrs);
		dev_info(dev, "added qmgr start queue %d, num of queues %d, reg_peek %p, reg_status %p, reg_config %p, reg_region %p, reg_push %p, reg_pop %p\n",
			 qmgr->start_queue, qmgr->num_queues,
			 qmgr->reg_peek, qmgr->reg_status,
			 qmgr->reg_config, qmgr->reg_region,
			 qmgr->reg_push, qmgr->reg_pop);
	}
	return 0;
}

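/*
 * Instantiate one knav_pdsp_info per "pdsps" child node, mapping its
 * IRAM, register, INTD and command regions. As with qmgrs, a child
 * whose registers cannot be mapped is skipped rather than failing the
 * probe.
 */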
static int knav_queue_init_pdsps(struct knav_device *kdev,
				 struct device_node *pdsps)
{
	struct device *dev = kdev->dev;
	struct knav_pdsp_info *pdsp;
	struct device_node *child;

	for_each_child_of_node(pdsps, child) {
		pdsp = devm_kzalloc(dev, sizeof(*pdsp), GFP_KERNEL);
		if (!pdsp) {
			dev_err(dev, "out of memory allocating pdsp\n");
			return -ENOMEM;
		}
		pdsp->name = knav_queue_find_name(child);
		pdsp->iram =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PDSP_IRAM_REG_INDEX);
		pdsp->regs =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PDSP_REGS_REG_INDEX);
		pdsp->intd =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PDSP_INTD_REG_INDEX);
		pdsp->command =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PDSP_CMD_REG_INDEX);

		if (IS_ERR(pdsp->command) || IS_ERR(pdsp->iram) ||
		    IS_ERR(pdsp->regs) || IS_ERR(pdsp->intd)) {
			dev_err(dev, "failed to map pdsp %s regs\n",
				pdsp->name);
			if (!IS_ERR(pdsp->command))
				devm_iounmap(dev, pdsp->command);
			if (!IS_ERR(pdsp->iram))
				devm_iounmap(dev, pdsp->iram);
			if (!IS_ERR(pdsp->regs))
				devm_iounmap(dev, pdsp->regs);
			if (!IS_ERR(pdsp->intd))
				devm_iounmap(dev, pdsp->intd);
			devm_kfree(dev, pdsp);
			continue;
		}
		of_property_read_u32(child, "id", &pdsp->id);
		list_add_tail(&pdsp->list, &kdev->pdsps);
		dev_dbg(dev, "added pdsp %s: command %p, iram %p, regs %p, intd %p\n",
			pdsp->name, pdsp->command, pdsp->iram, pdsp->regs,
			pdsp->intd);
	}
	return 0;
}

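/*
 * Clear the PDSP enable bit and poll (up to the timeout) for the
 * RUNNING flag to drop before marking the PDSP as neither loaded nor
 * started.
 */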
static int knav_queue_stop_pdsp(struct knav_device *kdev,
				struct knav_pdsp_info *pdsp)
{
	u32 val, timeout = 1000;
	int ret;

	val = readl_relaxed(&pdsp->regs->control) & ~PDSP_CTRL_ENABLE;
	writel_relaxed(val, &pdsp->regs->control);
	ret = knav_queue_pdsp_wait(&pdsp->regs->control, timeout,
				   PDSP_CTRL_RUNNING);
	if (ret < 0) {
		dev_err(kdev->dev, "timed out on pdsp %s stop\n", pdsp->name);
		return ret;
	}
	pdsp->loaded = false;
	pdsp->started = false;
	return 0;
}

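/*
 * Walk knav_acc_firmwares[] until request_firmware_direct() succeeds,
 * then copy the big-endian firmware image word by word into the PDSP's
 * IRAM.
 */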
static int knav_queue_load_pdsp(struct knav_device *kdev,
				struct knav_pdsp_info *pdsp)
{
	int i, ret, fwlen;
	const struct firmware *fw;
	bool found = false;
	u32 *fwdata;

	for (i = 0; i < ARRAY_SIZE(knav_acc_firmwares); i++) {
		if (knav_acc_firmwares[i]) {
			ret = request_firmware_direct(&fw,
						      knav_acc_firmwares[i],
						      kdev->dev);
			if (!ret) {
				found = true;
				break;
			}
		}
	}

	if (!found) {
		dev_err(kdev->dev, "failed to get firmware for pdsp\n");
		return -ENODEV;
	}

	dev_info(kdev->dev, "firmware file %s downloaded for PDSP\n",
		 knav_acc_firmwares[i]);

	writel_relaxed(pdsp->id + 1, pdsp->command + 0x18);
	/* download the firmware */
	fwdata = (u32 *)fw->data;
	fwlen = (fw->size + sizeof(u32) - 1) / sizeof(u32);
	for (i = 0; i < fwlen; i++)
		writel_relaxed(be32_to_cpu(fwdata[i]), pdsp->iram + i);

	release_firmware(fw);
	return 0;
}

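/*
 * Bring a loaded PDSP out of reset: sync via the command register,
 * clear the program counter and soft-reset bits, set the enable bit,
 * and wait for the firmware to acknowledge by clearing the command
 * word.
 */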
static int knav_queue_start_pdsp(struct knav_device *kdev,
				 struct knav_pdsp_info *pdsp)
{
	u32 val, timeout = 1000;
	int ret;

	/* write a command for sync */
	writel_relaxed(0xffffffff, pdsp->command);
	while (readl_relaxed(pdsp->command) != 0xffffffff)
		cpu_relax();

	/* soft reset the PDSP */
	val = readl_relaxed(&pdsp->regs->control);
	val &= ~(PDSP_CTRL_PC_MASK | PDSP_CTRL_SOFT_RESET);
	writel_relaxed(val, &pdsp->regs->control);

	/* enable pdsp */
	val = readl_relaxed(&pdsp->regs->control) | PDSP_CTRL_ENABLE;
	writel_relaxed(val, &pdsp->regs->control);

	/* wait for command register to clear */
	ret = knav_queue_pdsp_wait(pdsp->command, timeout, 0);
	if (ret < 0) {
		dev_err(kdev->dev,
			"timed out on pdsp %s command register wait\n",
			pdsp->name);
		return ret;
	}
	return 0;
}

static void knav_queue_stop_pdsps(struct knav_device *kdev)
{
	struct knav_pdsp_info *pdsp;

	/* disable all pdsps */
	for_each_pdsp(kdev, pdsp)
		knav_queue_stop_pdsp(kdev, pdsp);
}

static int knav_queue_start_pdsps(struct knav_device *kdev)
{
	struct knav_pdsp_info *pdsp;
	int ret;

	knav_queue_stop_pdsps(kdev);
	/*
	 * Now load them all. We return success even if a pdsp is not
	 * loaded, as accumulator channels are optional and depend on
	 * firmware being available in the system. We set the loaded and
	 * started flags, and when initializing the acc range we check
	 * them and init the range only if the pdsp is started.
	 */
	for_each_pdsp(kdev, pdsp) {
		ret = knav_queue_load_pdsp(kdev, pdsp);
		if (!ret)
			pdsp->loaded = true;
	}

	for_each_pdsp(kdev, pdsp) {
		if (pdsp->loaded) {
			ret = knav_queue_start_pdsp(kdev, pdsp);
			if (!ret)
				pdsp->started = true;
		}
	}
	return 0;
}

static inline struct knav_qmgr_info *knav_find_qmgr(unsigned id)
{
	struct knav_qmgr_info *qmgr;

	for_each_qmgr(kdev, qmgr) {
		if ((id >= qmgr->start_queue) &&
		    (id < qmgr->start_queue + qmgr->num_queues))
			return qmgr;
	}
	return NULL;
}

static int knav_queue_init_queue(struct knav_device *kdev,
				 struct knav_range_info *range,
				 struct knav_queue_inst *inst,
				 unsigned id)
{
	char irq_name[KNAV_NAME_SIZE];

	inst->qmgr = knav_find_qmgr(id);
	if (!inst->qmgr)
		return -1;

	INIT_LIST_HEAD(&inst->handles);
	inst->kdev = kdev;
	inst->range = range;
	inst->irq_num = -1;
	inst->id = id;
	scnprintf(irq_name, sizeof(irq_name), "hwqueue-%d", id);
	inst->irq_name = kstrndup(irq_name, sizeof(irq_name), GFP_KERNEL);

	if (range->ops && range->ops->init_queue)
		return range->ops->init_queue(range, inst);
	else
		return 0;
}

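/*
 * Allocate the per-queue instance array (padded to a power-of-two
 * element size so that index-to-instance arithmetic is a simple shift)
 * and initialize one instance per queue in every registered range.
 */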
static int knav_queue_init_queues(struct knav_device *kdev)
{
	struct knav_range_info *range;
	int size, id, base_idx;
	int idx = 0, ret = 0;

	/* how much do we need for instance data? */
	size = sizeof(struct knav_queue_inst);

	/*
	 * Round this up to a power of 2 to keep the index-to-instance
	 * arithmetic fast.
	 */
	kdev->inst_shift = order_base_2(size);
	size = (1 << kdev->inst_shift) * kdev->num_queues_in_use;
	kdev->instances = devm_kzalloc(kdev->dev, size, GFP_KERNEL);
	if (!kdev->instances)
		return -ENOMEM;

	for_each_queue_range(kdev, range) {
		if (range->ops && range->ops->init_range)
			range->ops->init_range(range);
		base_idx = idx;
		for (id = range->queue_base;
		     id < range->queue_base + range->num_queues; id++, idx++) {
			ret = knav_queue_init_queue(kdev, range,
						    knav_queue_idx_to_inst(kdev, idx),
						    id);
			if (ret < 0)
				return ret;
		}
		range->queue_base_inst =
			knav_queue_idx_to_inst(kdev, base_idx);
	}
	return 0;
}

/* Match table for of_platform binding */
static const struct of_device_id keystone_qmss_of_match[] = {
	{
		.compatible = "ti,keystone-navigator-qmss",
	},
	{
		.compatible = "ti,66ak2g-navss-qm",
		.data = (void *)QMSS_66AK2G,
	},
	{},
};
MODULE_DEVICE_TABLE(of, keystone_qmss_of_match);

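/*
 * Illustrative device-tree layout consumed by the probe below. The node
 * and property names match what the code parses; the addresses, sizes
 * and queue numbers are made-up placeholders, not a real board
 * description:
 *
 *	qmss: qmss@2a40000 {
 *		compatible = "ti,keystone-navigator-qmss";
 *		queue-range = <0 0x4000>;
 *		linkram0 = <0x80000 0x4000>;
 *		linkram1 = <0x0 0x10000>;
 *		qmgrs { ... managed-queues = <0 0x2000>; ... };
 *		queue-pools { ... qrange = <77 8>; ... };
 *		descriptor-regions { ... };
 *		pdsps { ... id = <0>; ... };
 *	};
 */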
static int knav_queue_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct device_node *qmgrs, *queue_pools, *regions, *pdsps;
	const struct of_device_id *match;
	struct device *dev = &pdev->dev;
	u32 temp[2];
	int ret;

	if (!node) {
		dev_err(dev, "device tree info unavailable\n");
		return -ENODEV;
	}

	kdev = devm_kzalloc(dev, sizeof(struct knav_device), GFP_KERNEL);
	if (!kdev) {
		dev_err(dev, "memory allocation failed\n");
		return -ENOMEM;
	}

	match = of_match_device(of_match_ptr(keystone_qmss_of_match), dev);
	if (match && match->data)
		kdev->version = QMSS_66AK2G;

	platform_set_drvdata(pdev, kdev);
	kdev->dev = dev;
	INIT_LIST_HEAD(&kdev->queue_ranges);
	INIT_LIST_HEAD(&kdev->qmgrs);
	INIT_LIST_HEAD(&kdev->pools);
	INIT_LIST_HEAD(&kdev->regions);
	INIT_LIST_HEAD(&kdev->pdsps);

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(&pdev->dev);
		dev_err(dev, "Failed to enable QMSS\n");
		return ret;
	}

	if (of_property_read_u32_array(node, "queue-range", temp, 2)) {
		dev_err(dev, "queue-range not specified\n");
		ret = -ENODEV;
		goto err;
	}
	kdev->base_id = temp[0];
	kdev->num_queues = temp[1];

	/* Initialize queue managers using device tree configuration */
	qmgrs = of_get_child_by_name(node, "qmgrs");
	if (!qmgrs) {
		dev_err(dev, "queue manager info not specified\n");
		ret = -ENODEV;
		goto err;
	}
	ret = knav_queue_init_qmgrs(kdev, qmgrs);
	of_node_put(qmgrs);
	if (ret)
		goto err;

	/* get pdsp configuration values from device tree */
	pdsps = of_get_child_by_name(node, "pdsps");
	if (pdsps) {
		ret = knav_queue_init_pdsps(kdev, pdsps);
		if (!ret)
			ret = knav_queue_start_pdsps(kdev);
		/* drop the node reference before checking for errors */
		of_node_put(pdsps);
		if (ret)
			goto err;
	}

	/* get usable queue range values from device tree */
	queue_pools = of_get_child_by_name(node, "queue-pools");
	if (!queue_pools) {
		dev_err(dev, "queue-pools not specified\n");
		ret = -ENODEV;
		goto err;
	}
	ret = knav_setup_queue_pools(kdev, queue_pools);
	of_node_put(queue_pools);
	if (ret)
		goto err;

	ret = knav_get_link_ram(kdev, "linkram0", &kdev->link_rams[0]);
	if (ret) {
		dev_err(kdev->dev, "could not setup linking ram\n");
		goto err;
	}

	ret = knav_get_link_ram(kdev, "linkram1", &kdev->link_rams[1]);
	if (ret) {
		/*
		 * nothing really, we have one linking ram already, so we just
		 * live within our means
		 */
	}

	ret = knav_queue_setup_link_ram(kdev);
	if (ret)
		goto err;

	regions = of_get_child_by_name(node, "descriptor-regions");
	if (!regions) {
		dev_err(dev, "descriptor-regions not specified\n");
		ret = -ENODEV;
		goto err;
	}
	ret = knav_queue_setup_regions(kdev, regions);
	of_node_put(regions);
	if (ret)
		goto err;

	ret = knav_queue_init_queues(kdev);
	if (ret < 0) {
		dev_err(dev, "hwqueue initialization failed\n");
		goto err;
	}

	debugfs_create_file("qmss", S_IFREG | S_IRUGO, NULL, NULL,
			    &knav_queue_debug_fops);
	device_ready = true;
	return 0;

err:
	knav_queue_stop_pdsps(kdev);
	knav_queue_free_regions(kdev);
	knav_free_queue_ranges(kdev);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return ret;
}

static int knav_queue_remove(struct platform_device *pdev)
{
	/* TODO: Free resources */
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

static struct platform_driver keystone_qmss_driver = {
	.probe	= knav_queue_probe,
	.remove	= knav_queue_remove,
	.driver	= {
		.name	= "keystone-navigator-qmss",
		.of_match_table = keystone_qmss_of_match,
	},
};
module_platform_driver(keystone_qmss_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI QMSS driver for Keystone SOCs");
MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>");