// SPDX-License-Identifier: GPL-2.0-only
/*
 * Keystone accumulator queue manager
 *
 * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
 * Author:	Sandeep Nair <sandeep_n@ti.com>
 *		Cyril Chemparathy <cyril@ti.com>
 *		Santosh Shilimkar <santosh.shilimkar@ti.com>
 */

#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/soc/ti/knav_qmss.h>

#include "knav_qmss.h"

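/*
 * Translate a queue offset within an accumulator range to its queue
 * instance: instances are laid out contiguously from queue_base_inst,
 * spaced 1 << kdev->inst_shift bytes apart.
 */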
#define knav_range_offset_to_inst(kdev, range, q)	\
	(range->queue_base_inst + (q << kdev->inst_shift))

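/*
 * Notify the queues serviced by an accumulator channel. For a
 * multi-queue range, walk every queue in the range and notify those
 * flagged notify_needed; otherwise notify the single queue mapped to
 * this channel.
 */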
static void __knav_acc_notify(struct knav_range_info *range,
			      struct knav_acc_channel *acc)
{
	struct knav_device *kdev = range->kdev;
	struct knav_queue_inst *inst;
	int range_base, queue;

	range_base = kdev->base_id + range->queue_base;

	if (range->flags & RANGE_MULTI_QUEUE) {
		for (queue = 0; queue < range->num_queues; queue++) {
			inst = knav_range_offset_to_inst(kdev, range,
							 queue);
			if (inst->notify_needed) {
				inst->notify_needed = 0;
				dev_dbg(kdev->dev, "acc-irq: notifying %d\n",
					range_base + queue);
				knav_queue_notify(inst);
			}
		}
	} else {
		queue = acc->channel - range->acc_info.start_channel;
		inst = knav_range_offset_to_inst(kdev, range, queue);
		dev_dbg(kdev->dev, "acc-irq: notifying %d\n",
			range_base + queue);
		knav_queue_notify(inst);
	}
}

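/*
 * Called when notifications are enabled or disabled on a queue. If
 * descriptors are already pending when notifications are turned on,
 * re-trigger the channel interrupt by setting its status bit in the
 * INTD so the pending work is picked up.
 */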
static int knav_acc_set_notify(struct knav_range_info *range,
			       struct knav_queue_inst *kq,
			       bool enabled)
{
	struct knav_pdsp_info *pdsp = range->acc_info.pdsp;
	struct knav_device *kdev = range->kdev;
	u32 mask, offset;

	/*
	 * when enabling, we need to re-trigger an interrupt if we
	 * have descriptors pending
	 */
	if (!enabled || atomic_read(&kq->desc_count) <= 0)
		return 0;

	kq->notify_needed = 1;
	atomic_inc(&kq->acc->retrigger_count);
	mask = BIT(kq->acc->channel % 32);
	offset = ACC_INTD_OFFSET_STATUS(kq->acc->channel);
	dev_dbg(kdev->dev, "setup-notify: re-triggering irq for %s\n",
		kq->acc->name);
	writel_relaxed(mask, pdsp->intd + offset);
	return 0;
}

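/*
 * Accumulator channel interrupt handler. Identify the channel, drain
 * the currently active ping/pong list into the per-queue descriptor
 * ring, notify the affected queues, then flip to the other list and
 * acknowledge the interrupt (reset the INTD count and write EOI).
 */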
static irqreturn_t knav_acc_int_handler(int irq, void *_instdata)
{
	struct knav_acc_channel *acc;
	struct knav_queue_inst *kq = NULL;
	struct knav_range_info *range;
	struct knav_pdsp_info *pdsp;
	struct knav_acc_info *info;
	struct knav_device *kdev;

	u32 *list, *list_cpu, val, idx, notifies;
	int range_base, channel, queue = 0;
	dma_addr_t list_dma;

	range = _instdata;
	info  = &range->acc_info;
	kdev  = range->kdev;
	pdsp  = range->acc_info.pdsp;
	acc   = range->acc;

	range_base = kdev->base_id + range->queue_base;
	if ((range->flags & RANGE_MULTI_QUEUE) == 0) {
		for (queue = 0; queue < range->num_irqs; queue++)
			if (range->irqs[queue].irq == irq)
				break;
		kq = knav_range_offset_to_inst(kdev, range, queue);
		acc += queue;
	}

	channel = acc->channel;
	list_dma = acc->list_dma[acc->list_index];
	list_cpu = acc->list_cpu[acc->list_index];
	dev_dbg(kdev->dev, "acc-irq: channel %d, list %d, virt %p, dma %pad\n",
		channel, acc->list_index, list_cpu, &list_dma);
	if (atomic_read(&acc->retrigger_count)) {
		atomic_dec(&acc->retrigger_count);
		__knav_acc_notify(range, acc);
		writel_relaxed(1, pdsp->intd + ACC_INTD_OFFSET_COUNT(channel));
		/* ack the interrupt */
		writel_relaxed(ACC_CHANNEL_INT_BASE + channel,
			       pdsp->intd + ACC_INTD_OFFSET_EOI);

		return IRQ_HANDLED;
	}

	notifies = readl_relaxed(pdsp->intd + ACC_INTD_OFFSET_COUNT(channel));
	WARN_ON(!notifies);
	dma_sync_single_for_cpu(kdev->dev, list_dma, info->list_size,
				DMA_FROM_DEVICE);

	for (list = list_cpu; list < list_cpu + (info->list_size / sizeof(u32));
	     list += ACC_LIST_ENTRY_WORDS) {
		if (ACC_LIST_ENTRY_WORDS == 1) {
			dev_dbg(kdev->dev,
				"acc-irq: list %d, entry @%p, %08x\n",
				acc->list_index, list, list[0]);
		} else if (ACC_LIST_ENTRY_WORDS == 2) {
			dev_dbg(kdev->dev,
				"acc-irq: list %d, entry @%p, %08x %08x\n",
				acc->list_index, list, list[0], list[1]);
		} else if (ACC_LIST_ENTRY_WORDS == 4) {
			dev_dbg(kdev->dev,
				"acc-irq: list %d, entry @%p, %08x %08x %08x %08x\n",
				acc->list_index, list, list[0], list[1],
				list[2], list[3]);
		}

		val = list[ACC_LIST_ENTRY_DESC_IDX];
		if (!val)
			break;

		if (range->flags & RANGE_MULTI_QUEUE) {
			queue = list[ACC_LIST_ENTRY_QUEUE_IDX] >> 16;
			if (queue < range_base ||
			    queue >= range_base + range->num_queues) {
				dev_err(kdev->dev,
					"bad queue %d, expecting %d-%d\n",
					queue, range_base,
					range_base + range->num_queues);
				break;
			}
			queue -= range_base;
			kq = knav_range_offset_to_inst(kdev, range,
						       queue);
		}

		if (atomic_inc_return(&kq->desc_count) >= ACC_DESCS_MAX) {
			atomic_dec(&kq->desc_count);
			dev_err(kdev->dev,
				"acc-irq: queue %d full, entry dropped\n",
				queue + range_base);
			continue;
		}

		idx = atomic_inc_return(&kq->desc_tail) & ACC_DESCS_MASK;
		kq->descs[idx] = val;
		kq->notify_needed = 1;
		dev_dbg(kdev->dev, "acc-irq: enqueue %08x at %d, queue %d\n",
			val, idx, queue + range_base);
	}

	__knav_acc_notify(range, acc);
	memset(list_cpu, 0, info->list_size);
	dma_sync_single_for_device(kdev->dev, list_dma, info->list_size,
				   DMA_TO_DEVICE);

	/* flip to the other list */
	acc->list_index ^= 1;

	/* reset the interrupt counter */
	writel_relaxed(1, pdsp->intd + ACC_INTD_OFFSET_COUNT(channel));

	/* ack the interrupt */
	writel_relaxed(ACC_CHANNEL_INT_BASE + channel,
		       pdsp->intd + ACC_INTD_OFFSET_EOI);

	return IRQ_HANDLED;
}

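/*
 * Track which queues of a channel are open via acc->open_mask.
 * Request the channel IRQ (and apply any CPU affinity hint) when the
 * first queue opens, and free it again when the last queue closes.
 */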
static int knav_range_setup_acc_irq(struct knav_range_info *range,
				    int queue, bool enabled)
{
	struct knav_device *kdev = range->kdev;
	struct knav_acc_channel *acc;
	struct cpumask *cpu_mask;
	int ret = 0, irq;
	u32 old, new;

	if (range->flags & RANGE_MULTI_QUEUE) {
		acc = range->acc;
		irq = range->irqs[0].irq;
		cpu_mask = range->irqs[0].cpu_mask;
	} else {
		acc = range->acc + queue;
		irq = range->irqs[queue].irq;
		cpu_mask = range->irqs[queue].cpu_mask;
	}

	old = acc->open_mask;
	if (enabled)
		new = old | BIT(queue);
	else
		new = old & ~BIT(queue);
	acc->open_mask = new;

	dev_dbg(kdev->dev,
		"setup-acc-irq: open mask old %08x, new %08x, channel %s\n",
		old, new, acc->name);

	if (likely(new == old))
		return 0;

	if (new && !old) {
		dev_dbg(kdev->dev,
			"setup-acc-irq: requesting %s for channel %s\n",
			acc->name, acc->name);
		ret = request_irq(irq, knav_acc_int_handler, 0, acc->name,
				  range);
		if (!ret && cpu_mask) {
			ret = irq_set_affinity_hint(irq, cpu_mask);
			if (ret) {
				dev_warn(range->kdev->dev,
					 "Failed to set IRQ affinity\n");
				return ret;
			}
		}
	}

	if (old && !new) {
		dev_dbg(kdev->dev, "setup-acc-irq: freeing %s for channel %s\n",
			acc->name, acc->name);
		ret = irq_set_affinity_hint(irq, NULL);
		if (ret)
			dev_warn(range->kdev->dev,
				 "Failed to set IRQ affinity\n");
		free_irq(irq, range);
	}

	return ret;
}

static const char *knav_acc_result_str(enum knav_acc_result result)
{
	static const char * const result_str[] = {
		[ACC_RET_IDLE]			= "idle",
		[ACC_RET_SUCCESS]		= "success",
		[ACC_RET_INVALID_COMMAND]	= "invalid command",
		[ACC_RET_INVALID_CHANNEL]	= "invalid channel",
		[ACC_RET_INACTIVE_CHANNEL]	= "inactive channel",
		[ACC_RET_ACTIVE_CHANNEL]	= "active channel",
		[ACC_RET_INVALID_QUEUE]		= "invalid queue",
		[ACC_RET_INVALID_RET]		= "invalid return code",
	};

	if (result >= ARRAY_SIZE(result_str))
		return result_str[ACC_RET_INVALID_RET];
	else
		return result_str[result];
}

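/*
 * Post an accumulator command to the PDSP firmware. The fields are
 * written individually with the command word last, then we busy-wait
 * for the firmware to clear the command byte and return the result
 * code from bits 31:24.
 */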
static enum knav_acc_result
knav_acc_write(struct knav_device *kdev, struct knav_pdsp_info *pdsp,
	       struct knav_reg_acc_command *cmd)
{
	u32 result;

	dev_dbg(kdev->dev, "acc command %08x %08x %08x %08x %08x\n",
		cmd->command, cmd->queue_mask, cmd->list_dma,
		cmd->queue_num, cmd->timer_config);

	writel_relaxed(cmd->timer_config, &pdsp->acc_command->timer_config);
	writel_relaxed(cmd->queue_num, &pdsp->acc_command->queue_num);
	writel_relaxed(cmd->list_dma, &pdsp->acc_command->list_dma);
	writel_relaxed(cmd->queue_mask, &pdsp->acc_command->queue_mask);
	writel_relaxed(cmd->command, &pdsp->acc_command->command);

	/* wait for the command to clear */
	do {
		result = readl_relaxed(&pdsp->acc_command->command);
	} while ((result >> 8) & 0xff);

	return (result >> 24) & 0xff;
}

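/*
 * Build the common part of an accumulator command for a channel. For a
 * multi-queue range the queue mask covers all queues from the
 * 32-aligned base queue; queue_num packs the list entry count in the
 * upper 16 bits and the base queue in the lower 16, and timer_config
 * packs the list entry type, multi-queue flag, pacing mode and timer
 * count.
 */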
static void knav_acc_setup_cmd(struct knav_device *kdev,
			       struct knav_range_info *range,
			       struct knav_reg_acc_command *cmd,
			       int queue)
{
	struct knav_acc_info *info = &range->acc_info;
	struct knav_acc_channel *acc;
	int queue_base;
	u32 queue_mask;

	if (range->flags & RANGE_MULTI_QUEUE) {
		acc = range->acc;
		queue_base = range->queue_base;
		queue_mask = BIT(range->num_queues) - 1;
	} else {
		acc = range->acc + queue;
		queue_base = range->queue_base + queue;
		queue_mask = 0;
	}

	memset(cmd, 0, sizeof(*cmd));
	cmd->command    = acc->channel;
	cmd->queue_mask = queue_mask;
	cmd->list_dma   = (u32)acc->list_dma[0];
	cmd->queue_num  = info->list_entries << 16;
	cmd->queue_num |= queue_base;

	cmd->timer_config = ACC_LIST_ENTRY_TYPE << 18;
	if (range->flags & RANGE_MULTI_QUEUE)
		cmd->timer_config |= ACC_CFG_MULTI_QUEUE;
	cmd->timer_config |= info->pacing_mode << 16;
	cmd->timer_config |= info->timer_count;
}

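/*
 * knav_acc_stop()/knav_acc_start() issue the PDSP "disable channel" /
 * "enable channel" commands for one accumulator channel of the range.
 */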
static void knav_acc_stop(struct knav_device *kdev,
			  struct knav_range_info *range,
			  int queue)
{
	struct knav_reg_acc_command cmd;
	struct knav_acc_channel *acc;
	enum knav_acc_result result;

	acc = range->acc + queue;

	knav_acc_setup_cmd(kdev, range, &cmd, queue);
	cmd.command |= ACC_CMD_DISABLE_CHANNEL << 8;
	result = knav_acc_write(kdev, range->acc_info.pdsp, &cmd);

	dev_dbg(kdev->dev, "stopped acc channel %s, result %s\n",
		acc->name, knav_acc_result_str(result));
}

static enum knav_acc_result knav_acc_start(struct knav_device *kdev,
					    struct knav_range_info *range,
					    int queue)
{
	struct knav_reg_acc_command cmd;
	struct knav_acc_channel *acc;
	enum knav_acc_result result;

	acc = range->acc + queue;

	knav_acc_setup_cmd(kdev, range, &cmd, queue);
	cmd.command |= ACC_CMD_ENABLE_CHANNEL << 8;
	result = knav_acc_write(kdev, range->acc_info.pdsp, &cmd);

	dev_dbg(kdev->dev, "started acc channel %s, result %s\n",
		acc->name, knav_acc_result_str(result));

	return result;
}

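/*
 * (Re)start every accumulator channel of the range. A multi-queue
 * range uses a single channel, so only the first iteration runs.
 */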
static int knav_acc_init_range(struct knav_range_info *range)
{
	struct knav_device *kdev = range->kdev;
	struct knav_acc_channel *acc;
	enum knav_acc_result result;
	int queue;

	for (queue = 0; queue < range->num_queues; queue++) {
		acc = range->acc + queue;

		knav_acc_stop(kdev, range, queue);
		acc->list_index = 0;
		result = knav_acc_start(kdev, range, queue);

		if (result != ACC_RET_SUCCESS)
			return -EIO;

		if (range->flags & RANGE_MULTI_QUEUE)
			return 0;
	}
	return 0;
}

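/*
 * Per-queue init: allocate the software descriptor ring drained by the
 * interrupt handler and bind the queue to its accumulator channel.
 */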
static int knav_acc_init_queue(struct knav_range_info *range,
			       struct knav_queue_inst *kq)
{
	unsigned id = kq->id - range->queue_base;

	kq->descs = devm_kcalloc(range->kdev->dev,
				 ACC_DESCS_MAX, sizeof(u32), GFP_KERNEL);
	if (!kq->descs)
		return -ENOMEM;

	kq->acc = range->acc;
	if ((range->flags & RANGE_MULTI_QUEUE) == 0)
		kq->acc += id;
	return 0;
}

static int knav_acc_open_queue(struct knav_range_info *range,
			       struct knav_queue_inst *inst, unsigned flags)
{
	unsigned id = inst->id - range->queue_base;

	return knav_range_setup_acc_irq(range, id, true);
}

static int knav_acc_close_queue(struct knav_range_info *range,
				struct knav_queue_inst *inst)
{
	unsigned id = inst->id - range->queue_base;

	return knav_range_setup_acc_irq(range, id, false);
}

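/*
 * Tear down the range: unmap and free the ping/pong list pages of each
 * channel (one channel for a multi-queue range, one per queue otherwise).
 */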
static int knav_acc_free_range(struct knav_range_info *range)
{
	struct knav_device *kdev = range->kdev;
	struct knav_acc_channel *acc;
	struct knav_acc_info *info;
	int channel, channels;

	info = &range->acc_info;

	if (range->flags & RANGE_MULTI_QUEUE)
		channels = 1;
	else
		channels = range->num_queues;

	for (channel = 0; channel < channels; channel++) {
		acc = range->acc + channel;
		if (!acc->list_cpu[0])
			continue;
		dma_unmap_single(kdev->dev, acc->list_dma[0],
				 info->mem_size, DMA_BIDIRECTIONAL);
		free_pages_exact(acc->list_cpu[0], info->mem_size);
	}
	devm_kfree(range->kdev->dev, range->acc);
	return 0;
}

static struct knav_range_ops knav_acc_range_ops = {
	.set_notify	= knav_acc_set_notify,
	.init_queue	= knav_acc_init_queue,
	.open_queue	= knav_acc_open_queue,
	.close_queue	= knav_acc_close_queue,
	.init_range	= knav_acc_init_range,
	.free_range	= knav_acc_free_range,
};

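/*
 * Illustrative device-tree fragment (values are examples only, not taken
 * from this file) matching the "accumulator" cells parsed below:
 * <pdsp-id start-channel list-entries pacing-mode timeout>, with an
 * optional "multi-queue" property:
 *
 *	accumulator = <0 36 16 2 50>;
 *	multi-queue;
 */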
/**
 * knav_init_acc_range: Initialise accumulator ranges
 *
 * @kdev: qmss device
 * @node: device node
 * @range: qmss range information
 *
 * Return: 0 on success or a negative error code
 */
int knav_init_acc_range(struct knav_device *kdev,
			struct device_node *node,
			struct knav_range_info *range)
{
	struct knav_acc_channel *acc;
	struct knav_pdsp_info *pdsp;
	struct knav_acc_info *info;
	int ret, channel, channels;
	int list_size, mem_size;
	dma_addr_t list_dma;
	void *list_mem;
	u32 config[5];

	range->flags |= RANGE_HAS_ACCUMULATOR;
	info = &range->acc_info;

	ret = of_property_read_u32_array(node, "accumulator", config, 5);
	if (ret)
		return ret;

	info->pdsp_id = config[0];
	info->start_channel = config[1];
	info->list_entries = config[2];
	info->pacing_mode = config[3];
	info->timer_count = config[4] / ACC_DEFAULT_PERIOD;

	if (info->start_channel > ACC_MAX_CHANNEL) {
		dev_err(kdev->dev, "channel %d invalid for range %s\n",
			info->start_channel, range->name);
		return -EINVAL;
	}

	if (info->pacing_mode > 3) {
		dev_err(kdev->dev, "pacing mode %d invalid for range %s\n",
			info->pacing_mode, range->name);
		return -EINVAL;
	}

	pdsp = knav_find_pdsp(kdev, info->pdsp_id);
	if (!pdsp) {
		dev_err(kdev->dev, "pdsp id %d not found for range %s\n",
			info->pdsp_id, range->name);
		return -EINVAL;
	}

	if (!pdsp->started) {
		dev_err(kdev->dev, "pdsp id %d not started for range %s\n",
			info->pdsp_id, range->name);
		return -ENODEV;
	}

	info->pdsp = pdsp;
	channels = range->num_queues;
	if (of_get_property(node, "multi-queue", NULL)) {
		range->flags |= RANGE_MULTI_QUEUE;
		channels = 1;
		if (range->queue_base & (32 - 1)) {
			dev_err(kdev->dev,
				"misaligned multi-queue accumulator range %s\n",
				range->name);
			return -EINVAL;
		}
		if (range->num_queues > 32) {
			dev_err(kdev->dev,
				"too many queues in accumulator range %s\n",
				range->name);
			return -EINVAL;
		}
	}

	/* figure out list size */
	list_size = info->list_entries;
	list_size *= ACC_LIST_ENTRY_WORDS * sizeof(u32);
	info->list_size = list_size;
	mem_size = PAGE_ALIGN(list_size * 2);
	info->mem_size = mem_size;
	range->acc = devm_kcalloc(kdev->dev, channels, sizeof(*range->acc),
				  GFP_KERNEL);
	if (!range->acc)
		return -ENOMEM;

	for (channel = 0; channel < channels; channel++) {
		acc = range->acc + channel;
		acc->channel = info->start_channel + channel;

		/* allocate memory for the two lists */
		list_mem = alloc_pages_exact(mem_size, GFP_KERNEL | GFP_DMA);
		if (!list_mem)
			return -ENOMEM;

		list_dma = dma_map_single(kdev->dev, list_mem, mem_size,
					  DMA_BIDIRECTIONAL);
		if (dma_mapping_error(kdev->dev, list_dma)) {
			free_pages_exact(list_mem, mem_size);
			return -ENOMEM;
		}

		memset(list_mem, 0, mem_size);
		dma_sync_single_for_device(kdev->dev, list_dma, mem_size,
					   DMA_TO_DEVICE);
		scnprintf(acc->name, sizeof(acc->name), "hwqueue-acc-%d",
			  acc->channel);
		acc->list_cpu[0] = list_mem;
		acc->list_cpu[1] = list_mem + list_size;
		acc->list_dma[0] = list_dma;
		acc->list_dma[1] = list_dma + list_size;
		dev_dbg(kdev->dev, "%s: channel %d, dma %pad, virt %8p\n",
			acc->name, acc->channel, &list_dma, list_mem);
	}

	range->ops = &knav_acc_range_ops;
	return 0;
}
EXPORT_SYMBOL_GPL(knav_init_acc_range);