1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * TI K3 DSP Remote Processor(s) driver
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Copyright (C) 2018-2020 Texas Instruments Incorporated - https://www.ti.com/
6*4882a593Smuzhiyun * Suman Anna <s-anna@ti.com>
7*4882a593Smuzhiyun */
8*4882a593Smuzhiyun
9*4882a593Smuzhiyun #include <linux/io.h>
10*4882a593Smuzhiyun #include <linux/mailbox_client.h>
11*4882a593Smuzhiyun #include <linux/module.h>
12*4882a593Smuzhiyun #include <linux/of_device.h>
13*4882a593Smuzhiyun #include <linux/of_reserved_mem.h>
14*4882a593Smuzhiyun #include <linux/omap-mailbox.h>
15*4882a593Smuzhiyun #include <linux/platform_device.h>
16*4882a593Smuzhiyun #include <linux/remoteproc.h>
17*4882a593Smuzhiyun #include <linux/reset.h>
18*4882a593Smuzhiyun #include <linux/slab.h>
19*4882a593Smuzhiyun
20*4882a593Smuzhiyun #include "omap_remoteproc.h"
21*4882a593Smuzhiyun #include "remoteproc_internal.h"
22*4882a593Smuzhiyun #include "ti_sci_proc.h"
23*4882a593Smuzhiyun
24*4882a593Smuzhiyun #define KEYSTONE_RPROC_LOCAL_ADDRESS_MASK (SZ_16M - 1)
25*4882a593Smuzhiyun
/**
 * struct k3_dsp_mem - internal memory structure
 * @cpu_addr: MPU virtual address of the memory region (ioremap_wc mapping)
 * @bus_addr: Bus address used to access the memory region (SoC view)
 * @dev_addr: Device address of the memory region from DSP view
 * @size: Size of the memory region
 */
struct k3_dsp_mem {
	void __iomem *cpu_addr;
	phys_addr_t bus_addr;
	u32 dev_addr;
	size_t size;
};
39*4882a593Smuzhiyun
/**
 * struct k3_dsp_mem_data - memory definitions for a DSP
 * @name: name for this memory entry (matches the reg-names DT property)
 * @dev_addr: device address for the memory entry (DSP-local view)
 */
struct k3_dsp_mem_data {
	const char *name;
	const u32 dev_addr;
};
49*4882a593Smuzhiyun
/**
 * struct k3_dsp_dev_data - device data structure for a DSP
 * @mems: pointer to memory definitions for a DSP
 * @num_mems: number of memory regions in @mems
 * @boot_align_addr: boot vector address alignment granularity
 * @uses_lreset: flag to denote the need for local reset management
 *               (true for C66x cores with a separate CPU-only reset)
 */
struct k3_dsp_dev_data {
	const struct k3_dsp_mem_data *mems;
	u32 num_mems;
	u32 boot_align_addr;
	bool uses_lreset;
};
63*4882a593Smuzhiyun
/**
 * struct k3_dsp_rproc - k3 DSP remote processor driver structure
 * @dev: cached device pointer
 * @rproc: remoteproc device handle
 * @mem: internal memory regions data
 * @num_mems: number of internal memory regions
 * @rmem: reserved memory regions data (static DDR carveouts)
 * @num_rmems: number of reserved memory regions
 * @reset: reset control handle (local/CPU reset)
 * @data: pointer to DSP-specific device data
 * @tsp: TI-SCI processor control handle
 * @ti_sci: TI-SCI handle (used for module/global reset via dev_ops)
 * @ti_sci_id: TI-SCI device identifier
 * @mbox: mailbox channel handle
 * @client: mailbox client to request the mailbox channel
 */
struct k3_dsp_rproc {
	struct device *dev;
	struct rproc *rproc;
	struct k3_dsp_mem *mem;
	int num_mems;
	struct k3_dsp_mem *rmem;
	int num_rmems;
	struct reset_control *reset;
	const struct k3_dsp_dev_data *data;
	struct ti_sci_proc *tsp;
	const struct ti_sci_handle *ti_sci;
	u32 ti_sci_id;
	struct mbox_chan *mbox;
	struct mbox_client client;
};
95*4882a593Smuzhiyun
96*4882a593Smuzhiyun /**
97*4882a593Smuzhiyun * k3_dsp_rproc_mbox_callback() - inbound mailbox message handler
98*4882a593Smuzhiyun * @client: mailbox client pointer used for requesting the mailbox channel
99*4882a593Smuzhiyun * @data: mailbox payload
100*4882a593Smuzhiyun *
101*4882a593Smuzhiyun * This handler is invoked by the OMAP mailbox driver whenever a mailbox
102*4882a593Smuzhiyun * message is received. Usually, the mailbox payload simply contains
103*4882a593Smuzhiyun * the index of the virtqueue that is kicked by the remote processor,
104*4882a593Smuzhiyun * and we let remoteproc core handle it.
105*4882a593Smuzhiyun *
106*4882a593Smuzhiyun * In addition to virtqueue indices, we also have some out-of-band values
107*4882a593Smuzhiyun * that indicate different events. Those values are deliberately very
108*4882a593Smuzhiyun * large so they don't coincide with virtqueue indices.
109*4882a593Smuzhiyun */
k3_dsp_rproc_mbox_callback(struct mbox_client * client,void * data)110*4882a593Smuzhiyun static void k3_dsp_rproc_mbox_callback(struct mbox_client *client, void *data)
111*4882a593Smuzhiyun {
112*4882a593Smuzhiyun struct k3_dsp_rproc *kproc = container_of(client, struct k3_dsp_rproc,
113*4882a593Smuzhiyun client);
114*4882a593Smuzhiyun struct device *dev = kproc->rproc->dev.parent;
115*4882a593Smuzhiyun const char *name = kproc->rproc->name;
116*4882a593Smuzhiyun u32 msg = omap_mbox_message(data);
117*4882a593Smuzhiyun
118*4882a593Smuzhiyun dev_dbg(dev, "mbox msg: 0x%x\n", msg);
119*4882a593Smuzhiyun
120*4882a593Smuzhiyun switch (msg) {
121*4882a593Smuzhiyun case RP_MBOX_CRASH:
122*4882a593Smuzhiyun /*
123*4882a593Smuzhiyun * remoteproc detected an exception, but error recovery is not
124*4882a593Smuzhiyun * supported. So, just log this for now
125*4882a593Smuzhiyun */
126*4882a593Smuzhiyun dev_err(dev, "K3 DSP rproc %s crashed\n", name);
127*4882a593Smuzhiyun break;
128*4882a593Smuzhiyun case RP_MBOX_ECHO_REPLY:
129*4882a593Smuzhiyun dev_info(dev, "received echo reply from %s\n", name);
130*4882a593Smuzhiyun break;
131*4882a593Smuzhiyun default:
132*4882a593Smuzhiyun /* silently handle all other valid messages */
133*4882a593Smuzhiyun if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
134*4882a593Smuzhiyun return;
135*4882a593Smuzhiyun if (msg > kproc->rproc->max_notifyid) {
136*4882a593Smuzhiyun dev_dbg(dev, "dropping unknown message 0x%x", msg);
137*4882a593Smuzhiyun return;
138*4882a593Smuzhiyun }
139*4882a593Smuzhiyun /* msg contains the index of the triggered vring */
140*4882a593Smuzhiyun if (rproc_vq_interrupt(kproc->rproc, msg) == IRQ_NONE)
141*4882a593Smuzhiyun dev_dbg(dev, "no message was found in vqid %d\n", msg);
142*4882a593Smuzhiyun }
143*4882a593Smuzhiyun }
144*4882a593Smuzhiyun
145*4882a593Smuzhiyun /*
146*4882a593Smuzhiyun * Kick the remote processor to notify about pending unprocessed messages.
147*4882a593Smuzhiyun * The vqid usage is not used and is inconsequential, as the kick is performed
148*4882a593Smuzhiyun * through a simulated GPIO (a bit in an IPC interrupt-triggering register),
149*4882a593Smuzhiyun * the remote processor is expected to process both its Tx and Rx virtqueues.
150*4882a593Smuzhiyun */
k3_dsp_rproc_kick(struct rproc * rproc,int vqid)151*4882a593Smuzhiyun static void k3_dsp_rproc_kick(struct rproc *rproc, int vqid)
152*4882a593Smuzhiyun {
153*4882a593Smuzhiyun struct k3_dsp_rproc *kproc = rproc->priv;
154*4882a593Smuzhiyun struct device *dev = rproc->dev.parent;
155*4882a593Smuzhiyun mbox_msg_t msg = (mbox_msg_t)vqid;
156*4882a593Smuzhiyun int ret;
157*4882a593Smuzhiyun
158*4882a593Smuzhiyun /* send the index of the triggered virtqueue in the mailbox payload */
159*4882a593Smuzhiyun ret = mbox_send_message(kproc->mbox, (void *)msg);
160*4882a593Smuzhiyun if (ret < 0)
161*4882a593Smuzhiyun dev_err(dev, "failed to send mailbox message, status = %d\n",
162*4882a593Smuzhiyun ret);
163*4882a593Smuzhiyun }
164*4882a593Smuzhiyun
165*4882a593Smuzhiyun /* Put the DSP processor into reset */
k3_dsp_rproc_reset(struct k3_dsp_rproc * kproc)166*4882a593Smuzhiyun static int k3_dsp_rproc_reset(struct k3_dsp_rproc *kproc)
167*4882a593Smuzhiyun {
168*4882a593Smuzhiyun struct device *dev = kproc->dev;
169*4882a593Smuzhiyun int ret;
170*4882a593Smuzhiyun
171*4882a593Smuzhiyun ret = reset_control_assert(kproc->reset);
172*4882a593Smuzhiyun if (ret) {
173*4882a593Smuzhiyun dev_err(dev, "local-reset assert failed, ret = %d\n", ret);
174*4882a593Smuzhiyun return ret;
175*4882a593Smuzhiyun }
176*4882a593Smuzhiyun
177*4882a593Smuzhiyun if (kproc->data->uses_lreset)
178*4882a593Smuzhiyun return ret;
179*4882a593Smuzhiyun
180*4882a593Smuzhiyun ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
181*4882a593Smuzhiyun kproc->ti_sci_id);
182*4882a593Smuzhiyun if (ret) {
183*4882a593Smuzhiyun dev_err(dev, "module-reset assert failed, ret = %d\n", ret);
184*4882a593Smuzhiyun if (reset_control_deassert(kproc->reset))
185*4882a593Smuzhiyun dev_warn(dev, "local-reset deassert back failed\n");
186*4882a593Smuzhiyun }
187*4882a593Smuzhiyun
188*4882a593Smuzhiyun return ret;
189*4882a593Smuzhiyun }
190*4882a593Smuzhiyun
191*4882a593Smuzhiyun /* Release the DSP processor from reset */
k3_dsp_rproc_release(struct k3_dsp_rproc * kproc)192*4882a593Smuzhiyun static int k3_dsp_rproc_release(struct k3_dsp_rproc *kproc)
193*4882a593Smuzhiyun {
194*4882a593Smuzhiyun struct device *dev = kproc->dev;
195*4882a593Smuzhiyun int ret;
196*4882a593Smuzhiyun
197*4882a593Smuzhiyun if (kproc->data->uses_lreset)
198*4882a593Smuzhiyun goto lreset;
199*4882a593Smuzhiyun
200*4882a593Smuzhiyun ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
201*4882a593Smuzhiyun kproc->ti_sci_id);
202*4882a593Smuzhiyun if (ret) {
203*4882a593Smuzhiyun dev_err(dev, "module-reset deassert failed, ret = %d\n", ret);
204*4882a593Smuzhiyun return ret;
205*4882a593Smuzhiyun }
206*4882a593Smuzhiyun
207*4882a593Smuzhiyun lreset:
208*4882a593Smuzhiyun ret = reset_control_deassert(kproc->reset);
209*4882a593Smuzhiyun if (ret) {
210*4882a593Smuzhiyun dev_err(dev, "local-reset deassert failed, ret = %d\n", ret);
211*4882a593Smuzhiyun if (kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
212*4882a593Smuzhiyun kproc->ti_sci_id))
213*4882a593Smuzhiyun dev_warn(dev, "module-reset assert back failed\n");
214*4882a593Smuzhiyun }
215*4882a593Smuzhiyun
216*4882a593Smuzhiyun return ret;
217*4882a593Smuzhiyun }
218*4882a593Smuzhiyun
219*4882a593Smuzhiyun /*
220*4882a593Smuzhiyun * The C66x DSP cores have a local reset that affects only the CPU, and a
221*4882a593Smuzhiyun * generic module reset that powers on the device and allows the DSP internal
222*4882a593Smuzhiyun * memories to be accessed while the local reset is asserted. This function is
223*4882a593Smuzhiyun * used to release the global reset on C66x DSPs to allow loading into the DSP
224*4882a593Smuzhiyun * internal RAMs. The .prepare() ops is invoked by remoteproc core before any
225*4882a593Smuzhiyun * firmware loading, and is followed by the .start() ops after loading to
226*4882a593Smuzhiyun * actually let the C66x DSP cores run.
227*4882a593Smuzhiyun */
k3_dsp_rproc_prepare(struct rproc * rproc)228*4882a593Smuzhiyun static int k3_dsp_rproc_prepare(struct rproc *rproc)
229*4882a593Smuzhiyun {
230*4882a593Smuzhiyun struct k3_dsp_rproc *kproc = rproc->priv;
231*4882a593Smuzhiyun struct device *dev = kproc->dev;
232*4882a593Smuzhiyun int ret;
233*4882a593Smuzhiyun
234*4882a593Smuzhiyun ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
235*4882a593Smuzhiyun kproc->ti_sci_id);
236*4882a593Smuzhiyun if (ret)
237*4882a593Smuzhiyun dev_err(dev, "module-reset deassert failed, cannot enable internal RAM loading, ret = %d\n",
238*4882a593Smuzhiyun ret);
239*4882a593Smuzhiyun
240*4882a593Smuzhiyun return ret;
241*4882a593Smuzhiyun }
242*4882a593Smuzhiyun
243*4882a593Smuzhiyun /*
244*4882a593Smuzhiyun * This function implements the .unprepare() ops and performs the complimentary
245*4882a593Smuzhiyun * operations to that of the .prepare() ops. The function is used to assert the
246*4882a593Smuzhiyun * global reset on applicable C66x cores. This completes the second portion of
247*4882a593Smuzhiyun * powering down the C66x DSP cores. The cores themselves are only halted in the
248*4882a593Smuzhiyun * .stop() callback through the local reset, and the .unprepare() ops is invoked
249*4882a593Smuzhiyun * by the remoteproc core after the remoteproc is stopped to balance the global
250*4882a593Smuzhiyun * reset.
251*4882a593Smuzhiyun */
k3_dsp_rproc_unprepare(struct rproc * rproc)252*4882a593Smuzhiyun static int k3_dsp_rproc_unprepare(struct rproc *rproc)
253*4882a593Smuzhiyun {
254*4882a593Smuzhiyun struct k3_dsp_rproc *kproc = rproc->priv;
255*4882a593Smuzhiyun struct device *dev = kproc->dev;
256*4882a593Smuzhiyun int ret;
257*4882a593Smuzhiyun
258*4882a593Smuzhiyun ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
259*4882a593Smuzhiyun kproc->ti_sci_id);
260*4882a593Smuzhiyun if (ret)
261*4882a593Smuzhiyun dev_err(dev, "module-reset assert failed, ret = %d\n", ret);
262*4882a593Smuzhiyun
263*4882a593Smuzhiyun return ret;
264*4882a593Smuzhiyun }
265*4882a593Smuzhiyun
266*4882a593Smuzhiyun /*
267*4882a593Smuzhiyun * Power up the DSP remote processor.
268*4882a593Smuzhiyun *
269*4882a593Smuzhiyun * This function will be invoked only after the firmware for this rproc
270*4882a593Smuzhiyun * was loaded, parsed successfully, and all of its resource requirements
271*4882a593Smuzhiyun * were met.
272*4882a593Smuzhiyun */
k3_dsp_rproc_start(struct rproc * rproc)273*4882a593Smuzhiyun static int k3_dsp_rproc_start(struct rproc *rproc)
274*4882a593Smuzhiyun {
275*4882a593Smuzhiyun struct k3_dsp_rproc *kproc = rproc->priv;
276*4882a593Smuzhiyun struct mbox_client *client = &kproc->client;
277*4882a593Smuzhiyun struct device *dev = kproc->dev;
278*4882a593Smuzhiyun u32 boot_addr;
279*4882a593Smuzhiyun int ret;
280*4882a593Smuzhiyun
281*4882a593Smuzhiyun client->dev = dev;
282*4882a593Smuzhiyun client->tx_done = NULL;
283*4882a593Smuzhiyun client->rx_callback = k3_dsp_rproc_mbox_callback;
284*4882a593Smuzhiyun client->tx_block = false;
285*4882a593Smuzhiyun client->knows_txdone = false;
286*4882a593Smuzhiyun
287*4882a593Smuzhiyun kproc->mbox = mbox_request_channel(client, 0);
288*4882a593Smuzhiyun if (IS_ERR(kproc->mbox)) {
289*4882a593Smuzhiyun ret = -EBUSY;
290*4882a593Smuzhiyun dev_err(dev, "mbox_request_channel failed: %ld\n",
291*4882a593Smuzhiyun PTR_ERR(kproc->mbox));
292*4882a593Smuzhiyun return ret;
293*4882a593Smuzhiyun }
294*4882a593Smuzhiyun
295*4882a593Smuzhiyun /*
296*4882a593Smuzhiyun * Ping the remote processor, this is only for sanity-sake for now;
297*4882a593Smuzhiyun * there is no functional effect whatsoever.
298*4882a593Smuzhiyun *
299*4882a593Smuzhiyun * Note that the reply will _not_ arrive immediately: this message
300*4882a593Smuzhiyun * will wait in the mailbox fifo until the remote processor is booted.
301*4882a593Smuzhiyun */
302*4882a593Smuzhiyun ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
303*4882a593Smuzhiyun if (ret < 0) {
304*4882a593Smuzhiyun dev_err(dev, "mbox_send_message failed: %d\n", ret);
305*4882a593Smuzhiyun goto put_mbox;
306*4882a593Smuzhiyun }
307*4882a593Smuzhiyun
308*4882a593Smuzhiyun boot_addr = rproc->bootaddr;
309*4882a593Smuzhiyun if (boot_addr & (kproc->data->boot_align_addr - 1)) {
310*4882a593Smuzhiyun dev_err(dev, "invalid boot address 0x%x, must be aligned on a 0x%x boundary\n",
311*4882a593Smuzhiyun boot_addr, kproc->data->boot_align_addr);
312*4882a593Smuzhiyun ret = -EINVAL;
313*4882a593Smuzhiyun goto put_mbox;
314*4882a593Smuzhiyun }
315*4882a593Smuzhiyun
316*4882a593Smuzhiyun dev_err(dev, "booting DSP core using boot addr = 0x%x\n", boot_addr);
317*4882a593Smuzhiyun ret = ti_sci_proc_set_config(kproc->tsp, boot_addr, 0, 0);
318*4882a593Smuzhiyun if (ret)
319*4882a593Smuzhiyun goto put_mbox;
320*4882a593Smuzhiyun
321*4882a593Smuzhiyun ret = k3_dsp_rproc_release(kproc);
322*4882a593Smuzhiyun if (ret)
323*4882a593Smuzhiyun goto put_mbox;
324*4882a593Smuzhiyun
325*4882a593Smuzhiyun return 0;
326*4882a593Smuzhiyun
327*4882a593Smuzhiyun put_mbox:
328*4882a593Smuzhiyun mbox_free_channel(kproc->mbox);
329*4882a593Smuzhiyun return ret;
330*4882a593Smuzhiyun }
331*4882a593Smuzhiyun
/*
 * Stop the DSP remote processor.
 *
 * This function puts the DSP processor into reset, and finishes processing
 * of any pending messages. The mailbox channel is released first so that no
 * further kicks can arrive while the core is being reset.
 */
static int k3_dsp_rproc_stop(struct rproc *rproc)
{
	struct k3_dsp_rproc *kproc = rproc->priv;

	mbox_free_channel(kproc->mbox);

	k3_dsp_rproc_reset(kproc);

	return 0;
}
348*4882a593Smuzhiyun
/*
 * Custom function to translate a DSP device address (internal RAMs only) to a
 * kernel virtual address. The DSPs can access their RAMs at either an internal
 * address visible only from a DSP, or at the SoC-level bus address. Both these
 * addresses need to be looked through for translation. The translated addresses
 * can be used either by the remoteproc core for loading (when using kernel
 * remoteproc loader), or by any rpmsg bus drivers.
 *
 * Returns the kernel virtual address on a successful match, NULL otherwise.
 * NOTE(review): @is_iomem is never set by this implementation — confirm
 * callers treat the default as intended.
 */
static void *k3_dsp_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
	struct k3_dsp_rproc *kproc = rproc->priv;
	void __iomem *va = NULL;
	phys_addr_t bus_addr;
	u32 dev_addr, offset;
	size_t size;
	int i;

	if (len == 0)
		return NULL;

	for (i = 0; i < kproc->num_mems; i++) {
		bus_addr = kproc->mem[i].bus_addr;
		dev_addr = kproc->mem[i].dev_addr;
		size = kproc->mem[i].size;

		/* addresses below the 16 MB mask are DSP-internal views */
		if (da < KEYSTONE_RPROC_LOCAL_ADDRESS_MASK) {
			/* handle DSP-view addresses */
			if (da >= dev_addr &&
			    ((da + len) <= (dev_addr + size))) {
				offset = da - dev_addr;
				va = kproc->mem[i].cpu_addr + offset;
				return (__force void *)va;
			}
		} else {
			/* handle SoC-view addresses */
			if (da >= bus_addr &&
			    (da + len) <= (bus_addr + size)) {
				offset = da - bus_addr;
				va = kproc->mem[i].cpu_addr + offset;
				return (__force void *)va;
			}
		}
	}

	/* handle static DDR reserved memory regions */
	for (i = 0; i < kproc->num_rmems; i++) {
		dev_addr = kproc->rmem[i].dev_addr;
		size = kproc->rmem[i].size;

		if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
			offset = da - dev_addr;
			va = kproc->rmem[i].cpu_addr + offset;
			return (__force void *)va;
		}
	}

	return NULL;
}
407*4882a593Smuzhiyun
/*
 * remoteproc core callbacks; .prepare/.unprepare are installed at probe time
 * only for devices that use a local reset (see uses_lreset handling in probe).
 */
static const struct rproc_ops k3_dsp_rproc_ops = {
	.start		= k3_dsp_rproc_start,
	.stop		= k3_dsp_rproc_stop,
	.kick		= k3_dsp_rproc_kick,
	.da_to_va	= k3_dsp_rproc_da_to_va,
};
414*4882a593Smuzhiyun
/*
 * Parse and map the DSP internal memory regions named in the device data.
 * Each region is requested and ioremap'ed (write-combined) through devm
 * helpers, so no explicit teardown path is needed. On success, populates
 * kproc->mem/num_mems; returns 0 or a negative errno on failure.
 */
static int k3_dsp_rproc_of_get_memories(struct platform_device *pdev,
					struct k3_dsp_rproc *kproc)
{
	const struct k3_dsp_dev_data *data = kproc->data;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int num_mems = 0;
	int i;

	num_mems = kproc->data->num_mems;
	kproc->mem = devm_kcalloc(kproc->dev, num_mems,
				  sizeof(*kproc->mem), GFP_KERNEL);
	if (!kproc->mem)
		return -ENOMEM;

	for (i = 0; i < num_mems; i++) {
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   data->mems[i].name);
		if (!res) {
			dev_err(dev, "found no memory resource for %s\n",
				data->mems[i].name);
			return -EINVAL;
		}
		if (!devm_request_mem_region(dev, res->start,
					     resource_size(res),
					     dev_name(dev))) {
			dev_err(dev, "could not request %s region for resource\n",
				data->mems[i].name);
			return -EBUSY;
		}

		kproc->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
							 resource_size(res));
		if (!kproc->mem[i].cpu_addr) {
			dev_err(dev, "failed to map %s memory\n",
				data->mems[i].name);
			return -ENOMEM;
		}
		kproc->mem[i].bus_addr = res->start;
		kproc->mem[i].dev_addr = data->mems[i].dev_addr;
		kproc->mem[i].size = resource_size(res);

		dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
			data->mems[i].name, &kproc->mem[i].bus_addr,
			kproc->mem[i].size, kproc->mem[i].cpu_addr,
			kproc->mem[i].dev_addr);
	}
	kproc->num_mems = num_mems;

	return 0;
}
466*4882a593Smuzhiyun
/*
 * Initialize the reserved memory regions listed in the "memory-region" DT
 * property. Region 0 becomes the device's DMA pool (for vrings); all
 * remaining regions are mapped as static carveouts into kproc->rmem.
 * On failure, any mappings made so far are unwound. Returns 0 or a
 * negative errno.
 */
static int k3_dsp_reserved_mem_init(struct k3_dsp_rproc *kproc)
{
	struct device *dev = kproc->dev;
	struct device_node *np = dev->of_node;
	struct device_node *rmem_np;
	struct reserved_mem *rmem;
	int num_rmems;
	int ret, i;

	num_rmems = of_property_count_elems_of_size(np, "memory-region",
						    sizeof(phandle));
	if (num_rmems <= 0) {
		dev_err(dev, "device does not reserved memory regions, ret = %d\n",
			num_rmems);
		return -EINVAL;
	}
	if (num_rmems < 2) {
		dev_err(dev, "device needs atleast two memory regions to be defined, num = %d\n",
			num_rmems);
		return -EINVAL;
	}

	/* use reserved memory region 0 for vring DMA allocations */
	ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
	if (ret) {
		dev_err(dev, "device cannot initialize DMA pool, ret = %d\n",
			ret);
		return ret;
	}

	/* region 0 is the DMA pool; only the rest are carveouts */
	num_rmems--;
	kproc->rmem = kcalloc(num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
	if (!kproc->rmem) {
		ret = -ENOMEM;
		goto release_rmem;
	}

	/* use remaining reserved memory regions for static carveouts */
	for (i = 0; i < num_rmems; i++) {
		rmem_np = of_parse_phandle(np, "memory-region", i + 1);
		if (!rmem_np) {
			ret = -EINVAL;
			goto unmap_rmem;
		}

		rmem = of_reserved_mem_lookup(rmem_np);
		if (!rmem) {
			of_node_put(rmem_np);
			ret = -EINVAL;
			goto unmap_rmem;
		}
		of_node_put(rmem_np);

		kproc->rmem[i].bus_addr = rmem->base;
		/* 64-bit address regions currently not supported */
		kproc->rmem[i].dev_addr = (u32)rmem->base;
		kproc->rmem[i].size = rmem->size;
		kproc->rmem[i].cpu_addr = ioremap_wc(rmem->base, rmem->size);
		if (!kproc->rmem[i].cpu_addr) {
			dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
				i + 1, &rmem->base, &rmem->size);
			ret = -ENOMEM;
			goto unmap_rmem;
		}

		dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
			i + 1, &kproc->rmem[i].bus_addr,
			kproc->rmem[i].size, kproc->rmem[i].cpu_addr,
			kproc->rmem[i].dev_addr);
	}
	kproc->num_rmems = num_rmems;

	return 0;

unmap_rmem:
	/* unwind only the regions mapped so far */
	for (i--; i >= 0; i--)
		iounmap(kproc->rmem[i].cpu_addr);
	kfree(kproc->rmem);
release_rmem:
	of_reserved_mem_device_release(kproc->dev);
	return ret;
}
549*4882a593Smuzhiyun
k3_dsp_reserved_mem_exit(struct k3_dsp_rproc * kproc)550*4882a593Smuzhiyun static void k3_dsp_reserved_mem_exit(struct k3_dsp_rproc *kproc)
551*4882a593Smuzhiyun {
552*4882a593Smuzhiyun int i;
553*4882a593Smuzhiyun
554*4882a593Smuzhiyun for (i = 0; i < kproc->num_rmems; i++)
555*4882a593Smuzhiyun iounmap(kproc->rmem[i].cpu_addr);
556*4882a593Smuzhiyun kfree(kproc->rmem);
557*4882a593Smuzhiyun
558*4882a593Smuzhiyun of_reserved_mem_device_release(kproc->dev);
559*4882a593Smuzhiyun }
560*4882a593Smuzhiyun
561*4882a593Smuzhiyun static
k3_dsp_rproc_of_get_tsp(struct device * dev,const struct ti_sci_handle * sci)562*4882a593Smuzhiyun struct ti_sci_proc *k3_dsp_rproc_of_get_tsp(struct device *dev,
563*4882a593Smuzhiyun const struct ti_sci_handle *sci)
564*4882a593Smuzhiyun {
565*4882a593Smuzhiyun struct ti_sci_proc *tsp;
566*4882a593Smuzhiyun u32 temp[2];
567*4882a593Smuzhiyun int ret;
568*4882a593Smuzhiyun
569*4882a593Smuzhiyun ret = of_property_read_u32_array(dev->of_node, "ti,sci-proc-ids",
570*4882a593Smuzhiyun temp, 2);
571*4882a593Smuzhiyun if (ret < 0)
572*4882a593Smuzhiyun return ERR_PTR(ret);
573*4882a593Smuzhiyun
574*4882a593Smuzhiyun tsp = kzalloc(sizeof(*tsp), GFP_KERNEL);
575*4882a593Smuzhiyun if (!tsp)
576*4882a593Smuzhiyun return ERR_PTR(-ENOMEM);
577*4882a593Smuzhiyun
578*4882a593Smuzhiyun tsp->dev = dev;
579*4882a593Smuzhiyun tsp->sci = sci;
580*4882a593Smuzhiyun tsp->ops = &sci->ops.proc_ops;
581*4882a593Smuzhiyun tsp->proc_id = temp[0];
582*4882a593Smuzhiyun tsp->host_id = temp[1];
583*4882a593Smuzhiyun
584*4882a593Smuzhiyun return tsp;
585*4882a593Smuzhiyun }
586*4882a593Smuzhiyun
k3_dsp_rproc_probe(struct platform_device * pdev)587*4882a593Smuzhiyun static int k3_dsp_rproc_probe(struct platform_device *pdev)
588*4882a593Smuzhiyun {
589*4882a593Smuzhiyun struct device *dev = &pdev->dev;
590*4882a593Smuzhiyun struct device_node *np = dev->of_node;
591*4882a593Smuzhiyun const struct k3_dsp_dev_data *data;
592*4882a593Smuzhiyun struct k3_dsp_rproc *kproc;
593*4882a593Smuzhiyun struct rproc *rproc;
594*4882a593Smuzhiyun const char *fw_name;
595*4882a593Smuzhiyun int ret = 0;
596*4882a593Smuzhiyun int ret1;
597*4882a593Smuzhiyun
598*4882a593Smuzhiyun data = of_device_get_match_data(dev);
599*4882a593Smuzhiyun if (!data)
600*4882a593Smuzhiyun return -ENODEV;
601*4882a593Smuzhiyun
602*4882a593Smuzhiyun ret = rproc_of_parse_firmware(dev, 0, &fw_name);
603*4882a593Smuzhiyun if (ret) {
604*4882a593Smuzhiyun dev_err(dev, "failed to parse firmware-name property, ret = %d\n",
605*4882a593Smuzhiyun ret);
606*4882a593Smuzhiyun return ret;
607*4882a593Smuzhiyun }
608*4882a593Smuzhiyun
609*4882a593Smuzhiyun rproc = rproc_alloc(dev, dev_name(dev), &k3_dsp_rproc_ops, fw_name,
610*4882a593Smuzhiyun sizeof(*kproc));
611*4882a593Smuzhiyun if (!rproc)
612*4882a593Smuzhiyun return -ENOMEM;
613*4882a593Smuzhiyun
614*4882a593Smuzhiyun rproc->has_iommu = false;
615*4882a593Smuzhiyun rproc->recovery_disabled = true;
616*4882a593Smuzhiyun if (data->uses_lreset) {
617*4882a593Smuzhiyun rproc->ops->prepare = k3_dsp_rproc_prepare;
618*4882a593Smuzhiyun rproc->ops->unprepare = k3_dsp_rproc_unprepare;
619*4882a593Smuzhiyun }
620*4882a593Smuzhiyun kproc = rproc->priv;
621*4882a593Smuzhiyun kproc->rproc = rproc;
622*4882a593Smuzhiyun kproc->dev = dev;
623*4882a593Smuzhiyun kproc->data = data;
624*4882a593Smuzhiyun
625*4882a593Smuzhiyun kproc->ti_sci = ti_sci_get_by_phandle(np, "ti,sci");
626*4882a593Smuzhiyun if (IS_ERR(kproc->ti_sci)) {
627*4882a593Smuzhiyun ret = PTR_ERR(kproc->ti_sci);
628*4882a593Smuzhiyun if (ret != -EPROBE_DEFER) {
629*4882a593Smuzhiyun dev_err(dev, "failed to get ti-sci handle, ret = %d\n",
630*4882a593Smuzhiyun ret);
631*4882a593Smuzhiyun }
632*4882a593Smuzhiyun kproc->ti_sci = NULL;
633*4882a593Smuzhiyun goto free_rproc;
634*4882a593Smuzhiyun }
635*4882a593Smuzhiyun
636*4882a593Smuzhiyun ret = of_property_read_u32(np, "ti,sci-dev-id", &kproc->ti_sci_id);
637*4882a593Smuzhiyun if (ret) {
638*4882a593Smuzhiyun dev_err(dev, "missing 'ti,sci-dev-id' property\n");
639*4882a593Smuzhiyun goto put_sci;
640*4882a593Smuzhiyun }
641*4882a593Smuzhiyun
642*4882a593Smuzhiyun kproc->reset = devm_reset_control_get_exclusive(dev, NULL);
643*4882a593Smuzhiyun if (IS_ERR(kproc->reset)) {
644*4882a593Smuzhiyun ret = PTR_ERR(kproc->reset);
645*4882a593Smuzhiyun dev_err(dev, "failed to get reset, status = %d\n", ret);
646*4882a593Smuzhiyun goto put_sci;
647*4882a593Smuzhiyun }
648*4882a593Smuzhiyun
649*4882a593Smuzhiyun kproc->tsp = k3_dsp_rproc_of_get_tsp(dev, kproc->ti_sci);
650*4882a593Smuzhiyun if (IS_ERR(kproc->tsp)) {
651*4882a593Smuzhiyun dev_err(dev, "failed to construct ti-sci proc control, ret = %d\n",
652*4882a593Smuzhiyun ret);
653*4882a593Smuzhiyun ret = PTR_ERR(kproc->tsp);
654*4882a593Smuzhiyun goto put_sci;
655*4882a593Smuzhiyun }
656*4882a593Smuzhiyun
657*4882a593Smuzhiyun ret = ti_sci_proc_request(kproc->tsp);
658*4882a593Smuzhiyun if (ret < 0) {
659*4882a593Smuzhiyun dev_err(dev, "ti_sci_proc_request failed, ret = %d\n", ret);
660*4882a593Smuzhiyun goto free_tsp;
661*4882a593Smuzhiyun }
662*4882a593Smuzhiyun
663*4882a593Smuzhiyun ret = k3_dsp_rproc_of_get_memories(pdev, kproc);
664*4882a593Smuzhiyun if (ret)
665*4882a593Smuzhiyun goto release_tsp;
666*4882a593Smuzhiyun
667*4882a593Smuzhiyun ret = k3_dsp_reserved_mem_init(kproc);
668*4882a593Smuzhiyun if (ret) {
669*4882a593Smuzhiyun dev_err(dev, "reserved memory init failed, ret = %d\n", ret);
670*4882a593Smuzhiyun goto release_tsp;
671*4882a593Smuzhiyun }
672*4882a593Smuzhiyun
673*4882a593Smuzhiyun /*
674*4882a593Smuzhiyun * ensure the DSP local reset is asserted to ensure the DSP doesn't
675*4882a593Smuzhiyun * execute bogus code in .prepare() when the module reset is released.
676*4882a593Smuzhiyun */
677*4882a593Smuzhiyun if (data->uses_lreset) {
678*4882a593Smuzhiyun ret = reset_control_status(kproc->reset);
679*4882a593Smuzhiyun if (ret < 0) {
680*4882a593Smuzhiyun dev_err(dev, "failed to get reset status, status = %d\n",
681*4882a593Smuzhiyun ret);
682*4882a593Smuzhiyun goto release_mem;
683*4882a593Smuzhiyun } else if (ret == 0) {
684*4882a593Smuzhiyun dev_warn(dev, "local reset is deasserted for device\n");
685*4882a593Smuzhiyun k3_dsp_rproc_reset(kproc);
686*4882a593Smuzhiyun }
687*4882a593Smuzhiyun }
688*4882a593Smuzhiyun
689*4882a593Smuzhiyun ret = rproc_add(rproc);
690*4882a593Smuzhiyun if (ret) {
691*4882a593Smuzhiyun dev_err(dev, "failed to add register device with remoteproc core, status = %d\n",
692*4882a593Smuzhiyun ret);
693*4882a593Smuzhiyun goto release_mem;
694*4882a593Smuzhiyun }
695*4882a593Smuzhiyun
696*4882a593Smuzhiyun platform_set_drvdata(pdev, kproc);
697*4882a593Smuzhiyun
698*4882a593Smuzhiyun return 0;
699*4882a593Smuzhiyun
700*4882a593Smuzhiyun release_mem:
701*4882a593Smuzhiyun k3_dsp_reserved_mem_exit(kproc);
702*4882a593Smuzhiyun release_tsp:
703*4882a593Smuzhiyun ret1 = ti_sci_proc_release(kproc->tsp);
704*4882a593Smuzhiyun if (ret1)
705*4882a593Smuzhiyun dev_err(dev, "failed to release proc, ret = %d\n", ret1);
706*4882a593Smuzhiyun free_tsp:
707*4882a593Smuzhiyun kfree(kproc->tsp);
708*4882a593Smuzhiyun put_sci:
709*4882a593Smuzhiyun ret1 = ti_sci_put_handle(kproc->ti_sci);
710*4882a593Smuzhiyun if (ret1)
711*4882a593Smuzhiyun dev_err(dev, "failed to put ti_sci handle, ret = %d\n", ret1);
712*4882a593Smuzhiyun free_rproc:
713*4882a593Smuzhiyun rproc_free(rproc);
714*4882a593Smuzhiyun return ret;
715*4882a593Smuzhiyun }
716*4882a593Smuzhiyun
k3_dsp_rproc_remove(struct platform_device * pdev)717*4882a593Smuzhiyun static int k3_dsp_rproc_remove(struct platform_device *pdev)
718*4882a593Smuzhiyun {
719*4882a593Smuzhiyun struct k3_dsp_rproc *kproc = platform_get_drvdata(pdev);
720*4882a593Smuzhiyun struct device *dev = &pdev->dev;
721*4882a593Smuzhiyun int ret;
722*4882a593Smuzhiyun
723*4882a593Smuzhiyun rproc_del(kproc->rproc);
724*4882a593Smuzhiyun
725*4882a593Smuzhiyun ret = ti_sci_proc_release(kproc->tsp);
726*4882a593Smuzhiyun if (ret)
727*4882a593Smuzhiyun dev_err(dev, "failed to release proc, ret = %d\n", ret);
728*4882a593Smuzhiyun
729*4882a593Smuzhiyun kfree(kproc->tsp);
730*4882a593Smuzhiyun
731*4882a593Smuzhiyun ret = ti_sci_put_handle(kproc->ti_sci);
732*4882a593Smuzhiyun if (ret)
733*4882a593Smuzhiyun dev_err(dev, "failed to put ti_sci handle, ret = %d\n", ret);
734*4882a593Smuzhiyun
735*4882a593Smuzhiyun k3_dsp_reserved_mem_exit(kproc);
736*4882a593Smuzhiyun rproc_free(kproc->rproc);
737*4882a593Smuzhiyun
738*4882a593Smuzhiyun return 0;
739*4882a593Smuzhiyun }
740*4882a593Smuzhiyun
741*4882a593Smuzhiyun static const struct k3_dsp_mem_data c66_mems[] = {
742*4882a593Smuzhiyun { .name = "l2sram", .dev_addr = 0x800000 },
743*4882a593Smuzhiyun { .name = "l1pram", .dev_addr = 0xe00000 },
744*4882a593Smuzhiyun { .name = "l1dram", .dev_addr = 0xf00000 },
745*4882a593Smuzhiyun };
746*4882a593Smuzhiyun
747*4882a593Smuzhiyun /* C71x cores only have a L1P Cache, there are no L1P SRAMs */
748*4882a593Smuzhiyun static const struct k3_dsp_mem_data c71_mems[] = {
749*4882a593Smuzhiyun { .name = "l2sram", .dev_addr = 0x800000 },
750*4882a593Smuzhiyun { .name = "l1dram", .dev_addr = 0xe00000 },
751*4882a593Smuzhiyun };
752*4882a593Smuzhiyun
753*4882a593Smuzhiyun static const struct k3_dsp_dev_data c66_data = {
754*4882a593Smuzhiyun .mems = c66_mems,
755*4882a593Smuzhiyun .num_mems = ARRAY_SIZE(c66_mems),
756*4882a593Smuzhiyun .boot_align_addr = SZ_1K,
757*4882a593Smuzhiyun .uses_lreset = true,
758*4882a593Smuzhiyun };
759*4882a593Smuzhiyun
760*4882a593Smuzhiyun static const struct k3_dsp_dev_data c71_data = {
761*4882a593Smuzhiyun .mems = c71_mems,
762*4882a593Smuzhiyun .num_mems = ARRAY_SIZE(c71_mems),
763*4882a593Smuzhiyun .boot_align_addr = SZ_2M,
764*4882a593Smuzhiyun .uses_lreset = false,
765*4882a593Smuzhiyun };
766*4882a593Smuzhiyun
767*4882a593Smuzhiyun static const struct of_device_id k3_dsp_of_match[] = {
768*4882a593Smuzhiyun { .compatible = "ti,j721e-c66-dsp", .data = &c66_data, },
769*4882a593Smuzhiyun { .compatible = "ti,j721e-c71-dsp", .data = &c71_data, },
770*4882a593Smuzhiyun { /* sentinel */ },
771*4882a593Smuzhiyun };
772*4882a593Smuzhiyun MODULE_DEVICE_TABLE(of, k3_dsp_of_match);
773*4882a593Smuzhiyun
774*4882a593Smuzhiyun static struct platform_driver k3_dsp_rproc_driver = {
775*4882a593Smuzhiyun .probe = k3_dsp_rproc_probe,
776*4882a593Smuzhiyun .remove = k3_dsp_rproc_remove,
777*4882a593Smuzhiyun .driver = {
778*4882a593Smuzhiyun .name = "k3-dsp-rproc",
779*4882a593Smuzhiyun .of_match_table = k3_dsp_of_match,
780*4882a593Smuzhiyun },
781*4882a593Smuzhiyun };
782*4882a593Smuzhiyun
783*4882a593Smuzhiyun module_platform_driver(k3_dsp_rproc_driver);
784*4882a593Smuzhiyun
785*4882a593Smuzhiyun MODULE_AUTHOR("Suman Anna <s-anna@ti.com>");
786*4882a593Smuzhiyun MODULE_LICENSE("GPL v2");
787*4882a593Smuzhiyun MODULE_DESCRIPTION("TI K3 DSP Remoteproc driver");
788