// SPDX-License-Identifier: GPL-2.0-only
/*
 * TI K3 R5F (MCU) Remote Processor driver
 *
 * Copyright (C) 2017-2020 Texas Instruments Incorporated - https://www.ti.com/
 *	Suman Anna <s-anna@ti.com>
 */

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_reserved_mem.h>
#include <linux/omap-mailbox.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/remoteproc.h>
#include <linux/reset.h>
#include <linux/slab.h>

#include "omap_remoteproc.h"
#include "remoteproc_internal.h"
#include "ti_sci_proc.h"

/* This address can either be for ATCM or BTCM with the other at address 0x0 */
#define K3_R5_TCM_DEV_ADDR	0x41010000

/* R5 TI-SCI Processor Configuration Flags */
#define PROC_BOOT_CFG_FLAG_R5_DBG_EN			0x00000001
#define PROC_BOOT_CFG_FLAG_R5_DBG_NIDEN			0x00000002
#define PROC_BOOT_CFG_FLAG_R5_LOCKSTEP			0x00000100
#define PROC_BOOT_CFG_FLAG_R5_TEINIT			0x00000200
#define PROC_BOOT_CFG_FLAG_R5_NMFI_EN			0x00000400
#define PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE		0x00000800
#define PROC_BOOT_CFG_FLAG_R5_BTCM_EN			0x00001000
#define PROC_BOOT_CFG_FLAG_R5_ATCM_EN			0x00002000

/* R5 TI-SCI Processor Control Flags */
#define PROC_BOOT_CTRL_FLAG_R5_CORE_HALT		0x00000001

/* R5 TI-SCI Processor Status Flags */
#define PROC_BOOT_STATUS_FLAG_R5_WFE			0x00000001
#define PROC_BOOT_STATUS_FLAG_R5_WFI			0x00000002
#define PROC_BOOT_STATUS_FLAG_R5_CLK_GATED		0x00000004
#define PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED	0x00000100
/**
 * struct k3_r5_mem - internal memory structure
 * @cpu_addr: MPU virtual address of the memory region
 * @bus_addr: Bus address used to access the memory region
 * @dev_addr: Device address from remoteproc view
 * @size: Size of the memory region
 */
struct k3_r5_mem {
        void __iomem *cpu_addr;
        phys_addr_t bus_addr;
        u32 dev_addr;
        size_t size;
};

enum cluster_mode {
        CLUSTER_MODE_SPLIT = 0,
        CLUSTER_MODE_LOCKSTEP,
};

/**
 * struct k3_r5_cluster - K3 R5F Cluster structure
 * @dev: cached device pointer
 * @mode: Mode to configure the Cluster - Split or LockStep
 * @cores: list of R5 cores within the cluster
 */
struct k3_r5_cluster {
        struct device *dev;
        enum cluster_mode mode;
        struct list_head cores;
};

/**
 * struct k3_r5_core - K3 R5 core structure
 * @elem: linked list item
 * @dev: cached device pointer
 * @rproc: rproc handle representing this core
 * @mem: internal memory regions data
 * @sram: on-chip SRAM memory regions data
 * @num_mems: number of internal memory regions
 * @num_sram: number of on-chip SRAM memory regions
 * @reset: reset control handle
 * @tsp: TI-SCI processor control handle
 * @ti_sci: TI-SCI handle
 * @ti_sci_id: TI-SCI device identifier
 * @atcm_enable: flag to control ATCM enablement
 * @btcm_enable: flag to control BTCM enablement
 * @loczrama: flag to dictate which TCM is at device address 0x0
 */
struct k3_r5_core {
        struct list_head elem;
        struct device *dev;
        struct rproc *rproc;
        struct k3_r5_mem *mem;
        struct k3_r5_mem *sram;
        int num_mems;
        int num_sram;
        struct reset_control *reset;
        struct ti_sci_proc *tsp;
        const struct ti_sci_handle *ti_sci;
        u32 ti_sci_id;
        u32 atcm_enable;
        u32 btcm_enable;
        u32 loczrama;
};

/**
 * struct k3_r5_rproc - K3 remote processor state
 * @dev: cached device pointer
 * @cluster: cached pointer to parent cluster structure
 * @mbox: mailbox channel handle
 * @client: mailbox client to request the mailbox channel
 * @rproc: rproc handle
 * @core: cached pointer to r5 core structure being used
 * @rmem: reserved memory regions data
 * @num_rmems: number of reserved memory regions
 */
struct k3_r5_rproc {
        struct device *dev;
        struct k3_r5_cluster *cluster;
        struct mbox_chan *mbox;
        struct mbox_client client;
        struct rproc *rproc;
        struct k3_r5_core *core;
        struct k3_r5_mem *rmem;
        int num_rmems;
};

/**
 * k3_r5_rproc_mbox_callback() - inbound mailbox message handler
 * @client: mailbox client pointer used for requesting the mailbox channel
 * @data: mailbox payload
 *
 * This handler is invoked by the OMAP mailbox driver whenever a mailbox
 * message is received. Usually, the mailbox payload simply contains
 * the index of the virtqueue that is kicked by the remote processor,
 * and we let remoteproc core handle it.
 *
 * In addition to virtqueue indices, we also have some out-of-band values
 * that indicate different events. Those values are deliberately very
 * large so they don't coincide with virtqueue indices.
 */
static void k3_r5_rproc_mbox_callback(struct mbox_client *client, void *data)
{
        struct k3_r5_rproc *kproc = container_of(client, struct k3_r5_rproc,
                                                 client);
        struct device *dev = kproc->rproc->dev.parent;
        const char *name = kproc->rproc->name;
        u32 msg = omap_mbox_message(data);

        dev_dbg(dev, "mbox msg: 0x%x\n", msg);

        switch (msg) {
        case RP_MBOX_CRASH:
                /*
                 * remoteproc detected an exception, but error recovery is not
                 * supported. So, just log this for now
                 */
                dev_err(dev, "K3 R5F rproc %s crashed\n", name);
                break;
        case RP_MBOX_ECHO_REPLY:
                dev_info(dev, "received echo reply from %s\n", name);
                break;
        default:
                /* silently handle all other valid messages */
                if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
                        return;
                if (msg > kproc->rproc->max_notifyid) {
                        dev_dbg(dev, "dropping unknown message 0x%x", msg);
                        return;
                }
                /* msg contains the index of the triggered vring */
                if (rproc_vq_interrupt(kproc->rproc, msg) == IRQ_NONE)
                        dev_dbg(dev, "no message was found in vqid %d\n", msg);
        }
}

/* kick a virtqueue */
static void k3_r5_rproc_kick(struct rproc *rproc, int vqid)
{
        struct k3_r5_rproc *kproc = rproc->priv;
        struct device *dev = rproc->dev.parent;
        mbox_msg_t msg = (mbox_msg_t)vqid;
        int ret;

        /* send the index of the triggered virtqueue in the mailbox payload */
        ret = mbox_send_message(kproc->mbox, (void *)msg);
        if (ret < 0)
                dev_err(dev, "failed to send mailbox message, status = %d\n",
                        ret);
}

static int k3_r5_split_reset(struct k3_r5_core *core)
{
        int ret;

        ret = reset_control_assert(core->reset);
        if (ret) {
                dev_err(core->dev, "local-reset assert failed, ret = %d\n",
                        ret);
                return ret;
        }

        ret = core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
                                                   core->ti_sci_id);
        if (ret) {
                dev_err(core->dev, "module-reset assert failed, ret = %d\n",
                        ret);
                if (reset_control_deassert(core->reset))
                        dev_warn(core->dev, "local-reset deassert back failed\n");
        }

        return ret;
}

static int k3_r5_split_release(struct k3_r5_core *core)
{
        int ret;

        ret = core->ti_sci->ops.dev_ops.get_device(core->ti_sci,
                                                   core->ti_sci_id);
        if (ret) {
                dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
                        ret);
                return ret;
        }

        ret = reset_control_deassert(core->reset);
        if (ret) {
                dev_err(core->dev, "local-reset deassert failed, ret = %d\n",
                        ret);
                if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
                                                         core->ti_sci_id))
                        dev_warn(core->dev, "module-reset assert back failed\n");
        }

        return ret;
}

static int k3_r5_lockstep_reset(struct k3_r5_cluster *cluster)
{
        struct k3_r5_core *core;
        int ret;

        /* assert local reset on all applicable cores */
        list_for_each_entry(core, &cluster->cores, elem) {
                ret = reset_control_assert(core->reset);
                if (ret) {
                        dev_err(core->dev, "local-reset assert failed, ret = %d\n",
                                ret);
                        core = list_prev_entry(core, elem);
                        goto unroll_local_reset;
                }
        }

        /* disable PSC modules on all applicable cores */
        list_for_each_entry(core, &cluster->cores, elem) {
                ret = core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
                                                           core->ti_sci_id);
                if (ret) {
                        dev_err(core->dev, "module-reset assert failed, ret = %d\n",
                                ret);
                        goto unroll_module_reset;
                }
        }

        return 0;

unroll_module_reset:
        list_for_each_entry_continue_reverse(core, &cluster->cores, elem) {
                if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
                                                         core->ti_sci_id))
                        dev_warn(core->dev, "module-reset assert back failed\n");
        }
        core = list_last_entry(&cluster->cores, struct k3_r5_core, elem);
unroll_local_reset:
        list_for_each_entry_from_reverse(core, &cluster->cores, elem) {
                if (reset_control_deassert(core->reset))
                        dev_warn(core->dev, "local-reset deassert back failed\n");
        }

        return ret;
}

static int k3_r5_lockstep_release(struct k3_r5_cluster *cluster)
{
        struct k3_r5_core *core;
        int ret;

        /* enable PSC modules on all applicable cores */
        list_for_each_entry_reverse(core, &cluster->cores, elem) {
                ret = core->ti_sci->ops.dev_ops.get_device(core->ti_sci,
                                                           core->ti_sci_id);
                if (ret) {
                        dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
                                ret);
                        core = list_next_entry(core, elem);
                        goto unroll_module_reset;
                }
        }

        /* deassert local reset on all applicable cores */
        list_for_each_entry_reverse(core, &cluster->cores, elem) {
                ret = reset_control_deassert(core->reset);
                if (ret) {
                        dev_err(core->dev, "local-reset deassert failed, ret = %d\n",
                                ret);
                        goto unroll_local_reset;
                }
        }

        return 0;

unroll_local_reset:
        list_for_each_entry_continue(core, &cluster->cores, elem) {
                if (reset_control_assert(core->reset))
                        dev_warn(core->dev, "local-reset assert back failed\n");
        }
        core = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
unroll_module_reset:
        list_for_each_entry_from(core, &cluster->cores, elem) {
                if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
                                                         core->ti_sci_id))
                        dev_warn(core->dev, "module-reset assert back failed\n");
        }

        return ret;
}
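
/*
 * Note on ordering (descriptive only, derived from the two functions above):
 * the release path walks the core list in reverse (Core1 before Core0) while
 * the reset path walks it forward (Core0 before Core1), and each error path
 * unwinds only the cores already processed via the
 * list_for_each_entry_continue/_from helpers. This mirrors the start/stop
 * ordering constraints described before k3_r5_rproc_start() and
 * k3_r5_rproc_stop() below.
 */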

static inline int k3_r5_core_halt(struct k3_r5_core *core)
{
        return ti_sci_proc_set_control(core->tsp,
                                       PROC_BOOT_CTRL_FLAG_R5_CORE_HALT, 0);
}

static inline int k3_r5_core_run(struct k3_r5_core *core)
{
        return ti_sci_proc_set_control(core->tsp,
                                       0, PROC_BOOT_CTRL_FLAG_R5_CORE_HALT);
}
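
/*
 * The two helpers above are thin wrappers around the TI-SCI processor control
 * API as used here: ti_sci_proc_set_control(tsp, set_flags, clear_flags) sets
 * the bits in the first mask and clears the bits in the second mask of the
 * processor CTRL flags. For example (illustrative only), halting a core is
 * equivalent to:
 *
 *	ti_sci_proc_set_control(core->tsp, PROC_BOOT_CTRL_FLAG_R5_CORE_HALT, 0);
 *
 * and letting it run clears the same HALT bit.
 */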

/*
 * The R5F cores have controls for both a reset and a halt/run. The code
 * execution from DDR requires the initial boot-strapping code to be run
 * from the internal TCMs. This function is used to release the resets on
 * applicable cores to allow loading into the TCMs. The .prepare() ops is
 * invoked by remoteproc core before any firmware loading, and is followed
 * by the .start() ops after loading to actually let the R5 cores run.
 */
static int k3_r5_rproc_prepare(struct rproc *rproc)
{
        struct k3_r5_rproc *kproc = rproc->priv;
        struct k3_r5_cluster *cluster = kproc->cluster;
        struct k3_r5_core *core = kproc->core;
        struct device *dev = kproc->dev;
        int ret;

        ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP) ?
                k3_r5_lockstep_release(cluster) : k3_r5_split_release(core);
        if (ret) {
                dev_err(dev, "unable to enable cores for TCM loading, ret = %d\n",
                        ret);
                return ret;
        }

        /*
         * Zero out both TCMs unconditionally (access from v8 Arm core is not
         * affected by ATCM & BTCM enable configuration values) so that ECC
         * can be effective on all TCM addresses.
         */
        dev_dbg(dev, "zeroing out ATCM memory\n");
        memset(core->mem[0].cpu_addr, 0x00, core->mem[0].size);

        dev_dbg(dev, "zeroing out BTCM memory\n");
        memset(core->mem[1].cpu_addr, 0x00, core->mem[1].size);

        return 0;
}

/*
 * This function implements the .unprepare() ops and performs the complementary
 * operations to that of the .prepare() ops. The function is used to assert the
 * resets on all applicable cores for the rproc device (depending on LockStep
 * or Split mode). This completes the second portion of powering down the R5F
 * cores. The cores themselves are only halted in the .stop() ops, and the
 * .unprepare() ops is invoked by the remoteproc core after the remoteproc is
 * stopped.
 */
static int k3_r5_rproc_unprepare(struct rproc *rproc)
{
        struct k3_r5_rproc *kproc = rproc->priv;
        struct k3_r5_cluster *cluster = kproc->cluster;
        struct k3_r5_core *core = kproc->core;
        struct device *dev = kproc->dev;
        int ret;

        ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP) ?
                k3_r5_lockstep_reset(cluster) : k3_r5_split_reset(core);
        if (ret)
                dev_err(dev, "unable to disable cores, ret = %d\n", ret);

        return ret;
}

/*
 * The R5F start sequence includes two different operations
 * 1. Configure the boot vector for R5F core(s)
 * 2. Unhalt/Run the R5F core(s)
 *
 * The sequence is different between LockStep and Split modes. The LockStep
 * mode requires the boot vector to be configured only for Core0, after which
 * both cores are unhalted to start the execution - Core1 needs to be unhalted
 * first, followed by Core0. The Split mode requires that Core0 always be
 * maintained in a higher power state than Core1 (implying Core1 can only be
 * started after Core0 is already running).
 */
static int k3_r5_rproc_start(struct rproc *rproc)
{
        struct k3_r5_rproc *kproc = rproc->priv;
        struct k3_r5_cluster *cluster = kproc->cluster;
        struct mbox_client *client = &kproc->client;
        struct device *dev = kproc->dev;
        struct k3_r5_core *core;
        u32 boot_addr;
        int ret;

        client->dev = dev;
        client->tx_done = NULL;
        client->rx_callback = k3_r5_rproc_mbox_callback;
        client->tx_block = false;
        client->knows_txdone = false;

        kproc->mbox = mbox_request_channel(client, 0);
        if (IS_ERR(kproc->mbox)) {
                ret = -EBUSY;
                dev_err(dev, "mbox_request_channel failed: %ld\n",
                        PTR_ERR(kproc->mbox));
                return ret;
        }

        /*
         * Ping the remote processor, this is only for sanity's sake for now;
         * there is no functional effect whatsoever.
         *
         * Note that the reply will _not_ arrive immediately: this message
         * will wait in the mailbox fifo until the remote processor is booted.
         */
        ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
        if (ret < 0) {
                dev_err(dev, "mbox_send_message failed: %d\n", ret);
                goto put_mbox;
        }

        boot_addr = rproc->bootaddr;
        /* TODO: add boot_addr sanity checking */
        dev_dbg(dev, "booting R5F core using boot addr = 0x%x\n", boot_addr);

        /* boot vector need not be programmed for Core1 in LockStep mode */
        core = kproc->core;
        ret = ti_sci_proc_set_config(core->tsp, boot_addr, 0, 0);
        if (ret)
                goto put_mbox;

        /* unhalt/run all applicable cores */
        if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
                list_for_each_entry_reverse(core, &cluster->cores, elem) {
                        ret = k3_r5_core_run(core);
                        if (ret)
                                goto unroll_core_run;
                }
        } else {
                ret = k3_r5_core_run(core);
                if (ret)
                        goto put_mbox;
        }

        return 0;

unroll_core_run:
        list_for_each_entry_continue(core, &cluster->cores, elem) {
                if (k3_r5_core_halt(core))
                        dev_warn(core->dev, "core halt back failed\n");
        }
put_mbox:
        mbox_free_channel(kproc->mbox);
        return ret;
}

/*
 * The R5F stop function includes the following operations
 * 1. Halt R5F core(s)
 *
 * The sequence is different between LockStep and Split modes, and the order
 * in which the cores are operated on is, in general, the reverse of that in
 * the start function. The LockStep mode requires each operation to be
 * performed first on Core0, followed by Core1. The Split mode requires that
 * Core0 always be maintained in a higher power state than Core1 (implying
 * Core1 needs to be stopped before Core0).
 *
 * Note that the R5F halt operation in general is not effective when the R5F
 * core is running, but is needed to make sure the core won't run after
 * deasserting the reset the subsequent time. The asserting of reset can
 * be done here, but is preferred to be done in the .unprepare() ops - this
 * maintains the symmetric behavior between the .start(), .stop(), .prepare()
 * and .unprepare() ops, and also balances them well between sysfs 'state'
 * flow and device bind/unbind or module removal.
 */
static int k3_r5_rproc_stop(struct rproc *rproc)
{
        struct k3_r5_rproc *kproc = rproc->priv;
        struct k3_r5_cluster *cluster = kproc->cluster;
        struct k3_r5_core *core = kproc->core;
        int ret;

        /* halt all applicable cores */
        if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
                list_for_each_entry(core, &cluster->cores, elem) {
                        ret = k3_r5_core_halt(core);
                        if (ret) {
                                core = list_prev_entry(core, elem);
                                goto unroll_core_halt;
                        }
                }
        } else {
                ret = k3_r5_core_halt(core);
                if (ret)
                        goto out;
        }

        mbox_free_channel(kproc->mbox);

        return 0;

unroll_core_halt:
        list_for_each_entry_from_reverse(core, &cluster->cores, elem) {
                if (k3_r5_core_run(core))
                        dev_warn(core->dev, "core run back failed\n");
        }
out:
        return ret;
}

/*
 * Internal Memory translation helper
 *
 * Custom function implementing the rproc .da_to_va ops to provide address
 * translation (device address to kernel virtual address) for the internal
 * RAMs present in the R5F device. The translated addresses can be used
 * either by the remoteproc core for loading, or by any rpmsg bus drivers.
 */
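/*
 * For example (illustrative, using the AM65x-style addresses assumed in this
 * driver): with loczrama = 1, ATCM is at device address 0x0 and BTCM at
 * K3_R5_TCM_DEV_ADDR (0x41010000), so a da of 0x41010010 with a valid length
 * resolves to core->mem[1].cpu_addr + 0x10 via the R5-view check, while the
 * same TCM word can also be reached through its SoC bus address via the
 * SoC-view check.
 */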
static void *k3_r5_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
        struct k3_r5_rproc *kproc = rproc->priv;
        struct k3_r5_core *core = kproc->core;
        void __iomem *va = NULL;
        phys_addr_t bus_addr;
        u32 dev_addr, offset;
        size_t size;
        int i;

        if (len == 0)
                return NULL;

        /* handle both R5 and SoC views of ATCM and BTCM */
        for (i = 0; i < core->num_mems; i++) {
                bus_addr = core->mem[i].bus_addr;
                dev_addr = core->mem[i].dev_addr;
                size = core->mem[i].size;

                /* handle R5-view addresses of TCMs */
                if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
                        offset = da - dev_addr;
                        va = core->mem[i].cpu_addr + offset;
                        return (__force void *)va;
                }

                /* handle SoC-view addresses of TCMs */
                if (da >= bus_addr && ((da + len) <= (bus_addr + size))) {
                        offset = da - bus_addr;
                        va = core->mem[i].cpu_addr + offset;
                        return (__force void *)va;
                }
        }

        /* handle any SRAM regions using SoC-view addresses */
        for (i = 0; i < core->num_sram; i++) {
                dev_addr = core->sram[i].dev_addr;
                size = core->sram[i].size;

                if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
                        offset = da - dev_addr;
                        va = core->sram[i].cpu_addr + offset;
                        return (__force void *)va;
                }
        }

        /* handle static DDR reserved memory regions */
        for (i = 0; i < kproc->num_rmems; i++) {
                dev_addr = kproc->rmem[i].dev_addr;
                size = kproc->rmem[i].size;

                if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
                        offset = da - dev_addr;
                        va = kproc->rmem[i].cpu_addr + offset;
                        return (__force void *)va;
                }
        }

        return NULL;
}

static const struct rproc_ops k3_r5_rproc_ops = {
        .prepare = k3_r5_rproc_prepare,
        .unprepare = k3_r5_rproc_unprepare,
        .start = k3_r5_rproc_start,
        .stop = k3_r5_rproc_stop,
        .kick = k3_r5_rproc_kick,
        .da_to_va = k3_r5_rproc_da_to_va,
};
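
/*
 * A brief sketch of how the remoteproc core is expected to invoke these ops
 * (based on the comments above; not a definitive call graph): rproc_boot()
 * runs .prepare() (release the resets so the TCMs can be loaded), loads the
 * firmware segments using .da_to_va(), then runs .start() (program the boot
 * vector, unhalt the cores). rproc_shutdown() runs .stop() (halt the cores)
 * followed by .unprepare() (assert the resets). .kick() is used at runtime
 * for virtqueue notifications over the mailbox.
 */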

/*
 * Internal R5F Core configuration
 *
 * Each R5FSS has a cluster-level setting for configuring the processor
 * subsystem either in a safety/fault-tolerant LockStep mode or a performance
 * oriented Split mode. Each R5F core has a number of settings to enable or
 * disable each of the TCMs and to control which TCM appears at the R5F core's
 * address 0x0. These settings need to be configured before the resets for the
 * corresponding core are released. These settings are all protected and
 * managed by the System Processor.
 *
 * This function is used to pre-configure these settings for each R5F core, and
 * the configuration is all done through various ti_sci_proc functions that
 * communicate with the System Processor. The function also ensures that both
 * the cores are halted before the .prepare() step.
 *
 * The function is called from k3_r5_cluster_rproc_init() and is invoked either
 * once (in LockStep mode) or twice (in Split mode). Support for LockStep mode
 * is dictated by an eFUSE register bit, and the config settings retrieved from
 * DT are adjusted accordingly as per the permitted cluster mode. All cluster
 * level settings like Cluster mode and TEINIT (exception handling state
 * dictating ARM or Thumb mode) can only be set and retrieved using Core0.
 *
 * The function behavior is different based on the cluster mode. The R5F cores
 * are configured independently as per their individual settings in Split mode.
 * They are identically configured in LockStep mode using the primary Core0
 * settings. However, some individual settings cannot be set in LockStep mode.
 * This is overcome by switching to Split mode initially and then programming
 * both the cores with the same settings, before reconfiguring again for
 * LockStep mode.
 */
static int k3_r5_rproc_configure(struct k3_r5_rproc *kproc)
{
        struct k3_r5_cluster *cluster = kproc->cluster;
        struct device *dev = kproc->dev;
        struct k3_r5_core *core0, *core, *temp;
        u32 ctrl = 0, cfg = 0, stat = 0;
        u32 set_cfg = 0, clr_cfg = 0;
        u64 boot_vec = 0;
        bool lockstep_en;
        int ret;

        core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
        core = (cluster->mode == CLUSTER_MODE_LOCKSTEP) ? core0 : kproc->core;

        ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl,
                                     &stat);
        if (ret < 0)
                return ret;

        dev_dbg(dev, "boot_vector = 0x%llx, cfg = 0x%x ctrl = 0x%x stat = 0x%x\n",
                boot_vec, cfg, ctrl, stat);

        lockstep_en = !!(stat & PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED);
        if (!lockstep_en && cluster->mode == CLUSTER_MODE_LOCKSTEP) {
                dev_err(cluster->dev, "lockstep mode not permitted, force configuring for split-mode\n");
                cluster->mode = CLUSTER_MODE_SPLIT;
        }

        /* always enable ARM mode and set boot vector to 0 */
        boot_vec = 0x0;
        if (core == core0) {
                clr_cfg = PROC_BOOT_CFG_FLAG_R5_TEINIT;
                /*
                 * LockStep configuration bit is Read-only on Split-mode _only_
                 * devices and system firmware will NACK any requests with the
                 * bit configured, so program it only on permitted devices
                 */
                if (lockstep_en)
                        clr_cfg |= PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
        }

        if (core->atcm_enable)
                set_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN;
        else
                clr_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN;

        if (core->btcm_enable)
                set_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN;
        else
                clr_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN;

        if (core->loczrama)
                set_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE;
        else
                clr_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE;

        if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
                /*
                 * work around system firmware limitations to make sure both
                 * cores are programmed symmetrically in LockStep. LockStep
                 * and TEINIT config is only allowed with Core0.
                 */
                list_for_each_entry(temp, &cluster->cores, elem) {
                        ret = k3_r5_core_halt(temp);
                        if (ret)
                                goto out;

                        if (temp != core) {
                                clr_cfg &= ~PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
                                clr_cfg &= ~PROC_BOOT_CFG_FLAG_R5_TEINIT;
                        }
                        ret = ti_sci_proc_set_config(temp->tsp, boot_vec,
                                                     set_cfg, clr_cfg);
                        if (ret)
                                goto out;
                }

                set_cfg = PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
                clr_cfg = 0;
                ret = ti_sci_proc_set_config(core->tsp, boot_vec,
                                             set_cfg, clr_cfg);
        } else {
                ret = k3_r5_core_halt(core);
                if (ret)
                        goto out;

                ret = ti_sci_proc_set_config(core->tsp, boot_vec,
                                             set_cfg, clr_cfg);
        }

out:
        return ret;
}

static int k3_r5_reserved_mem_init(struct k3_r5_rproc *kproc)
{
        struct device *dev = kproc->dev;
        struct device_node *np = dev_of_node(dev);
        struct device_node *rmem_np;
        struct reserved_mem *rmem;
        int num_rmems;
        int ret, i;

        num_rmems = of_property_count_elems_of_size(np, "memory-region",
                                                    sizeof(phandle));
        if (num_rmems <= 0) {
                dev_err(dev, "device does not have reserved memory regions, ret = %d\n",
                        num_rmems);
                return -EINVAL;
        }
        if (num_rmems < 2) {
                dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n",
                        num_rmems);
                return -EINVAL;
        }

        /* use reserved memory region 0 for vring DMA allocations */
        ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
        if (ret) {
                dev_err(dev, "device cannot initialize DMA pool, ret = %d\n",
                        ret);
                return ret;
        }

        num_rmems--;
        kproc->rmem = kcalloc(num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
        if (!kproc->rmem) {
                ret = -ENOMEM;
                goto release_rmem;
        }

        /* use remaining reserved memory regions for static carveouts */
        for (i = 0; i < num_rmems; i++) {
                rmem_np = of_parse_phandle(np, "memory-region", i + 1);
                if (!rmem_np) {
                        ret = -EINVAL;
                        goto unmap_rmem;
                }

                rmem = of_reserved_mem_lookup(rmem_np);
                if (!rmem) {
                        of_node_put(rmem_np);
                        ret = -EINVAL;
                        goto unmap_rmem;
                }
                of_node_put(rmem_np);

                kproc->rmem[i].bus_addr = rmem->base;
                /*
                 * R5Fs do not have an MMU, but have a Region Address Translator
                 * (RAT) module that provides a fixed entry translation between
                 * the 32-bit processor addresses to 64-bit bus addresses. The
                 * RAT is programmable only by the R5F cores, and is not yet
                 * supported by this driver, so 64-bit address regions are not
                 * supported either. The absence of MMUs implies that the R5F
                 * device addresses/supported memory regions are restricted to
                 * 32-bit bus addresses, and are identical
                 */
                kproc->rmem[i].dev_addr = (u32)rmem->base;
                kproc->rmem[i].size = rmem->size;
                kproc->rmem[i].cpu_addr = ioremap_wc(rmem->base, rmem->size);
                if (!kproc->rmem[i].cpu_addr) {
                        dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
                                i + 1, &rmem->base, &rmem->size);
                        ret = -ENOMEM;
                        goto unmap_rmem;
                }

                dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
                        i + 1, &kproc->rmem[i].bus_addr,
                        kproc->rmem[i].size, kproc->rmem[i].cpu_addr,
                        kproc->rmem[i].dev_addr);
        }
        kproc->num_rmems = num_rmems;

        return 0;

unmap_rmem:
        for (i--; i >= 0; i--)
                iounmap(kproc->rmem[i].cpu_addr);
        kfree(kproc->rmem);
release_rmem:
        of_reserved_mem_device_release(dev);
        return ret;
}
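
/*
 * Illustrative summary of the reserved memory layout assumed above: entry 0
 * of the "memory-region" phandle list is used as the DMA pool for vring and
 * virtio buffer allocations, and every following entry is mapped as a static
 * carveout that firmware can reference via 32-bit device addresses. The exact
 * region names and sizes are board-specific and not dictated by this driver.
 */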

static void k3_r5_reserved_mem_exit(struct k3_r5_rproc *kproc)
{
        int i;

        for (i = 0; i < kproc->num_rmems; i++)
                iounmap(kproc->rmem[i].cpu_addr);
        kfree(kproc->rmem);

        of_reserved_mem_device_release(kproc->dev);
}

static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
{
        struct k3_r5_cluster *cluster = platform_get_drvdata(pdev);
        struct device *dev = &pdev->dev;
        struct k3_r5_rproc *kproc;
        struct k3_r5_core *core, *core1;
        struct device *cdev;
        const char *fw_name;
        struct rproc *rproc;
        int ret;

        core1 = list_last_entry(&cluster->cores, struct k3_r5_core, elem);
        list_for_each_entry(core, &cluster->cores, elem) {
                cdev = core->dev;
                ret = rproc_of_parse_firmware(cdev, 0, &fw_name);
                if (ret) {
                        dev_err(dev, "failed to parse firmware-name property, ret = %d\n",
                                ret);
                        goto out;
                }

                rproc = rproc_alloc(cdev, dev_name(cdev), &k3_r5_rproc_ops,
                                    fw_name, sizeof(*kproc));
                if (!rproc) {
                        ret = -ENOMEM;
                        goto out;
                }

                /* K3 R5s have a Region Address Translator (RAT) but no MMU */
                rproc->has_iommu = false;
                /* error recovery is not supported at present */
                rproc->recovery_disabled = true;

                kproc = rproc->priv;
                kproc->cluster = cluster;
                kproc->core = core;
                kproc->dev = cdev;
                kproc->rproc = rproc;
                core->rproc = rproc;

                ret = k3_r5_rproc_configure(kproc);
                if (ret) {
                        dev_err(dev, "initial configure failed, ret = %d\n",
                                ret);
                        goto err_config;
                }

                ret = k3_r5_reserved_mem_init(kproc);
                if (ret) {
                        dev_err(dev, "reserved memory init failed, ret = %d\n",
                                ret);
                        goto err_config;
                }

                ret = rproc_add(rproc);
                if (ret) {
                        dev_err(dev, "rproc_add failed, ret = %d\n", ret);
                        goto err_add;
                }

                /* create only one rproc in lockstep mode */
                if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
                        break;
        }

        return 0;

err_split:
        rproc_del(rproc);
err_add:
        k3_r5_reserved_mem_exit(kproc);
err_config:
        rproc_free(rproc);
        core->rproc = NULL;
out:
        /* undo core0 upon any failures on core1 in split-mode */
        if (cluster->mode == CLUSTER_MODE_SPLIT && core == core1) {
                core = list_prev_entry(core, elem);
                rproc = core->rproc;
                kproc = rproc->priv;
                goto err_split;
        }
        return ret;
}

static int k3_r5_cluster_rproc_exit(struct platform_device *pdev)
{
        struct k3_r5_cluster *cluster = platform_get_drvdata(pdev);
        struct k3_r5_rproc *kproc;
        struct k3_r5_core *core;
        struct rproc *rproc;

        /*
         * lockstep mode has only one rproc associated with the first core,
         * whereas split-mode has one rproc per core, and requires that core1
         * be powered down first
         */
        core = (cluster->mode == CLUSTER_MODE_LOCKSTEP) ?
                list_first_entry(&cluster->cores, struct k3_r5_core, elem) :
                list_last_entry(&cluster->cores, struct k3_r5_core, elem);

        list_for_each_entry_from_reverse(core, &cluster->cores, elem) {
                rproc = core->rproc;
                kproc = rproc->priv;

                rproc_del(rproc);

                k3_r5_reserved_mem_exit(kproc);

                rproc_free(rproc);
                core->rproc = NULL;
        }

        return 0;
}

static int k3_r5_core_of_get_internal_memories(struct platform_device *pdev,
                                               struct k3_r5_core *core)
{
        static const char * const mem_names[] = {"atcm", "btcm"};
        struct device *dev = &pdev->dev;
        struct resource *res;
        int num_mems;
        int i;

        num_mems = ARRAY_SIZE(mem_names);
        core->mem = devm_kcalloc(dev, num_mems, sizeof(*core->mem), GFP_KERNEL);
        if (!core->mem)
                return -ENOMEM;

        for (i = 0; i < num_mems; i++) {
                res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                                   mem_names[i]);
                if (!res) {
                        dev_err(dev, "found no memory resource for %s\n",
                                mem_names[i]);
                        return -EINVAL;
                }
                if (!devm_request_mem_region(dev, res->start,
                                             resource_size(res),
                                             dev_name(dev))) {
                        dev_err(dev, "could not request %s region for resource\n",
                                mem_names[i]);
                        return -EBUSY;
                }

                /*
                 * TCMs are designed in general to support RAM-like backing
                 * memories. So, map these as Normal Non-Cached memories. This
                 * also avoids/fixes any potential alignment faults due to
                 * unaligned data accesses when using memcpy() or memset()
                 * functions (normally seen with device type memory).
                 */
                core->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
                                                        resource_size(res));
                if (!core->mem[i].cpu_addr) {
                        dev_err(dev, "failed to map %s memory\n", mem_names[i]);
                        return -ENOMEM;
                }
                core->mem[i].bus_addr = res->start;

                /*
                 * TODO:
                 * The R5F cores can place ATCM & BTCM anywhere in their address
                 * space based on the corresponding Region Registers in the
                 * System Control coprocessor. For now, place ATCM and BTCM at
                 * addresses 0 and 0x41010000 (same as the bus address on AM65x
                 * SoCs) based on loczrama setting
                 */
                if (!strcmp(mem_names[i], "atcm")) {
                        core->mem[i].dev_addr = core->loczrama ?
                                                        0 : K3_R5_TCM_DEV_ADDR;
                } else {
                        core->mem[i].dev_addr = core->loczrama ?
                                                        K3_R5_TCM_DEV_ADDR : 0;
                }
                core->mem[i].size = resource_size(res);

                dev_dbg(dev, "memory %5s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
                        mem_names[i], &core->mem[i].bus_addr,
                        core->mem[i].size, core->mem[i].cpu_addr,
                        core->mem[i].dev_addr);
        }
        core->num_mems = num_mems;

        return 0;
}
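
/*
 * For reference, a minimal (hypothetical) device-tree fragment providing the
 * two TCM regions looked up above by name; the unit addresses and sizes shown
 * are illustrative and SoC-specific:
 *
 *	reg = <0x41000000 0x8000>, <0x41010000 0x8000>;
 *	reg-names = "atcm", "btcm";
 */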

static int k3_r5_core_of_get_sram_memories(struct platform_device *pdev,
                                           struct k3_r5_core *core)
{
        struct device_node *np = pdev->dev.of_node;
        struct device *dev = &pdev->dev;
        struct device_node *sram_np;
        struct resource res;
        int num_sram;
        int i, ret;

        num_sram = of_property_count_elems_of_size(np, "sram", sizeof(phandle));
        if (num_sram <= 0) {
                dev_dbg(dev, "device does not use reserved on-chip memories, num_sram = %d\n",
                        num_sram);
                return 0;
        }

        core->sram = devm_kcalloc(dev, num_sram, sizeof(*core->sram), GFP_KERNEL);
        if (!core->sram)
                return -ENOMEM;

        for (i = 0; i < num_sram; i++) {
                sram_np = of_parse_phandle(np, "sram", i);
                if (!sram_np)
                        return -EINVAL;

                if (!of_device_is_available(sram_np)) {
                        of_node_put(sram_np);
                        return -EINVAL;
                }

                ret = of_address_to_resource(sram_np, 0, &res);
                of_node_put(sram_np);
                if (ret)
                        return -EINVAL;

                core->sram[i].bus_addr = res.start;
                core->sram[i].dev_addr = res.start;
                core->sram[i].size = resource_size(&res);
                core->sram[i].cpu_addr = devm_ioremap_wc(dev, res.start,
                                                         resource_size(&res));
                if (!core->sram[i].cpu_addr) {
                        dev_err(dev, "failed to parse and map sram%d memory at %pad\n",
                                i, &res.start);
                        return -ENOMEM;
                }

                dev_dbg(dev, "memory sram%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
                        i, &core->sram[i].bus_addr,
                        core->sram[i].size, core->sram[i].cpu_addr,
                        core->sram[i].dev_addr);
        }
        core->num_sram = num_sram;

        return 0;
}

static
struct ti_sci_proc *k3_r5_core_of_get_tsp(struct device *dev,
                                          const struct ti_sci_handle *sci)
{
        struct ti_sci_proc *tsp;
        u32 temp[2];
        int ret;

        ret = of_property_read_u32_array(dev_of_node(dev), "ti,sci-proc-ids",
                                         temp, 2);
        if (ret < 0)
                return ERR_PTR(ret);

        tsp = devm_kzalloc(dev, sizeof(*tsp), GFP_KERNEL);
        if (!tsp)
                return ERR_PTR(-ENOMEM);

        tsp->dev = dev;
        tsp->sci = sci;
        tsp->ops = &sci->ops.proc_ops;
        tsp->proc_id = temp[0];
        tsp->host_id = temp[1];

        return tsp;
}
1128*4882a593Smuzhiyun
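/*
 * Parse the per-core DT properties (TCM configuration, TI-SCI handle and
 * device id, reset control), build the TI-SCI processor control structure
 * and request ownership of the core. All allocations are tracked in a
 * devres group so k3_r5_core_of_exit() can release them together.
 */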
1129*4882a593Smuzhiyun static int k3_r5_core_of_init(struct platform_device *pdev)
1130*4882a593Smuzhiyun {
1131*4882a593Smuzhiyun struct device *dev = &pdev->dev;
1132*4882a593Smuzhiyun struct device_node *np = dev_of_node(dev);
1133*4882a593Smuzhiyun struct k3_r5_core *core;
1134*4882a593Smuzhiyun int ret;
1135*4882a593Smuzhiyun
1136*4882a593Smuzhiyun if (!devres_open_group(dev, k3_r5_core_of_init, GFP_KERNEL))
1137*4882a593Smuzhiyun return -ENOMEM;
1138*4882a593Smuzhiyun
1139*4882a593Smuzhiyun core = devm_kzalloc(dev, sizeof(*core), GFP_KERNEL);
1140*4882a593Smuzhiyun if (!core) {
1141*4882a593Smuzhiyun ret = -ENOMEM;
1142*4882a593Smuzhiyun goto err;
1143*4882a593Smuzhiyun }
1144*4882a593Smuzhiyun
1145*4882a593Smuzhiyun core->dev = dev;
1146*4882a593Smuzhiyun /*
1147*4882a593Smuzhiyun  * Use the SoC Power-on-Reset values as defaults when the DT properties
1148*4882a593Smuzhiyun  * below do not dictate the TCM configuration
1149*4882a593Smuzhiyun  */
1150*4882a593Smuzhiyun core->atcm_enable = 0;
1151*4882a593Smuzhiyun core->btcm_enable = 1;
1152*4882a593Smuzhiyun core->loczrama = 1;
1153*4882a593Smuzhiyun
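/*
 * Illustrative (hypothetical) core node fragment overriding the
 * power-on-reset defaults above; the actual values are board-specific:
 *
 *   ti,atcm-enable = <1>;
 *   ti,btcm-enable = <1>;
 *   ti,loczrama = <0>;
 */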
1154*4882a593Smuzhiyun ret = of_property_read_u32(np, "ti,atcm-enable", &core->atcm_enable);
1155*4882a593Smuzhiyun if (ret < 0 && ret != -EINVAL) {
1156*4882a593Smuzhiyun dev_err(dev, "invalid format for ti,atcm-enable, ret = %d\n",
1157*4882a593Smuzhiyun ret);
1158*4882a593Smuzhiyun goto err;
1159*4882a593Smuzhiyun }
1160*4882a593Smuzhiyun
1161*4882a593Smuzhiyun ret = of_property_read_u32(np, "ti,btcm-enable", &core->btcm_enable);
1162*4882a593Smuzhiyun if (ret < 0 && ret != -EINVAL) {
1163*4882a593Smuzhiyun dev_err(dev, "invalid format for ti,btcm-enable, ret = %d\n",
1164*4882a593Smuzhiyun ret);
1165*4882a593Smuzhiyun goto err;
1166*4882a593Smuzhiyun }
1167*4882a593Smuzhiyun
1168*4882a593Smuzhiyun ret = of_property_read_u32(np, "ti,loczrama", &core->loczrama);
1169*4882a593Smuzhiyun if (ret < 0 && ret != -EINVAL) {
1170*4882a593Smuzhiyun dev_err(dev, "invalid format for ti,loczrama, ret = %d\n", ret);
1171*4882a593Smuzhiyun goto err;
1172*4882a593Smuzhiyun }
1173*4882a593Smuzhiyun
1174*4882a593Smuzhiyun core->ti_sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
1175*4882a593Smuzhiyun if (IS_ERR(core->ti_sci)) {
1176*4882a593Smuzhiyun ret = PTR_ERR(core->ti_sci);
1177*4882a593Smuzhiyun if (ret != -EPROBE_DEFER) {
1178*4882a593Smuzhiyun dev_err(dev, "failed to get ti-sci handle, ret = %d\n",
1179*4882a593Smuzhiyun ret);
1180*4882a593Smuzhiyun }
1181*4882a593Smuzhiyun core->ti_sci = NULL;
1182*4882a593Smuzhiyun goto err;
1183*4882a593Smuzhiyun }
1184*4882a593Smuzhiyun
1185*4882a593Smuzhiyun ret = of_property_read_u32(np, "ti,sci-dev-id", &core->ti_sci_id);
1186*4882a593Smuzhiyun if (ret) {
1187*4882a593Smuzhiyun dev_err(dev, "missing 'ti,sci-dev-id' property\n");
1188*4882a593Smuzhiyun goto err;
1189*4882a593Smuzhiyun }
1190*4882a593Smuzhiyun
1191*4882a593Smuzhiyun core->reset = devm_reset_control_get_exclusive(dev, NULL);
1192*4882a593Smuzhiyun if (IS_ERR_OR_NULL(core->reset)) {
1193*4882a593Smuzhiyun ret = PTR_ERR_OR_ZERO(core->reset);
1194*4882a593Smuzhiyun if (!ret)
1195*4882a593Smuzhiyun ret = -ENODEV;
1196*4882a593Smuzhiyun if (ret != -EPROBE_DEFER) {
1197*4882a593Smuzhiyun dev_err(dev, "failed to get reset handle, ret = %d\n",
1198*4882a593Smuzhiyun ret);
1199*4882a593Smuzhiyun }
1200*4882a593Smuzhiyun goto err;
1201*4882a593Smuzhiyun }
1202*4882a593Smuzhiyun
1203*4882a593Smuzhiyun core->tsp = k3_r5_core_of_get_tsp(dev, core->ti_sci);
1204*4882a593Smuzhiyun if (IS_ERR(core->tsp)) {
1205*4882a593Smuzhiyun ret = PTR_ERR(core->tsp);
1206*4882a593Smuzhiyun dev_err(dev, "failed to construct ti-sci proc control, ret = %d\n",
1207*4882a593Smuzhiyun ret);
1208*4882a593Smuzhiyun goto err;
1209*4882a593Smuzhiyun }
1210*4882a593Smuzhiyun
1211*4882a593Smuzhiyun ret = k3_r5_core_of_get_internal_memories(pdev, core);
1212*4882a593Smuzhiyun if (ret) {
1213*4882a593Smuzhiyun dev_err(dev, "failed to get internal memories, ret = %d\n",
1214*4882a593Smuzhiyun ret);
1215*4882a593Smuzhiyun goto err;
1216*4882a593Smuzhiyun }
1217*4882a593Smuzhiyun
1218*4882a593Smuzhiyun ret = k3_r5_core_of_get_sram_memories(pdev, core);
1219*4882a593Smuzhiyun if (ret) {
1220*4882a593Smuzhiyun dev_err(dev, "failed to get sram memories, ret = %d\n", ret);
1221*4882a593Smuzhiyun goto err;
1222*4882a593Smuzhiyun }
1223*4882a593Smuzhiyun
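/* request control of the core from TI-SCI before any further configuration */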
1224*4882a593Smuzhiyun ret = ti_sci_proc_request(core->tsp);
1225*4882a593Smuzhiyun if (ret < 0) {
1226*4882a593Smuzhiyun dev_err(dev, "ti_sci_proc_request failed, ret = %d\n", ret);
1227*4882a593Smuzhiyun goto err;
1228*4882a593Smuzhiyun }
1229*4882a593Smuzhiyun
1230*4882a593Smuzhiyun platform_set_drvdata(pdev, core);
1231*4882a593Smuzhiyun devres_close_group(dev, k3_r5_core_of_init);
1232*4882a593Smuzhiyun
1233*4882a593Smuzhiyun return 0;
1234*4882a593Smuzhiyun
1235*4882a593Smuzhiyun err:
1236*4882a593Smuzhiyun devres_release_group(dev, k3_r5_core_of_init);
1237*4882a593Smuzhiyun return ret;
1238*4882a593Smuzhiyun }
1239*4882a593Smuzhiyun
1240*4882a593Smuzhiyun /*
1241*4882a593Smuzhiyun  * Free the resources explicitly since the driver model is not used
1242*4882a593Smuzhiyun  * for the child R5F devices
1243*4882a593Smuzhiyun  */
1244*4882a593Smuzhiyun static void k3_r5_core_of_exit(struct platform_device *pdev)
1245*4882a593Smuzhiyun {
1246*4882a593Smuzhiyun struct k3_r5_core *core = platform_get_drvdata(pdev);
1247*4882a593Smuzhiyun struct device *dev = &pdev->dev;
1248*4882a593Smuzhiyun int ret;
1249*4882a593Smuzhiyun
1250*4882a593Smuzhiyun ret = ti_sci_proc_release(core->tsp);
1251*4882a593Smuzhiyun if (ret)
1252*4882a593Smuzhiyun dev_err(dev, "failed to release proc, ret = %d\n", ret);
1253*4882a593Smuzhiyun
1254*4882a593Smuzhiyun platform_set_drvdata(pdev, NULL);
1255*4882a593Smuzhiyun devres_release_group(dev, k3_r5_core_of_init);
1256*4882a593Smuzhiyun }
1257*4882a593Smuzhiyun
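/*
 * Tear down the cores in the reverse order of their initialization,
 * unlinking each core from the cluster list before releasing its
 * resources via k3_r5_core_of_exit().
 */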
1258*4882a593Smuzhiyun static void k3_r5_cluster_of_exit(struct platform_device *pdev)
1259*4882a593Smuzhiyun {
1260*4882a593Smuzhiyun struct k3_r5_cluster *cluster = platform_get_drvdata(pdev);
1261*4882a593Smuzhiyun struct platform_device *cpdev;
1262*4882a593Smuzhiyun struct k3_r5_core *core, *temp;
1263*4882a593Smuzhiyun
1264*4882a593Smuzhiyun list_for_each_entry_safe_reverse(core, temp, &cluster->cores, elem) {
1265*4882a593Smuzhiyun list_del(&core->elem);
1266*4882a593Smuzhiyun cpdev = to_platform_device(core->dev);
1267*4882a593Smuzhiyun k3_r5_core_of_exit(cpdev);
1268*4882a593Smuzhiyun }
1269*4882a593Smuzhiyun }
1270*4882a593Smuzhiyun
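/*
 * Initialize every available child R5F core node of the cluster: look up
 * the platform device created for it, run its per-core init, and add it to
 * the cluster's core list. Any failure unwinds the cores already
 * initialized through k3_r5_cluster_of_exit().
 */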
1271*4882a593Smuzhiyun static int k3_r5_cluster_of_init(struct platform_device *pdev)
1272*4882a593Smuzhiyun {
1273*4882a593Smuzhiyun struct k3_r5_cluster *cluster = platform_get_drvdata(pdev);
1274*4882a593Smuzhiyun struct device *dev = &pdev->dev;
1275*4882a593Smuzhiyun struct device_node *np = dev_of_node(dev);
1276*4882a593Smuzhiyun struct platform_device *cpdev;
1277*4882a593Smuzhiyun struct device_node *child;
1278*4882a593Smuzhiyun struct k3_r5_core *core;
1279*4882a593Smuzhiyun int ret;
1280*4882a593Smuzhiyun
1281*4882a593Smuzhiyun for_each_available_child_of_node(np, child) {
1282*4882a593Smuzhiyun cpdev = of_find_device_by_node(child);
1283*4882a593Smuzhiyun if (!cpdev) {
1284*4882a593Smuzhiyun ret = -ENODEV;
1285*4882a593Smuzhiyun dev_err(dev, "could not get R5 core platform device\n");
1286*4882a593Smuzhiyun of_node_put(child);
1287*4882a593Smuzhiyun goto fail;
1288*4882a593Smuzhiyun }
1289*4882a593Smuzhiyun
1290*4882a593Smuzhiyun ret = k3_r5_core_of_init(cpdev);
1291*4882a593Smuzhiyun if (ret) {
1292*4882a593Smuzhiyun dev_err(dev, "k3_r5_core_of_init failed, ret = %d\n",
1293*4882a593Smuzhiyun ret);
1294*4882a593Smuzhiyun put_device(&cpdev->dev);
1295*4882a593Smuzhiyun of_node_put(child);
1296*4882a593Smuzhiyun goto fail;
1297*4882a593Smuzhiyun }
1298*4882a593Smuzhiyun
1299*4882a593Smuzhiyun core = platform_get_drvdata(cpdev);
1300*4882a593Smuzhiyun put_device(&cpdev->dev);
1301*4882a593Smuzhiyun list_add_tail(&core->elem, &cluster->cores);
1302*4882a593Smuzhiyun }
1303*4882a593Smuzhiyun
1304*4882a593Smuzhiyun return 0;
1305*4882a593Smuzhiyun
1306*4882a593Smuzhiyun fail:
1307*4882a593Smuzhiyun k3_r5_cluster_of_exit(pdev);
1308*4882a593Smuzhiyun return ret;
1309*4882a593Smuzhiyun }
1310*4882a593Smuzhiyun
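/*
 * Cluster probe: read the optional "ti,cluster-mode" property (LockStep by
 * default), require both R5F cores to be enabled in DT, populate the child
 * core devices, and set up the per-core and remoteproc state. All teardown
 * is registered as devm actions, so no .remove callback is needed.
 */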
1311*4882a593Smuzhiyun static int k3_r5_probe(struct platform_device *pdev)
1312*4882a593Smuzhiyun {
1313*4882a593Smuzhiyun struct device *dev = &pdev->dev;
1314*4882a593Smuzhiyun struct device_node *np = dev_of_node(dev);
1315*4882a593Smuzhiyun struct k3_r5_cluster *cluster;
1316*4882a593Smuzhiyun int ret;
1317*4882a593Smuzhiyun int num_cores;
1318*4882a593Smuzhiyun
1319*4882a593Smuzhiyun cluster = devm_kzalloc(dev, sizeof(*cluster), GFP_KERNEL);
1320*4882a593Smuzhiyun if (!cluster)
1321*4882a593Smuzhiyun return -ENOMEM;
1322*4882a593Smuzhiyun
1323*4882a593Smuzhiyun cluster->dev = dev;
1324*4882a593Smuzhiyun cluster->mode = CLUSTER_MODE_LOCKSTEP;
1325*4882a593Smuzhiyun INIT_LIST_HEAD(&cluster->cores);
1326*4882a593Smuzhiyun
1327*4882a593Smuzhiyun ret = of_property_read_u32(np, "ti,cluster-mode", &cluster->mode);
1328*4882a593Smuzhiyun if (ret < 0 && ret != -EINVAL) {
1329*4882a593Smuzhiyun dev_err(dev, "invalid format for ti,cluster-mode, ret = %d\n",
1330*4882a593Smuzhiyun ret);
1331*4882a593Smuzhiyun return ret;
1332*4882a593Smuzhiyun }
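/* ti,cluster-mode = <0> selects Split mode; the default set above is LockStep */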
1333*4882a593Smuzhiyun
1334*4882a593Smuzhiyun num_cores = of_get_available_child_count(np);
1335*4882a593Smuzhiyun if (num_cores != 2) {
1336*4882a593Smuzhiyun dev_err(dev, "MCU cluster requires both R5F cores to be enabled, num_cores = %d\n",
1337*4882a593Smuzhiyun num_cores);
1338*4882a593Smuzhiyun return -ENODEV;
1339*4882a593Smuzhiyun }
1340*4882a593Smuzhiyun
1341*4882a593Smuzhiyun platform_set_drvdata(pdev, cluster);
1342*4882a593Smuzhiyun
1343*4882a593Smuzhiyun ret = devm_of_platform_populate(dev);
1344*4882a593Smuzhiyun if (ret) {
1345*4882a593Smuzhiyun dev_err(dev, "devm_of_platform_populate failed, ret = %d\n",
1346*4882a593Smuzhiyun ret);
1347*4882a593Smuzhiyun return ret;
1348*4882a593Smuzhiyun }
1349*4882a593Smuzhiyun
1350*4882a593Smuzhiyun ret = k3_r5_cluster_of_init(pdev);
1351*4882a593Smuzhiyun if (ret) {
1352*4882a593Smuzhiyun dev_err(dev, "k3_r5_cluster_of_init failed, ret = %d\n", ret);
1353*4882a593Smuzhiyun return ret;
1354*4882a593Smuzhiyun }
1355*4882a593Smuzhiyun
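/*
 * Both cluster exit helpers take only the platform device, so they are
 * registered directly as devm cleanup actions through a function-pointer
 * cast instead of separate wrapper functions.
 */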
1356*4882a593Smuzhiyun ret = devm_add_action_or_reset(dev,
1357*4882a593Smuzhiyun (void(*)(void *))k3_r5_cluster_of_exit,
1358*4882a593Smuzhiyun pdev);
1359*4882a593Smuzhiyun if (ret)
1360*4882a593Smuzhiyun return ret;
1361*4882a593Smuzhiyun
1362*4882a593Smuzhiyun ret = k3_r5_cluster_rproc_init(pdev);
1363*4882a593Smuzhiyun if (ret) {
1364*4882a593Smuzhiyun dev_err(dev, "k3_r5_cluster_rproc_init failed, ret = %d\n",
1365*4882a593Smuzhiyun ret);
1366*4882a593Smuzhiyun return ret;
1367*4882a593Smuzhiyun }
1368*4882a593Smuzhiyun
1369*4882a593Smuzhiyun ret = devm_add_action_or_reset(dev,
1370*4882a593Smuzhiyun (void(*)(void *))k3_r5_cluster_rproc_exit,
1371*4882a593Smuzhiyun pdev);
1372*4882a593Smuzhiyun if (ret)
1373*4882a593Smuzhiyun return ret;
1374*4882a593Smuzhiyun
1375*4882a593Smuzhiyun return 0;
1376*4882a593Smuzhiyun }
1377*4882a593Smuzhiyun
1378*4882a593Smuzhiyun static const struct of_device_id k3_r5_of_match[] = {
1379*4882a593Smuzhiyun { .compatible = "ti,am654-r5fss", },
1380*4882a593Smuzhiyun { .compatible = "ti,j721e-r5fss", },
1381*4882a593Smuzhiyun { /* sentinel */ },
1382*4882a593Smuzhiyun };
1383*4882a593Smuzhiyun MODULE_DEVICE_TABLE(of, k3_r5_of_match);
1384*4882a593Smuzhiyun
1385*4882a593Smuzhiyun static struct platform_driver k3_r5_rproc_driver = {
1386*4882a593Smuzhiyun .probe = k3_r5_probe,
1387*4882a593Smuzhiyun .driver = {
1388*4882a593Smuzhiyun .name = "k3_r5_rproc",
1389*4882a593Smuzhiyun .of_match_table = k3_r5_of_match,
1390*4882a593Smuzhiyun },
1391*4882a593Smuzhiyun };
1392*4882a593Smuzhiyun
1393*4882a593Smuzhiyun module_platform_driver(k3_r5_rproc_driver);
1394*4882a593Smuzhiyun
1395*4882a593Smuzhiyun MODULE_LICENSE("GPL v2");
1396*4882a593Smuzhiyun MODULE_DESCRIPTION("TI K3 R5F remote processor driver");
1397*4882a593Smuzhiyun MODULE_AUTHOR("Suman Anna <s-anna@ti.com>");