// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2011 Freescale Semiconductor, Inc.
 * Copyright 2011 Linaro Ltd.
 */

#include <linux/init.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/reset-controller.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <asm/smp_plat.h>
#include "common.h"

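/*
 * Register offsets within the System Reset Controller (SRC) and bit
 * positions within its SCR register.
 */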
#define SRC_SCR				0x000
#define SRC_GPR1			0x020
#define BP_SRC_SCR_WARM_RESET_ENABLE	0
#define BP_SRC_SCR_SW_GPU_RST		1
#define BP_SRC_SCR_SW_VPU_RST		2
#define BP_SRC_SCR_SW_IPU1_RST		3
#define BP_SRC_SCR_SW_OPEN_VG_RST	4
#define BP_SRC_SCR_SW_IPU2_RST		12
#define BP_SRC_SCR_CORE1_RST		14
#define BP_SRC_SCR_CORE1_ENABLE		22

static void __iomem *src_base;
static DEFINE_SPINLOCK(scr_lock);

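/*
 * SCR software reset bit for each reset line exposed through the
 * reset controller, indexed by reset line number.
 */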
static const int sw_reset_bits[5] = {
	BP_SRC_SCR_SW_GPU_RST,
	BP_SRC_SCR_SW_VPU_RST,
	BP_SRC_SCR_SW_IPU1_RST,
	BP_SRC_SCR_SW_OPEN_VG_RST,
	BP_SRC_SCR_SW_IPU2_RST
};

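/*
 * Assert the requested module's software reset bit in SCR and wait, with
 * a one second timeout, for the hardware to clear the bit again once the
 * reset has completed.
 */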
static int imx_src_reset_module(struct reset_controller_dev *rcdev,
		unsigned long sw_reset_idx)
{
	unsigned long timeout;
	unsigned long flags;
	int bit;
	u32 val;

	if (sw_reset_idx >= ARRAY_SIZE(sw_reset_bits))
		return -EINVAL;

	bit = 1 << sw_reset_bits[sw_reset_idx];

	spin_lock_irqsave(&scr_lock, flags);
	val = readl_relaxed(src_base + SRC_SCR);
	val |= bit;
	writel_relaxed(val, src_base + SRC_SCR);
	spin_unlock_irqrestore(&scr_lock, flags);

	timeout = jiffies + msecs_to_jiffies(1000);
	while (readl(src_base + SRC_SCR) & bit) {
		if (time_after(jiffies, timeout))
			return -ETIME;
		cpu_relax();
	}

	return 0;
}

static const struct reset_control_ops imx_src_ops = {
	.reset = imx_src_reset_module,
};

static struct reset_controller_dev imx_reset_controller = {
	.ops = &imx_src_ops,
	.nr_resets = ARRAY_SIZE(sw_reset_bits),
};

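/*
 * Enable or disable a secondary core: its SCR enable bit is set or
 * cleared, and its software reset bit is asserted so the core starts
 * from reset when it is brought up.
 */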
void imx_enable_cpu(int cpu, bool enable)
{
	u32 mask, val;

	cpu = cpu_logical_map(cpu);
	mask = 1 << (BP_SRC_SCR_CORE1_ENABLE + cpu - 1);
	spin_lock(&scr_lock);
	val = readl_relaxed(src_base + SRC_SCR);
	val = enable ? val | mask : val & ~mask;
	val |= 1 << (BP_SRC_SCR_CORE1_RST + cpu - 1);
	writel_relaxed(val, src_base + SRC_SCR);
	spin_unlock(&scr_lock);
}

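/*
 * Store the physical address a secondary core should jump to after
 * release from reset in that core's SRC general purpose register.
 */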
void imx_set_cpu_jump(int cpu, void *jump_addr)
{
	cpu = cpu_logical_map(cpu);
	writel_relaxed(__pa_symbol(jump_addr),
		       src_base + SRC_GPR1 + cpu * 8);
}

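/* Read back the per-CPU argument word stored next to the jump address. */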
u32 imx_get_cpu_arg(int cpu)
{
	cpu = cpu_logical_map(cpu);
	return readl_relaxed(src_base + SRC_GPR1 + cpu * 8 + 4);
}

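/* Store a per-CPU argument word next to the jump address. */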
void imx_set_cpu_arg(int cpu, u32 arg)
{
	cpu = cpu_logical_map(cpu);
	writel_relaxed(arg, src_base + SRC_GPR1 + cpu * 8 + 4);
}

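/*
 * Map the SRC block described in the device tree, register the SRC as a
 * reset controller, and disable warm reset so that system restarts go
 * through a full cold reset.
 */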
void __init imx_src_init(void)
{
	struct device_node *np;
	u32 val;

	np = of_find_compatible_node(NULL, NULL, "fsl,imx51-src");
	if (!np)
		return;
	src_base = of_iomap(np, 0);
	WARN_ON(!src_base);

	imx_reset_controller.of_node = np;
	if (IS_ENABLED(CONFIG_RESET_CONTROLLER))
		reset_controller_register(&imx_reset_controller);

	/*
	 * force warm reset sources to generate cold reset
	 * for a more reliable restart
	 */
	spin_lock(&scr_lock);
	val = readl_relaxed(src_base + SRC_SCR);
	val &= ~(1 << BP_SRC_SCR_WARM_RESET_ENABLE);
	writel_relaxed(val, src_base + SRC_SCR);
	spin_unlock(&scr_lock);
}