xref: /OK3568_Linux_fs/kernel/drivers/soc/bcm/brcmstb/pm/pm-arm.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * ARM-specific support for Broadcom STB S2/S3/S5 power management
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * S2: clock gate CPUs and as many peripherals as possible
6*4882a593Smuzhiyun  * S3: power off all of the chip except the Always ON (AON) island; keep DDR is
7*4882a593Smuzhiyun  *     self-refresh
8*4882a593Smuzhiyun  * S5: (a.k.a. S3 cold boot) much like S3, except DDR is powered down, so we
9*4882a593Smuzhiyun  *     treat this mode like a soft power-off, with wakeup allowed from AON
10*4882a593Smuzhiyun  *
11*4882a593Smuzhiyun  * Copyright © 2014-2017 Broadcom
12*4882a593Smuzhiyun  */
13*4882a593Smuzhiyun 
14*4882a593Smuzhiyun #define pr_fmt(fmt) "brcmstb-pm: " fmt
15*4882a593Smuzhiyun 
16*4882a593Smuzhiyun #include <linux/bitops.h>
17*4882a593Smuzhiyun #include <linux/compiler.h>
18*4882a593Smuzhiyun #include <linux/delay.h>
19*4882a593Smuzhiyun #include <linux/dma-mapping.h>
20*4882a593Smuzhiyun #include <linux/err.h>
21*4882a593Smuzhiyun #include <linux/init.h>
22*4882a593Smuzhiyun #include <linux/io.h>
23*4882a593Smuzhiyun #include <linux/ioport.h>
24*4882a593Smuzhiyun #include <linux/kconfig.h>
25*4882a593Smuzhiyun #include <linux/kernel.h>
26*4882a593Smuzhiyun #include <linux/memblock.h>
27*4882a593Smuzhiyun #include <linux/module.h>
28*4882a593Smuzhiyun #include <linux/notifier.h>
29*4882a593Smuzhiyun #include <linux/of.h>
30*4882a593Smuzhiyun #include <linux/of_address.h>
31*4882a593Smuzhiyun #include <linux/platform_device.h>
32*4882a593Smuzhiyun #include <linux/pm.h>
33*4882a593Smuzhiyun #include <linux/printk.h>
34*4882a593Smuzhiyun #include <linux/proc_fs.h>
35*4882a593Smuzhiyun #include <linux/sizes.h>
36*4882a593Smuzhiyun #include <linux/slab.h>
37*4882a593Smuzhiyun #include <linux/sort.h>
38*4882a593Smuzhiyun #include <linux/suspend.h>
39*4882a593Smuzhiyun #include <linux/types.h>
40*4882a593Smuzhiyun #include <linux/uaccess.h>
41*4882a593Smuzhiyun #include <linux/soc/brcmstb/brcmstb.h>
42*4882a593Smuzhiyun 
43*4882a593Smuzhiyun #include <asm/fncpy.h>
44*4882a593Smuzhiyun #include <asm/setup.h>
45*4882a593Smuzhiyun #include <asm/suspend.h>
46*4882a593Smuzhiyun 
47*4882a593Smuzhiyun #include "pm.h"
48*4882a593Smuzhiyun #include "aon_defs.h"
49*4882a593Smuzhiyun 
#define SHIMPHY_DDR_PAD_CNTRL		0x8c

/* Method #0: gate the DDR PHY PLL directly via the SHIMPHY pad control */
#define SHIMPHY_PAD_PLL_SEQUENCE	BIT(8)
#define SHIMPHY_PAD_GATE_PLL_S3		BIT(9)

/* Method #1: S3 power-down sequence selector values */
#define PWRDWN_SEQ_NO_SEQUENCING	0	/* leave the PHY PLL untouched */
#define PWRDWN_SEQ_HOLD_CHANNEL		1
#define	PWRDWN_SEQ_RESET_PLL		2
#define PWRDWN_SEQ_POWERDOWN_PLL	3	/* fully power down the PHY PLL */

/* Field in SHIMPHY_DDR_PAD_CNTRL that holds the sequence selector above */
#define SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK	0x00f00000
#define SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT	20

/* Bits in the DDR PHY standby-control registers */
#define	DDR_FORCE_CKE_RST_N		BIT(3)
#define	DDR_PHY_RST_N			BIT(2)
#define	DDR_PHY_CKE			BIT(1)

/* Sentinel offset meaning "this chip has no channel B PHY" */
#define	DDR_PHY_NO_CHANNEL		0xffffffff

/* Maximum number of memory controllers supported by this driver */
#define MAX_NUM_MEMC			3
72*4882a593Smuzhiyun 
/* Register mappings for one memory controller */
struct brcmstb_memc {
	void __iomem *ddr_phy_base;	/* DDR PHY control registers */
	void __iomem *ddr_shimphy_base;	/* SHIMPHY pad-control registers */
	void __iomem *ddr_ctrl;		/* MEMC DDR sequencer registers */
};

/* Driver-wide state, populated at probe time */
struct brcmstb_pm_control {
	void __iomem *aon_ctrl_base;	/* always-on (AON) control block */
	void __iomem *aon_sram;		/* AON system-data SRAM */
	struct brcmstb_memc memcs[MAX_NUM_MEMC];

	void __iomem *boot_sram;	/* on-chip SRAM for resident S2 code */
	size_t boot_sram_len;

	bool support_warm_boot;		/* true if S3 (warm boot) is usable */
	size_t pll_status_offset;	/* PLL status reg offset in DDR PHY */
	int num_memc;			/* populated entries in memcs[] */

	struct brcmstb_s3_params *s3_params;	/* S3 parameter block (DMA) */
	dma_addr_t s3_params_pa;		/* bus address of s3_params */
	int s3entry_method;		/* 0 or 1; selects S3 entry sequence */
	u32 warm_boot_offset;		/* WARM_BOOT reg offset in ddr_ctrl */
	u32 phy_a_standby_ctrl_offs;	/* channel A standby-control offset */
	u32 phy_b_standby_ctrl_offs;	/* channel B, or DDR_PHY_NO_CHANNEL */
	bool needs_ddr_pad;		/* true if SHIMPHY pad writes needed */
	struct platform_device *pdev;
};

/* Commands issued to the BSP firmware via AON_CTRL_PM_INITIATE */
enum bsp_initiate_command {
	BSP_CLOCK_STOP		= 0x00,
	BSP_GEN_RANDOM_KEY	= 0x4A,
	BSP_RESTORE_RANDOM_KEY	= 0x55,
	BSP_GEN_FIXED_KEY	= 0x63,
};

/* AON_CTRL_PM_INITIATE handshake bit and completion status codes */
#define PM_INITIATE		0x01
#define PM_INITIATE_SUCCESS	0x00
#define PM_INITIATE_FAIL	0xfe

static struct brcmstb_pm_control ctrl;

/* S2 entry routine; points at a copy of brcmstb_pm_do_s2 in boot SRAM */
static int (*brcmstb_pm_do_s2_sram)(void __iomem *aon_ctrl_base,
		void __iomem *ddr_phy_pll_status);
116*4882a593Smuzhiyun 
brcmstb_init_sram(struct device_node * dn)117*4882a593Smuzhiyun static int brcmstb_init_sram(struct device_node *dn)
118*4882a593Smuzhiyun {
119*4882a593Smuzhiyun 	void __iomem *sram;
120*4882a593Smuzhiyun 	struct resource res;
121*4882a593Smuzhiyun 	int ret;
122*4882a593Smuzhiyun 
123*4882a593Smuzhiyun 	ret = of_address_to_resource(dn, 0, &res);
124*4882a593Smuzhiyun 	if (ret)
125*4882a593Smuzhiyun 		return ret;
126*4882a593Smuzhiyun 
127*4882a593Smuzhiyun 	/* Uncached, executable remapping of SRAM */
128*4882a593Smuzhiyun 	sram = __arm_ioremap_exec(res.start, resource_size(&res), false);
129*4882a593Smuzhiyun 	if (!sram)
130*4882a593Smuzhiyun 		return -ENOMEM;
131*4882a593Smuzhiyun 
132*4882a593Smuzhiyun 	ctrl.boot_sram = sram;
133*4882a593Smuzhiyun 	ctrl.boot_sram_len = resource_size(&res);
134*4882a593Smuzhiyun 
135*4882a593Smuzhiyun 	return 0;
136*4882a593Smuzhiyun }
137*4882a593Smuzhiyun 
/* Match table used to locate the boot SRAM node in the device tree */
static const struct of_device_id sram_dt_ids[] = {
	{ .compatible = "mmio-sram" },
	{ /* sentinel */ }
};
142*4882a593Smuzhiyun 
do_bsp_initiate_command(enum bsp_initiate_command cmd)143*4882a593Smuzhiyun static int do_bsp_initiate_command(enum bsp_initiate_command cmd)
144*4882a593Smuzhiyun {
145*4882a593Smuzhiyun 	void __iomem *base = ctrl.aon_ctrl_base;
146*4882a593Smuzhiyun 	int ret;
147*4882a593Smuzhiyun 	int timeo = 1000 * 1000; /* 1 second */
148*4882a593Smuzhiyun 
149*4882a593Smuzhiyun 	writel_relaxed(0, base + AON_CTRL_PM_INITIATE);
150*4882a593Smuzhiyun 	(void)readl_relaxed(base + AON_CTRL_PM_INITIATE);
151*4882a593Smuzhiyun 
152*4882a593Smuzhiyun 	/* Go! */
153*4882a593Smuzhiyun 	writel_relaxed((cmd << 1) | PM_INITIATE, base + AON_CTRL_PM_INITIATE);
154*4882a593Smuzhiyun 
155*4882a593Smuzhiyun 	/*
156*4882a593Smuzhiyun 	 * If firmware doesn't support the 'ack', then just assume it's done
157*4882a593Smuzhiyun 	 * after 10ms. Note that this only works for command 0, BSP_CLOCK_STOP
158*4882a593Smuzhiyun 	 */
159*4882a593Smuzhiyun 	if (of_machine_is_compatible("brcm,bcm74371a0")) {
160*4882a593Smuzhiyun 		(void)readl_relaxed(base + AON_CTRL_PM_INITIATE);
161*4882a593Smuzhiyun 		mdelay(10);
162*4882a593Smuzhiyun 		return 0;
163*4882a593Smuzhiyun 	}
164*4882a593Smuzhiyun 
165*4882a593Smuzhiyun 	for (;;) {
166*4882a593Smuzhiyun 		ret = readl_relaxed(base + AON_CTRL_PM_INITIATE);
167*4882a593Smuzhiyun 		if (!(ret & PM_INITIATE))
168*4882a593Smuzhiyun 			break;
169*4882a593Smuzhiyun 		if (timeo <= 0) {
170*4882a593Smuzhiyun 			pr_err("error: timeout waiting for BSP (%x)\n", ret);
171*4882a593Smuzhiyun 			break;
172*4882a593Smuzhiyun 		}
173*4882a593Smuzhiyun 		timeo -= 50;
174*4882a593Smuzhiyun 		udelay(50);
175*4882a593Smuzhiyun 	}
176*4882a593Smuzhiyun 
177*4882a593Smuzhiyun 	return (ret & 0xff) != PM_INITIATE_SUCCESS;
178*4882a593Smuzhiyun }
179*4882a593Smuzhiyun 
brcmstb_pm_handshake(void)180*4882a593Smuzhiyun static int brcmstb_pm_handshake(void)
181*4882a593Smuzhiyun {
182*4882a593Smuzhiyun 	void __iomem *base = ctrl.aon_ctrl_base;
183*4882a593Smuzhiyun 	u32 tmp;
184*4882a593Smuzhiyun 	int ret;
185*4882a593Smuzhiyun 
186*4882a593Smuzhiyun 	/* BSP power handshake, v1 */
187*4882a593Smuzhiyun 	tmp = readl_relaxed(base + AON_CTRL_HOST_MISC_CMDS);
188*4882a593Smuzhiyun 	tmp &= ~1UL;
189*4882a593Smuzhiyun 	writel_relaxed(tmp, base + AON_CTRL_HOST_MISC_CMDS);
190*4882a593Smuzhiyun 	(void)readl_relaxed(base + AON_CTRL_HOST_MISC_CMDS);
191*4882a593Smuzhiyun 
192*4882a593Smuzhiyun 	ret = do_bsp_initiate_command(BSP_CLOCK_STOP);
193*4882a593Smuzhiyun 	if (ret)
194*4882a593Smuzhiyun 		pr_err("BSP handshake failed\n");
195*4882a593Smuzhiyun 
196*4882a593Smuzhiyun 	/*
197*4882a593Smuzhiyun 	 * HACK: BSP may have internal race on the CLOCK_STOP command.
198*4882a593Smuzhiyun 	 * Avoid touching the BSP for a few milliseconds.
199*4882a593Smuzhiyun 	 */
200*4882a593Smuzhiyun 	mdelay(3);
201*4882a593Smuzhiyun 
202*4882a593Smuzhiyun 	return ret;
203*4882a593Smuzhiyun }
204*4882a593Smuzhiyun 
/*
 * Read-modify-write SHIMPHY_DDR_PAD_CNTRL on every memory controller:
 * keep the bits selected by @mask and OR in @value. No-op on chips that
 * do not require DDR pad sequencing.
 */
static inline void shimphy_set(u32 value, u32 mask)
{
	int idx;

	if (!ctrl.needs_ddr_pad)
		return;

	for (idx = 0; idx < ctrl.num_memc; idx++) {
		void __iomem *reg = ctrl.memcs[idx].ddr_shimphy_base +
				    SHIMPHY_DDR_PAD_CNTRL;

		writel_relaxed(value | (readl_relaxed(reg) & mask), reg);
	}
	wmb(); /* Complete sequence in order. */
}
223*4882a593Smuzhiyun 
/*
 * Set (warm boot) or clear (cold boot) the WARM_BOOT bit in each memory
 * controller's DDR sequencer register.
 */
static inline void ddr_ctrl_set(bool warmboot)
{
	int idx;

	for (idx = 0; idx < ctrl.num_memc; idx++) {
		void __iomem *reg = ctrl.memcs[idx].ddr_ctrl +
				    ctrl.warm_boot_offset;
		u32 val = readl_relaxed(reg);

		if (warmboot)
			val |= 1;
		else
			val &= ~1; /* Cold boot */
		writel_relaxed(val, reg);
	}
	/* Complete sequence in order */
	wmb();
}
243*4882a593Smuzhiyun 
/* S3 entry, method #0: gate the DDR PHY PLL via the SHIMPHY pad control */
static inline void s3entry_method0(void)
{
	shimphy_set(SHIMPHY_PAD_GATE_PLL_S3 | SHIMPHY_PAD_PLL_SEQUENCE,
		    0xffffffff);
}
249*4882a593Smuzhiyun 
/* S3 entry, method #1: program the pad power-down sequence, then warm boot */
static inline void s3entry_method1(void)
{
	/*
	 * S3 Entry Sequence
	 * -----------------
	 * Step 1: SHIMPHY_ADDR_CNTL_0_DDR_PAD_CNTRL [ S3_PWRDWN_SEQ ] = 3
	 * Step 2: MEMC_DDR_0_WARM_BOOT [ WARM_BOOT ] = 1
	 */
	shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL <<
		    SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
		    ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);

	ddr_ctrl_set(true);
}
264*4882a593Smuzhiyun 
s5entry_method1(void)265*4882a593Smuzhiyun static inline void s5entry_method1(void)
266*4882a593Smuzhiyun {
267*4882a593Smuzhiyun 	int i;
268*4882a593Smuzhiyun 
269*4882a593Smuzhiyun 	/*
270*4882a593Smuzhiyun 	 * S5 Entry Sequence
271*4882a593Smuzhiyun 	 * -----------------
272*4882a593Smuzhiyun 	 * Step 1: SHIMPHY_ADDR_CNTL_0_DDR_PAD_CNTRL [ S3_PWRDWN_SEQ ] = 3
273*4882a593Smuzhiyun 	 * Step 2: MEMC_DDR_0_WARM_BOOT [ WARM_BOOT ] = 0
274*4882a593Smuzhiyun 	 * Step 3: DDR_PHY_CONTROL_REGS_[AB]_0_STANDBY_CONTROL[ CKE ] = 0
275*4882a593Smuzhiyun 	 *	   DDR_PHY_CONTROL_REGS_[AB]_0_STANDBY_CONTROL[ RST_N ] = 0
276*4882a593Smuzhiyun 	 */
277*4882a593Smuzhiyun 	shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL <<
278*4882a593Smuzhiyun 		    SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
279*4882a593Smuzhiyun 		    ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);
280*4882a593Smuzhiyun 
281*4882a593Smuzhiyun 	ddr_ctrl_set(false);
282*4882a593Smuzhiyun 
283*4882a593Smuzhiyun 	for (i = 0; i < ctrl.num_memc; i++) {
284*4882a593Smuzhiyun 		u32 tmp;
285*4882a593Smuzhiyun 
286*4882a593Smuzhiyun 		/* Step 3: Channel A (RST_N = CKE = 0) */
287*4882a593Smuzhiyun 		tmp = readl_relaxed(ctrl.memcs[i].ddr_phy_base +
288*4882a593Smuzhiyun 				  ctrl.phy_a_standby_ctrl_offs);
289*4882a593Smuzhiyun 		tmp &= ~(DDR_PHY_RST_N | DDR_PHY_RST_N);
290*4882a593Smuzhiyun 		writel_relaxed(tmp, ctrl.memcs[i].ddr_phy_base +
291*4882a593Smuzhiyun 			     ctrl.phy_a_standby_ctrl_offs);
292*4882a593Smuzhiyun 
293*4882a593Smuzhiyun 		/* Step 3: Channel B? */
294*4882a593Smuzhiyun 		if (ctrl.phy_b_standby_ctrl_offs != DDR_PHY_NO_CHANNEL) {
295*4882a593Smuzhiyun 			tmp = readl_relaxed(ctrl.memcs[i].ddr_phy_base +
296*4882a593Smuzhiyun 					  ctrl.phy_b_standby_ctrl_offs);
297*4882a593Smuzhiyun 			tmp &= ~(DDR_PHY_RST_N | DDR_PHY_RST_N);
298*4882a593Smuzhiyun 			writel_relaxed(tmp, ctrl.memcs[i].ddr_phy_base +
299*4882a593Smuzhiyun 				     ctrl.phy_b_standby_ctrl_offs);
300*4882a593Smuzhiyun 		}
301*4882a593Smuzhiyun 	}
302*4882a593Smuzhiyun 	/* Must complete */
303*4882a593Smuzhiyun 	wmb();
304*4882a593Smuzhiyun }
305*4882a593Smuzhiyun 
306*4882a593Smuzhiyun /*
307*4882a593Smuzhiyun  * Run a Power Management State Machine (PMSM) shutdown command and put the CPU
308*4882a593Smuzhiyun  * into a low-power mode
309*4882a593Smuzhiyun  */
brcmstb_do_pmsm_power_down(unsigned long base_cmd,bool onewrite)310*4882a593Smuzhiyun static void brcmstb_do_pmsm_power_down(unsigned long base_cmd, bool onewrite)
311*4882a593Smuzhiyun {
312*4882a593Smuzhiyun 	void __iomem *base = ctrl.aon_ctrl_base;
313*4882a593Smuzhiyun 
314*4882a593Smuzhiyun 	if ((ctrl.s3entry_method == 1) && (base_cmd == PM_COLD_CONFIG))
315*4882a593Smuzhiyun 		s5entry_method1();
316*4882a593Smuzhiyun 
317*4882a593Smuzhiyun 	/* pm_start_pwrdn transition 0->1 */
318*4882a593Smuzhiyun 	writel_relaxed(base_cmd, base + AON_CTRL_PM_CTRL);
319*4882a593Smuzhiyun 
320*4882a593Smuzhiyun 	if (!onewrite) {
321*4882a593Smuzhiyun 		(void)readl_relaxed(base + AON_CTRL_PM_CTRL);
322*4882a593Smuzhiyun 
323*4882a593Smuzhiyun 		writel_relaxed(base_cmd | PM_PWR_DOWN, base + AON_CTRL_PM_CTRL);
324*4882a593Smuzhiyun 		(void)readl_relaxed(base + AON_CTRL_PM_CTRL);
325*4882a593Smuzhiyun 	}
326*4882a593Smuzhiyun 	wfi();
327*4882a593Smuzhiyun }
328*4882a593Smuzhiyun 
329*4882a593Smuzhiyun /* Support S5 cold boot out of "poweroff" */
brcmstb_pm_poweroff(void)330*4882a593Smuzhiyun static void brcmstb_pm_poweroff(void)
331*4882a593Smuzhiyun {
332*4882a593Smuzhiyun 	brcmstb_pm_handshake();
333*4882a593Smuzhiyun 
334*4882a593Smuzhiyun 	/* Clear magic S3 warm-boot value */
335*4882a593Smuzhiyun 	writel_relaxed(0, ctrl.aon_sram + AON_REG_MAGIC_FLAGS);
336*4882a593Smuzhiyun 	(void)readl_relaxed(ctrl.aon_sram + AON_REG_MAGIC_FLAGS);
337*4882a593Smuzhiyun 
338*4882a593Smuzhiyun 	/* Skip wait-for-interrupt signal; just use a countdown */
339*4882a593Smuzhiyun 	writel_relaxed(0x10, ctrl.aon_ctrl_base + AON_CTRL_PM_CPU_WAIT_COUNT);
340*4882a593Smuzhiyun 	(void)readl_relaxed(ctrl.aon_ctrl_base + AON_CTRL_PM_CPU_WAIT_COUNT);
341*4882a593Smuzhiyun 
342*4882a593Smuzhiyun 	if (ctrl.s3entry_method == 1) {
343*4882a593Smuzhiyun 		shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL <<
344*4882a593Smuzhiyun 			     SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
345*4882a593Smuzhiyun 			     ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);
346*4882a593Smuzhiyun 		ddr_ctrl_set(false);
347*4882a593Smuzhiyun 		brcmstb_do_pmsm_power_down(M1_PM_COLD_CONFIG, true);
348*4882a593Smuzhiyun 		return; /* We should never actually get here */
349*4882a593Smuzhiyun 	}
350*4882a593Smuzhiyun 
351*4882a593Smuzhiyun 	brcmstb_do_pmsm_power_down(PM_COLD_CONFIG, false);
352*4882a593Smuzhiyun }
353*4882a593Smuzhiyun 
/*
 * Copy @fn (@len bytes) into the boot SRAM with fncpy() and return a
 * callable pointer to the SRAM copy, or NULL if it does not fit.
 */
static void *brcmstb_pm_copy_to_sram(void *fn, size_t len)
{
	const unsigned int aligned_len = ALIGN(len, FNCPY_ALIGN);

	if (aligned_len > ctrl.boot_sram_len) {
		pr_err("standby code will not fit in SRAM\n");
		return NULL;
	}

	return fncpy(ctrl.boot_sram, fn, aligned_len);
}
365*4882a593Smuzhiyun 
/*
 * S2 suspend/resume picks up where we left off, so we must execute carefully
 * from SRAM, in order to allow DDR to come back up safely before we continue.
 */
static int brcmstb_pm_s2(void)
{
	/* A previous S3 can set a value hazardous to S2, so make sure. */
	if (ctrl.s3entry_method == 1) {
		shimphy_set((PWRDWN_SEQ_NO_SEQUENCING <<
			    SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
			    ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);
		ddr_ctrl_set(false);
	}

	/* Copy the S2 routine into SRAM so it can run while DDR is down */
	brcmstb_pm_do_s2_sram = brcmstb_pm_copy_to_sram(&brcmstb_pm_do_s2,
			brcmstb_pm_do_s2_sz);
	if (!brcmstb_pm_do_s2_sram)
		return -EINVAL;

	/*
	 * Execute from SRAM. Memc 0's PLL status register is passed in —
	 * presumably polled by the SRAM routine during resume (that routine
	 * lives elsewhere; confirm against brcmstb_pm_do_s2).
	 */
	return brcmstb_pm_do_s2_sram(ctrl.aon_ctrl_base,
			ctrl.memcs[0].ddr_phy_base +
			ctrl.pll_status_offset);
}
389*4882a593Smuzhiyun 
/*
 * This function is called on a new stack, so don't allow inlining (which will
 * generate stack references on the old stack). It cannot be made static because
 * it is referenced from brcmstb_pm_s3()
 */
noinline int brcmstb_pm_s3_finish(void)
{
	struct brcmstb_s3_params *params = ctrl.s3_params;
	dma_addr_t params_pa = ctrl.s3_params_pa;
	phys_addr_t reentry = virt_to_phys(&cpu_resume_arm);
	enum bsp_initiate_command cmd;
	u32 flags;

	/*
	 * Clear parameter structure, but not DTU area, which has already been
	 * filled in. We know DTU is a the end, so we can just subtract its
	 * size.
	 */
	memset(params, 0, sizeof(*params) - sizeof(params->dtu));

	/* Preserve only the bootloader-reserved flag bits from last boot */
	flags = readl_relaxed(ctrl.aon_sram + AON_REG_MAGIC_FLAGS);

	flags &= S3_BOOTLOADER_RESERVED;
	flags |= S3_FLAG_NO_MEM_VERIFY;
	flags |= S3_FLAG_LOAD_RANDKEY;

	/*
	 * Load random / fixed key.
	 * NOTE(review): S3_FLAG_LOAD_RANDKEY was unconditionally set just
	 * above, so the BSP_GEN_FIXED_KEY branch is currently dead code —
	 * confirm whether a fixed-key configuration was ever intended.
	 */
	if (flags & S3_FLAG_LOAD_RANDKEY)
		cmd = BSP_GEN_RANDOM_KEY;
	else
		cmd = BSP_GEN_FIXED_KEY;
	if (do_bsp_initiate_command(cmd)) {
		pr_info("key loading failed\n");
		return -EIO;
	}

	params->magic = BRCMSTB_S3_MAGIC;
	params->reentry = reentry;

	/* No more writes to DRAM */
	flush_cache_all();

	/* Publish magic flags and the parameter-block bus address in AON SRAM */
	flags |= BRCMSTB_S3_MAGIC_SHORT;

	writel_relaxed(flags, ctrl.aon_sram + AON_REG_MAGIC_FLAGS);
	writel_relaxed(lower_32_bits(params_pa),
		       ctrl.aon_sram + AON_REG_CONTROL_LOW);
	writel_relaxed(upper_32_bits(params_pa),
		       ctrl.aon_sram + AON_REG_CONTROL_HIGH);

	/* Enter S3 using whichever sequence this chip's DDR PHY requires */
	switch (ctrl.s3entry_method) {
	case 0:
		s3entry_method0();
		brcmstb_do_pmsm_power_down(PM_WARM_CONFIG, false);
		break;
	case 1:
		s3entry_method1();
		brcmstb_do_pmsm_power_down(M1_PM_WARM_CONFIG, true);
		break;
	default:
		return -EINVAL;
	}

	/* Must have been interrupted from wfi()? */
	return -EINTR;
}
456*4882a593Smuzhiyun 
/*
 * Trampoline run by cpu_suspend(): switch to the stack at @sp (top of boot
 * SRAM), call brcmstb_pm_s3_finish() there, then restore the original stack
 * and return its result.
 *
 * NOTE(review): the asm assumes the AAPCS return value is in r0 after the
 * call and declares no clobber list — confirm against the supported
 * toolchains before restructuring anything around this function.
 */
static int brcmstb_pm_do_s3(unsigned long sp)
{
	unsigned long save_sp;
	int ret;

	asm volatile (
		"mov	%[save], sp\n"
		"mov	sp, %[new]\n"
		"bl	brcmstb_pm_s3_finish\n"
		"mov	%[ret], r0\n"
		"mov	%[new], sp\n"
		"mov	sp, %[save]\n"
		: [save] "=&r" (save_sp), [ret] "=&r" (ret)
		: [new] "r" (sp)
	);

	return ret;
}
475*4882a593Smuzhiyun 
/* Enter S3: suspend the CPU with its stack placed at the top of boot SRAM */
static int brcmstb_pm_s3(void)
{
	void __iomem *sp = ctrl.boot_sram + ctrl.boot_sram_len;

	return cpu_suspend((unsigned long)sp, brcmstb_pm_do_s3);
}
482*4882a593Smuzhiyun 
/*
 * Common standby entry point: handshake with the BSP, then enter S3
 * (deep) or S2 (shallow). Returns 0 on successful resume.
 */
static int brcmstb_pm_standby(bool deep_standby)
{
	int err;

	if (brcmstb_pm_handshake())
		return -EIO;

	err = deep_standby ? brcmstb_pm_s3() : brcmstb_pm_s2();
	if (err)
		pr_err("%s: standby failed\n", __func__);

	return err;
}
499*4882a593Smuzhiyun 
/* suspend_ops.enter: map Linux suspend states onto S2/S3 standby */
static int brcmstb_pm_enter(suspend_state_t state)
{
	switch (state) {
	case PM_SUSPEND_STANDBY:
		return brcmstb_pm_standby(false);
	case PM_SUSPEND_MEM:
		return brcmstb_pm_standby(true);
	default:
		return -EINVAL;
	}
}
515*4882a593Smuzhiyun 
/* suspend_ops.valid: S2 always works; S3 only with warm-boot support */
static int brcmstb_pm_valid(suspend_state_t state)
{
	if (state == PM_SUSPEND_STANDBY)
		return true;
	if (state == PM_SUSPEND_MEM)
		return ctrl.support_warm_boot;
	return false;
}
527*4882a593Smuzhiyun 
/* platform_suspend_ops registered with the suspend core */
static const struct platform_suspend_ops brcmstb_pm_ops = {
	.enter		= brcmstb_pm_enter,
	.valid		= brcmstb_pm_valid,
};

/* AON control block: reg index 0 = control regs, index 1 = data SRAM */
static const struct of_device_id aon_ctrl_dt_ids[] = {
	{ .compatible = "brcm,brcmstb-aon-ctrl" },
	{}
};

/* Per-DDR-PHY-revision data attached to ddr_phy_dt_ids[] entries */
struct ddr_phy_ofdata {
	bool supports_warm_boot;	/* S3 usable on this PHY */
	size_t pll_status_offset;	/* PLL status register offset */
	int s3entry_method;		/* 0 or 1; see s3entry_method[01]() */
	u32 warm_boot_offset;		/* WARM_BOOT register offset */
	u32 phy_a_standby_ctrl_offs;	/* channel A standby control */
	u32 phy_b_standby_ctrl_offs;	/* channel B, or DDR_PHY_NO_CHANNEL */
};
546*4882a593Smuzhiyun 
/* DDR PHY v71.1: S3 entry method #1, single PHY channel */
static struct ddr_phy_ofdata ddr_phy_71_1 = {
	.supports_warm_boot = true,
	.pll_status_offset = 0x0c,
	.s3entry_method = 1,
	.warm_boot_offset = 0x2c,
	.phy_a_standby_ctrl_offs = 0x198,
	.phy_b_standby_ctrl_offs = DDR_PHY_NO_CHANNEL
};

/* DDR PHY v72.0: S3 entry method #1, two PHY channels */
static struct ddr_phy_ofdata ddr_phy_72_0 = {
	.supports_warm_boot = true,
	.pll_status_offset = 0x10,
	.s3entry_method = 1,
	.warm_boot_offset = 0x40,
	.phy_a_standby_ctrl_offs = 0x2a4,
	.phy_b_standby_ctrl_offs = 0x8a4
};

/* DDR PHY v225.1: S3 entry method #0, no warm boot (S3 unsupported) */
static struct ddr_phy_ofdata ddr_phy_225_1 = {
	.supports_warm_boot = false,
	.pll_status_offset = 0x4,
	.s3entry_method = 0
};

/* DDR PHY v240.1: S3 entry method #0 with warm boot */
static struct ddr_phy_ofdata ddr_phy_240_1 = {
	.supports_warm_boot = true,
	.pll_status_offset = 0x4,
	.s3entry_method = 0
};

/* Supported DDR PHY revisions and their per-revision data */
static const struct of_device_id ddr_phy_dt_ids[] = {
	{
		.compatible = "brcm,brcmstb-ddr-phy-v71.1",
		.data = &ddr_phy_71_1,
	},
	{
		.compatible = "brcm,brcmstb-ddr-phy-v72.0",
		.data = &ddr_phy_72_0,
	},
	{
		.compatible = "brcm,brcmstb-ddr-phy-v225.1",
		.data = &ddr_phy_225_1,
	},
	{
		.compatible = "brcm,brcmstb-ddr-phy-v240.1",
		.data = &ddr_phy_240_1,
	},
	{
		/* Same as v240.1, for the registers we care about */
		.compatible = "brcm,brcmstb-ddr-phy-v240.2",
		.data = &ddr_phy_240_1,
	},
	{}
};
601*4882a593Smuzhiyun 
/* Per-MEMC-revision data attached to brcmstb_memc_of_match[] entries */
struct ddr_seq_ofdata {
	bool needs_ddr_pad;	/* true if SHIMPHY pad sequencing is needed */
	u32 warm_boot_offset;	/* WARM_BOOT register offset, when fixed */
};

/* rev b.2.2 and later: fixed WARM_BOOT offset, no pad sequencing needed */
static const struct ddr_seq_ofdata ddr_seq_b22 = {
	.needs_ddr_pad = false,
	.warm_boot_offset = 0x2c,
};

/* Default/legacy sequencer: requires SHIMPHY pad writes */
static const struct ddr_seq_ofdata ddr_seq = {
	.needs_ddr_pad = true,
};

/* Match table for the DDR SHIMPHY pad-control block */
static const struct of_device_id ddr_shimphy_dt_ids[] = {
	{ .compatible = "brcm,brcmstb-ddr-shimphy-v1.0" },
	{}
};

/* Supported MEMC DDR sequencer revisions and their per-revision data */
static const struct of_device_id brcmstb_memc_of_match[] = {
	{
		.compatible = "brcm,brcmstb-memc-ddr-rev-b.2.1",
		.data = &ddr_seq,
	},
	{
		.compatible = "brcm,brcmstb-memc-ddr-rev-b.2.2",
		.data = &ddr_seq_b22,
	},
	{
		.compatible = "brcm,brcmstb-memc-ddr-rev-b.2.3",
		.data = &ddr_seq_b22,
	},
	{
		.compatible = "brcm,brcmstb-memc-ddr-rev-b.3.0",
		.data = &ddr_seq_b22,
	},
	{
		.compatible = "brcm,brcmstb-memc-ddr-rev-b.3.1",
		.data = &ddr_seq_b22,
	},
	{
		.compatible = "brcm,brcmstb-memc-ddr",
		.data = &ddr_seq,
	},
	{},
};
648*4882a593Smuzhiyun 
brcmstb_ioremap_match(const struct of_device_id * matches,int index,const void ** ofdata)649*4882a593Smuzhiyun static void __iomem *brcmstb_ioremap_match(const struct of_device_id *matches,
650*4882a593Smuzhiyun 					   int index, const void **ofdata)
651*4882a593Smuzhiyun {
652*4882a593Smuzhiyun 	struct device_node *dn;
653*4882a593Smuzhiyun 	const struct of_device_id *match;
654*4882a593Smuzhiyun 
655*4882a593Smuzhiyun 	dn = of_find_matching_node_and_match(NULL, matches, &match);
656*4882a593Smuzhiyun 	if (!dn)
657*4882a593Smuzhiyun 		return ERR_PTR(-EINVAL);
658*4882a593Smuzhiyun 
659*4882a593Smuzhiyun 	if (ofdata)
660*4882a593Smuzhiyun 		*ofdata = match->data;
661*4882a593Smuzhiyun 
662*4882a593Smuzhiyun 	return of_io_request_and_map(dn, index, dn->full_name);
663*4882a593Smuzhiyun }
664*4882a593Smuzhiyun 
/*
 * Panic-notifier callback: stamp a magic value into AON SRAM so the
 * bootloader can detect that the previous boot ended in a kernel panic.
 */
static int brcmstb_pm_panic_notify(struct notifier_block *nb,
		unsigned long action, void *data)
{
	writel_relaxed(BRCMSTB_PANIC_MAGIC, ctrl.aon_sram + AON_REG_PANIC);

	return NOTIFY_DONE;
}
672*4882a593Smuzhiyun 
/* Notifier block wrapping brcmstb_pm_panic_notify() */
static struct notifier_block brcmstb_pm_panic_nb = {
	.notifier_call = brcmstb_pm_panic_notify,
};
676*4882a593Smuzhiyun 
brcmstb_pm_probe(struct platform_device * pdev)677*4882a593Smuzhiyun static int brcmstb_pm_probe(struct platform_device *pdev)
678*4882a593Smuzhiyun {
679*4882a593Smuzhiyun 	const struct ddr_phy_ofdata *ddr_phy_data;
680*4882a593Smuzhiyun 	const struct ddr_seq_ofdata *ddr_seq_data;
681*4882a593Smuzhiyun 	const struct of_device_id *of_id = NULL;
682*4882a593Smuzhiyun 	struct device_node *dn;
683*4882a593Smuzhiyun 	void __iomem *base;
684*4882a593Smuzhiyun 	int ret, i, s;
685*4882a593Smuzhiyun 
686*4882a593Smuzhiyun 	/* AON ctrl registers */
687*4882a593Smuzhiyun 	base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 0, NULL);
688*4882a593Smuzhiyun 	if (IS_ERR(base)) {
689*4882a593Smuzhiyun 		pr_err("error mapping AON_CTRL\n");
690*4882a593Smuzhiyun 		ret = PTR_ERR(base);
691*4882a593Smuzhiyun 		goto aon_err;
692*4882a593Smuzhiyun 	}
693*4882a593Smuzhiyun 	ctrl.aon_ctrl_base = base;
694*4882a593Smuzhiyun 
695*4882a593Smuzhiyun 	/* AON SRAM registers */
696*4882a593Smuzhiyun 	base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 1, NULL);
697*4882a593Smuzhiyun 	if (IS_ERR(base)) {
698*4882a593Smuzhiyun 		/* Assume standard offset */
699*4882a593Smuzhiyun 		ctrl.aon_sram = ctrl.aon_ctrl_base +
700*4882a593Smuzhiyun 				     AON_CTRL_SYSTEM_DATA_RAM_OFS;
701*4882a593Smuzhiyun 		s = 0;
702*4882a593Smuzhiyun 	} else {
703*4882a593Smuzhiyun 		ctrl.aon_sram = base;
704*4882a593Smuzhiyun 		s = 1;
705*4882a593Smuzhiyun 	}
706*4882a593Smuzhiyun 
707*4882a593Smuzhiyun 	writel_relaxed(0, ctrl.aon_sram + AON_REG_PANIC);
708*4882a593Smuzhiyun 
709*4882a593Smuzhiyun 	/* DDR PHY registers */
710*4882a593Smuzhiyun 	base = brcmstb_ioremap_match(ddr_phy_dt_ids, 0,
711*4882a593Smuzhiyun 				     (const void **)&ddr_phy_data);
712*4882a593Smuzhiyun 	if (IS_ERR(base)) {
713*4882a593Smuzhiyun 		pr_err("error mapping DDR PHY\n");
714*4882a593Smuzhiyun 		ret = PTR_ERR(base);
715*4882a593Smuzhiyun 		goto ddr_phy_err;
716*4882a593Smuzhiyun 	}
717*4882a593Smuzhiyun 	ctrl.support_warm_boot = ddr_phy_data->supports_warm_boot;
718*4882a593Smuzhiyun 	ctrl.pll_status_offset = ddr_phy_data->pll_status_offset;
719*4882a593Smuzhiyun 	/* Only need DDR PHY 0 for now? */
720*4882a593Smuzhiyun 	ctrl.memcs[0].ddr_phy_base = base;
721*4882a593Smuzhiyun 	ctrl.s3entry_method = ddr_phy_data->s3entry_method;
722*4882a593Smuzhiyun 	ctrl.phy_a_standby_ctrl_offs = ddr_phy_data->phy_a_standby_ctrl_offs;
723*4882a593Smuzhiyun 	ctrl.phy_b_standby_ctrl_offs = ddr_phy_data->phy_b_standby_ctrl_offs;
724*4882a593Smuzhiyun 	/*
725*4882a593Smuzhiyun 	 * Slightly grosss to use the phy ver to get a memc,
726*4882a593Smuzhiyun 	 * offset but that is the only versioned things so far
727*4882a593Smuzhiyun 	 * we can test for.
728*4882a593Smuzhiyun 	 */
729*4882a593Smuzhiyun 	ctrl.warm_boot_offset = ddr_phy_data->warm_boot_offset;
730*4882a593Smuzhiyun 
731*4882a593Smuzhiyun 	/* DDR SHIM-PHY registers */
732*4882a593Smuzhiyun 	for_each_matching_node(dn, ddr_shimphy_dt_ids) {
733*4882a593Smuzhiyun 		i = ctrl.num_memc;
734*4882a593Smuzhiyun 		if (i >= MAX_NUM_MEMC) {
735*4882a593Smuzhiyun 			of_node_put(dn);
736*4882a593Smuzhiyun 			pr_warn("too many MEMCs (max %d)\n", MAX_NUM_MEMC);
737*4882a593Smuzhiyun 			break;
738*4882a593Smuzhiyun 		}
739*4882a593Smuzhiyun 
740*4882a593Smuzhiyun 		base = of_io_request_and_map(dn, 0, dn->full_name);
741*4882a593Smuzhiyun 		if (IS_ERR(base)) {
742*4882a593Smuzhiyun 			of_node_put(dn);
743*4882a593Smuzhiyun 			if (!ctrl.support_warm_boot)
744*4882a593Smuzhiyun 				break;
745*4882a593Smuzhiyun 
746*4882a593Smuzhiyun 			pr_err("error mapping DDR SHIMPHY %d\n", i);
747*4882a593Smuzhiyun 			ret = PTR_ERR(base);
748*4882a593Smuzhiyun 			goto ddr_shimphy_err;
749*4882a593Smuzhiyun 		}
750*4882a593Smuzhiyun 		ctrl.memcs[i].ddr_shimphy_base = base;
751*4882a593Smuzhiyun 		ctrl.num_memc++;
752*4882a593Smuzhiyun 	}
753*4882a593Smuzhiyun 
754*4882a593Smuzhiyun 	/* Sequencer DRAM Param and Control Registers */
755*4882a593Smuzhiyun 	i = 0;
756*4882a593Smuzhiyun 	for_each_matching_node(dn, brcmstb_memc_of_match) {
757*4882a593Smuzhiyun 		base = of_iomap(dn, 0);
758*4882a593Smuzhiyun 		if (!base) {
759*4882a593Smuzhiyun 			of_node_put(dn);
760*4882a593Smuzhiyun 			pr_err("error mapping DDR Sequencer %d\n", i);
761*4882a593Smuzhiyun 			ret = -ENOMEM;
762*4882a593Smuzhiyun 			goto brcmstb_memc_err;
763*4882a593Smuzhiyun 		}
764*4882a593Smuzhiyun 
765*4882a593Smuzhiyun 		of_id = of_match_node(brcmstb_memc_of_match, dn);
766*4882a593Smuzhiyun 		if (!of_id) {
767*4882a593Smuzhiyun 			iounmap(base);
768*4882a593Smuzhiyun 			of_node_put(dn);
769*4882a593Smuzhiyun 			ret = -EINVAL;
770*4882a593Smuzhiyun 			goto brcmstb_memc_err;
771*4882a593Smuzhiyun 		}
772*4882a593Smuzhiyun 
773*4882a593Smuzhiyun 		ddr_seq_data = of_id->data;
774*4882a593Smuzhiyun 		ctrl.needs_ddr_pad = ddr_seq_data->needs_ddr_pad;
775*4882a593Smuzhiyun 		/* Adjust warm boot offset based on the DDR sequencer */
776*4882a593Smuzhiyun 		if (ddr_seq_data->warm_boot_offset)
777*4882a593Smuzhiyun 			ctrl.warm_boot_offset = ddr_seq_data->warm_boot_offset;
778*4882a593Smuzhiyun 
779*4882a593Smuzhiyun 		ctrl.memcs[i].ddr_ctrl = base;
780*4882a593Smuzhiyun 		i++;
781*4882a593Smuzhiyun 	}
782*4882a593Smuzhiyun 
783*4882a593Smuzhiyun 	pr_debug("PM: supports warm boot:%d, method:%d, wboffs:%x\n",
784*4882a593Smuzhiyun 		ctrl.support_warm_boot, ctrl.s3entry_method,
785*4882a593Smuzhiyun 		ctrl.warm_boot_offset);
786*4882a593Smuzhiyun 
787*4882a593Smuzhiyun 	dn = of_find_matching_node(NULL, sram_dt_ids);
788*4882a593Smuzhiyun 	if (!dn) {
789*4882a593Smuzhiyun 		pr_err("SRAM not found\n");
790*4882a593Smuzhiyun 		ret = -EINVAL;
791*4882a593Smuzhiyun 		goto brcmstb_memc_err;
792*4882a593Smuzhiyun 	}
793*4882a593Smuzhiyun 
794*4882a593Smuzhiyun 	ret = brcmstb_init_sram(dn);
795*4882a593Smuzhiyun 	of_node_put(dn);
796*4882a593Smuzhiyun 	if (ret) {
797*4882a593Smuzhiyun 		pr_err("error setting up SRAM for PM\n");
798*4882a593Smuzhiyun 		goto brcmstb_memc_err;
799*4882a593Smuzhiyun 	}
800*4882a593Smuzhiyun 
801*4882a593Smuzhiyun 	ctrl.pdev = pdev;
802*4882a593Smuzhiyun 
803*4882a593Smuzhiyun 	ctrl.s3_params = kmalloc(sizeof(*ctrl.s3_params), GFP_KERNEL);
804*4882a593Smuzhiyun 	if (!ctrl.s3_params) {
805*4882a593Smuzhiyun 		ret = -ENOMEM;
806*4882a593Smuzhiyun 		goto s3_params_err;
807*4882a593Smuzhiyun 	}
808*4882a593Smuzhiyun 	ctrl.s3_params_pa = dma_map_single(&pdev->dev, ctrl.s3_params,
809*4882a593Smuzhiyun 					   sizeof(*ctrl.s3_params),
810*4882a593Smuzhiyun 					   DMA_TO_DEVICE);
811*4882a593Smuzhiyun 	if (dma_mapping_error(&pdev->dev, ctrl.s3_params_pa)) {
812*4882a593Smuzhiyun 		pr_err("error mapping DMA memory\n");
813*4882a593Smuzhiyun 		ret = -ENOMEM;
814*4882a593Smuzhiyun 		goto out;
815*4882a593Smuzhiyun 	}
816*4882a593Smuzhiyun 
817*4882a593Smuzhiyun 	atomic_notifier_chain_register(&panic_notifier_list,
818*4882a593Smuzhiyun 				       &brcmstb_pm_panic_nb);
819*4882a593Smuzhiyun 
820*4882a593Smuzhiyun 	pm_power_off = brcmstb_pm_poweroff;
821*4882a593Smuzhiyun 	suspend_set_ops(&brcmstb_pm_ops);
822*4882a593Smuzhiyun 
823*4882a593Smuzhiyun 	return 0;
824*4882a593Smuzhiyun 
825*4882a593Smuzhiyun out:
826*4882a593Smuzhiyun 	kfree(ctrl.s3_params);
827*4882a593Smuzhiyun s3_params_err:
828*4882a593Smuzhiyun 	iounmap(ctrl.boot_sram);
829*4882a593Smuzhiyun brcmstb_memc_err:
830*4882a593Smuzhiyun 	for (i--; i >= 0; i--)
831*4882a593Smuzhiyun 		iounmap(ctrl.memcs[i].ddr_ctrl);
832*4882a593Smuzhiyun ddr_shimphy_err:
833*4882a593Smuzhiyun 	for (i = 0; i < ctrl.num_memc; i++)
834*4882a593Smuzhiyun 		iounmap(ctrl.memcs[i].ddr_shimphy_base);
835*4882a593Smuzhiyun 
836*4882a593Smuzhiyun 	iounmap(ctrl.memcs[0].ddr_phy_base);
837*4882a593Smuzhiyun ddr_phy_err:
838*4882a593Smuzhiyun 	iounmap(ctrl.aon_ctrl_base);
839*4882a593Smuzhiyun 	if (s)
840*4882a593Smuzhiyun 		iounmap(ctrl.aon_sram);
841*4882a593Smuzhiyun aon_err:
842*4882a593Smuzhiyun 	pr_warn("PM: initialization failed with code %d\n", ret);
843*4882a593Smuzhiyun 
844*4882a593Smuzhiyun 	return ret;
845*4882a593Smuzhiyun }
846*4882a593Smuzhiyun 
/*
 * No .probe member here: the device is bound once at init time through
 * platform_driver_probe() in brcmstb_pm_init().
 */
static struct platform_driver brcmstb_pm_driver = {
	.driver = {
		.name	= "brcmstb-pm",
		.of_match_table = aon_ctrl_dt_ids,
	},
};
853*4882a593Smuzhiyun 
static int __init brcmstb_pm_init(void)
{
	/*
	 * platform_driver_probe() registers the driver and probes the
	 * matching device immediately — intended for devices that are
	 * present at boot and not hotpluggable.
	 */
	return platform_driver_probe(&brcmstb_pm_driver,
				     brcmstb_pm_probe);
}
module_init(brcmstb_pm_init);
860