xref: /OK3568_Linux_fs/kernel/arch/arm/mach-imx/cpuidle-imx6sx.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014 Freescale Semiconductor, Inc.
 */

#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/cpuidle.h>
#include <asm/suspend.h>

#include "common.h"
#include "cpuidle.h"
#include "hardware.h"

static int imx6sx_idle_finish(unsigned long val)
{
	/*
	 * The Cortex-A7 has an internal L2 cache that needs to be
	 * flushed before powering down the ARM platform.  Since
	 * flushing the L1 cache again here has very small overhead
	 * compared to adding conditional code for the L2 cache type,
	 * just calling flush_cache_all() is fine.
	 */
	flush_cache_all();
	cpu_do_idle();

	return 0;
}

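/*
 * Enter the WAIT states: the SoC low-power mode is set to WAIT_UNCLOCKED
 * for the duration of the state and restored to WAIT_CLOCKED on exit.
 * State 1 ("WAIT") only executes WFI; state 2 ("LOW-POWER-IDLE")
 * additionally powers off the ARM core through cpu_suspend(), with the
 * RBC enabled and the GPC configured to gate ARM power in LPM.
 */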
static int imx6sx_enter_wait(struct cpuidle_device *dev,
			    struct cpuidle_driver *drv, int index)
{
	imx6_set_lpm(WAIT_UNCLOCKED);

	switch (index) {
	case 1:
		cpu_do_idle();
		break;
	case 2:
		imx6_enable_rbc(true);
		imx_gpc_set_arm_power_in_lpm(true);
		imx_set_cpu_jump(0, v7_cpu_resume);
		/* Need to notify there is a cpu pm operation. */
		cpu_pm_enter();
		cpu_cluster_pm_enter();

		cpu_suspend(0, imx6sx_idle_finish);

		cpu_cluster_pm_exit();
		cpu_pm_exit();
		imx_gpc_set_arm_power_in_lpm(false);
		imx6_enable_rbc(false);
		break;
	default:
		break;
	}

	imx6_set_lpm(WAIT_CLOCKED);

	return index;
}

static struct cpuidle_driver imx6sx_cpuidle_driver = {
	.name = "imx6sx_cpuidle",
	.owner = THIS_MODULE,
	.states = {
		/* WFI */
		ARM_CPUIDLE_WFI_STATE,
		/* WAIT */
		{
			.exit_latency = 50,
			.target_residency = 75,
			.flags = CPUIDLE_FLAG_TIMER_STOP,
			.enter = imx6sx_enter_wait,
			.name = "WAIT",
			.desc = "Clock off",
		},
		/* WAIT + ARM power off */
		{
			/*
			 * ARM gating takes 31us * 5 and the RBC clear
			 * takes 65us; with some margin for SW execution,
			 * set the exit latency to 300us.
			 */
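			/* 31us * 5 + 65us = 220us; 300us leaves ~80us of margin. */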
			.exit_latency = 300,
			.target_residency = 500,
			.flags = CPUIDLE_FLAG_TIMER_STOP,
			.enter = imx6sx_enter_wait,
			.name = "LOW-POWER-IDLE",
			.desc = "ARM power off",
		},
	},
	.state_count = 3,
	.safe_state_index = 0,
};

int __init imx6sx_cpuidle_init(void)
{
	imx6_set_int_mem_clk_lpm(true);
	imx6_enable_rbc(false);
	imx_gpc_set_l2_mem_power_in_lpm(false);
	/*
	 * Set the ARM power up/down timing to the fastest: sw2iso and sw
	 * can be set to one 32K cycle (= 31us), except for the power-up
	 * sw2iso, which needs to be larger than the LDO ramp-up time.
	 */
	imx_gpc_set_arm_power_up_timing(cpu_is_imx6sx() ? 0xf : 0x2, 1);
	imx_gpc_set_arm_power_down_timing(1, 1);

	return cpuidle_register(&imx6sx_cpuidle_driver, NULL);
}