/*
 * Copyright (c) 2018-2020, Arm Limited and Contributors. All rights reserved.
 * Copyright (c) 2021-2022, Xilinx, Inc. All rights reserved.
 * Copyright (c) 2022-2025, Advanced Micro Devices, Inc. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>

#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/mmio.h>
#include <lib/psci/psci.h>
#include <plat/arm/common/plat_arm.h>
#include <plat/common/platform.h>
#include <plat_arm.h>

#include <plat_private.h>
#include <pm_defs.h>

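/* Secure entry point registered by the PSCI framework; programmed into RVBAR when a core is powered on. */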
static uintptr_t versal_net_sec_entry;

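/*
 * Put the calling core into standby: drain outstanding memory accesses, then
 * wait for an interrupt.
 */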
static void zynqmp_cpu_standby(plat_local_state_t cpu_state)
{
	dsb();
	wfi();
}

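/*
 * Power on a core by driving the APU registers directly (no PM firmware
 * involved): enable the cluster clock, request the cluster power state via
 * its PCLI interface, hold the core in reset while RVBAR is programmed with
 * the secure entry point, release the core and cluster resets, then request
 * the core power-state change.
 */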
static int32_t zynqmp_nopmu_pwr_domain_on(u_register_t mpidr)
{
	uint32_t cpu_id = plat_core_pos_by_mpidr(mpidr);
	uint32_t cpu = cpu_id % PLATFORM_CORE_COUNT_PER_CLUSTER;
	uint32_t cluster = cpu_id / PLATFORM_CORE_COUNT_PER_CLUSTER;
	uintptr_t apu_cluster_base = 0, apu_pcli_base, apu_pcli_cluster = 0;
	uintptr_t rst_apu_cluster = PSX_CRF + RST_APU0_OFFSET + (cluster * 0x4);

	VERBOSE("%s: mpidr: 0x%lx, cpuid: %x, cpu: %x, cluster: %x\n",
		__func__, mpidr, cpu_id, cpu, cluster);

	if (cpu_id == -1) {
		return PSCI_E_INTERN_FAIL;
	}

	if (platform_id == VERSAL_NET_SPP && cluster > 1) {
		panic();
	}

	if (cluster > 3) {
		panic();
	}

	apu_pcli_cluster = APU_PCLI + APU_PCLI_CLUSTER_OFFSET + (cluster * APU_PCLI_CLUSTER_STEP);
	apu_cluster_base = APU_CLUSTER0 + (cluster * APU_CLUSTER_STEP);

	/* Enable clock */
	mmio_setbits_32(PSX_CRF + ACPU0_CLK_CTRL + (cluster * 0x4), ACPU_CLK_CTRL_CLKACT);

	/* Enable cluster states */
	mmio_setbits_32(apu_pcli_cluster + PCLI_PSTATE_OFFSET, PCLI_PSTATE_VAL_SET);
	mmio_setbits_32(apu_pcli_cluster + PCLI_PREQ_OFFSET, PREQ_CHANGE_REQUEST);

	/* Assert core reset */
	mmio_setbits_32(rst_apu_cluster, ((RST_APU_COLD_RESET|RST_APU_WARN_RESET) << cpu));

	/* Program RVBAR */
	mmio_write_32(apu_cluster_base + APU_RVBAR_L_0 + (cpu << 3),
		      (uint32_t)versal_net_sec_entry);
	mmio_write_32(apu_cluster_base + APU_RVBAR_H_0 + (cpu << 3),
		      versal_net_sec_entry >> 32);

	/* De-assert core reset */
	mmio_clrbits_32(rst_apu_cluster, ((RST_APU_COLD_RESET|RST_APU_WARN_RESET) << cpu));

	/* Clear cluster resets */
	mmio_clrbits_32(rst_apu_cluster, RST_APU_CLUSTER_WARM_RESET);
	mmio_clrbits_32(rst_apu_cluster, RST_APU_CLUSTER_COLD_RESET);

	apu_pcli_base = APU_PCLI + (APU_PCLI_CPU_STEP * cpu) +
			(APU_PCLI_CLUSTER_CPU_STEP * cluster);

	mmio_write_32(apu_pcli_base + PCLI_PSTATE_OFFSET, PCLI_PSTATE_VAL_CLEAR);
	mmio_write_32(apu_pcli_base + PCLI_PREQ_OFFSET, PREQ_CHANGE_REQUEST);

	return PSCI_E_SUCCESS;
}

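/* Nothing to do: there is no PM firmware to power the domain off. */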
static void zynqmp_nopmu_pwr_domain_off(const psci_power_state_t *target_state)
{
}

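/*
 * With no PM firmware available to perform the reset, park the calling core
 * in a WFI loop.
 */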
static void __dead2 zynqmp_nopmu_system_reset(void)
{
	while (1) {
		wfi();
	}
}

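/* Any non-secure entry point is accepted. */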
static int32_t zynqmp_validate_ns_entrypoint(uint64_t ns_entrypoint)
{
	return PSCI_E_SUCCESS;
}

static void zynqmp_pwr_domain_suspend(const psci_power_state_t *target_state)
{
}

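/*
 * Complete power-on of this core: initialise its per-CPU GIC state and
 * enable the CPU interface.
 */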
static void zynqmp_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	plat_arm_gic_pcpu_init();
	plat_arm_gic_cpuif_enable();
}

static void zynqmp_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
{
}

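/* As with system reset, no PM firmware is available: park the core in WFI. */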
static void __dead2 zynqmp_system_off(void)
{
	while (1) {
		wfi();
	}
}

static int32_t zynqmp_validate_power_state(uint32_t power_state, psci_power_state_t *req_state)
{
	return PSCI_E_SUCCESS;
}

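/* For system suspend, report the deepest (OFF) state at both power levels. */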
static void zynqmp_get_sys_suspend_power_state(psci_power_state_t *req_state)
{
	req_state->pwr_domain_state[PSCI_CPU_PWR_LVL] = PLAT_MAX_OFF_STATE;
	req_state->pwr_domain_state[1] = PLAT_MAX_OFF_STATE;
}

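/* PSCI operations for the no-PMC (direct register access) configuration. */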
static const struct plat_psci_ops versal_net_nopmc_psci_ops = {
	.cpu_standby			= zynqmp_cpu_standby,
	.pwr_domain_on			= zynqmp_nopmu_pwr_domain_on,
	.pwr_domain_off			= zynqmp_nopmu_pwr_domain_off,
	.system_reset			= zynqmp_nopmu_system_reset,
	.validate_ns_entrypoint		= zynqmp_validate_ns_entrypoint,
	.pwr_domain_suspend		= zynqmp_pwr_domain_suspend,
	.pwr_domain_on_finish		= zynqmp_pwr_domain_on_finish,
	.pwr_domain_suspend_finish	= zynqmp_pwr_domain_suspend_finish,
	.system_off			= zynqmp_system_off,
	.validate_power_state		= zynqmp_validate_power_state,
	.get_sys_suspend_power_state	= zynqmp_get_sys_suspend_power_state,
};

/*******************************************************************************
 * Export the platform specific power ops.
 ******************************************************************************/
int32_t plat_setup_psci_ops(uintptr_t sec_entrypoint,
			    const struct plat_psci_ops **psci_ops)
{
	versal_net_sec_entry = sec_entrypoint;

	VERBOSE("Setting up entry point %lx\n", versal_net_sec_entry);

	*psci_ops = &versal_net_nopmc_psci_ops;

	return 0;
}

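/* No SiP service state needs to be initialised in this configuration. */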
int sip_svc_setup_init(void)
{
	return 0;
}

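/*
 * Minimal IOCTL support without PM firmware: only IOCTL_OSPI_MUX_SELECT is
 * handled, by writing the requested value to the OSPI/QSPI AXI mux register.
 */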
static int32_t no_pm_ioctl(uint32_t device_id, uint32_t ioctl_id,
			   uint32_t arg1, uint32_t arg2)
{
	VERBOSE("%s: ioctl_id: %x, arg1: %x\n", __func__, ioctl_id, arg1);
	if (ioctl_id == IOCTL_OSPI_MUX_SELECT) {
		mmio_write_32(SLCR_OSPI_QSPI_IOU_AXI_MUX_SEL, arg1);
		return 0;
	}
	return PM_RET_ERROR_IOCTL_NOT_SUPPORTED;
}

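/*
 * Service the PM SMC calls that can be handled without PM firmware:
 * PM_IOCTL and PM_GET_CHIPID. Everything else returns SMC_UNK.
 */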
static uint64_t no_pm_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3,
			      uint64_t x4, void *cookie, void *handle, uint64_t flags)
{
	int32_t ret;
	uint32_t arg[4], api_id;

	arg[0] = (uint32_t)x1;
	arg[1] = (uint32_t)(x1 >> 32);
	arg[2] = (uint32_t)x2;
	arg[3] = (uint32_t)(x2 >> 32);

	api_id = smc_fid & FUNCID_NUM_MASK;
	VERBOSE("%s: smc_fid: %x, api_id=0x%x\n", __func__, smc_fid, api_id);

	switch (api_id) {
	case PM_IOCTL:
	{
		ret = no_pm_ioctl(arg[0], arg[1], arg[2], arg[3]);
		SMC_RET1(handle, (uint64_t)ret);
	}
	case PM_GET_CHIPID:
	{
		uint32_t idcode, version;

		idcode  = mmio_read_32(PMC_TAP);
		version = mmio_read_32(PMC_TAP_VERSION);
		SMC_RET2(handle, ((uint64_t)idcode << 32), version);
	}
	default:
		WARN("Unimplemented PM Service Call: 0x%x\n", smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}

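/* Top-level SMC handler: route all PM service calls to the no-PM handler. */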
uint64_t smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3, uint64_t x4,
		     void *cookie, void *handle, uint64_t flags)
{
	return no_pm_handler(smc_fid, x1, x2, x3, x4, cookie, handle, flags);
}