1 /*
2 * Copyright (c) 2018-2020, Arm Limited and Contributors. All rights reserved.
3 * Copyright (c) 2021-2022, Xilinx, Inc. All rights reserved.
4 * Copyright (c) 2022-2025, Advanced Micro Devices, Inc. All rights reserved.
5 *
6 * SPDX-License-Identifier: BSD-3-Clause
7 */
8
9 #include <assert.h>
10
11 #include <common/debug.h>
12 #include <common/runtime_svc.h>
13 #include <lib/mmio.h>
14 #include <lib/psci/psci.h>
15 #include <plat/arm/common/plat_arm.h>
16 #include <plat/common/platform.h>
17 #include <plat_arm.h>
18
19 #include <plat_private.h>
20 #include <pm_defs.h>
21
/* Compile-time-true loop condition; keeps the dead loops MISRA-friendly. */
#define ALWAYSTRUE true
/* OSPI/QSPI IOU AXI mux select: bit 1 chooses linear (memory-mapped) mode. */
#define LINEAR_MODE BIT(1)

/* Warm-boot entry point for secondaries, latched by plat_setup_psci_ops(). */
static uintptr_t _sec_entry;
26
/*
 * zynqmp_cpu_standby() - Put the calling core into standby.
 * @cpu_state: Requested local power state; unused, as the only standby
 *             implemented is a plain wfi.
 */
static void zynqmp_cpu_standby(plat_local_state_t cpu_state)
{
	/* Explicitly discard the unused parameter (MISRA C:2012 Rule 2.7). */
	(void)cpu_state;

	/* Drain outstanding memory accesses before waiting for an event. */
	dsb();
	wfi();
}
32
/* MPIDR_EL1.MT bit position: set when affinity levels are multithreaded. */
#define MPIDR_MT_BIT (24)
34
zynqmp_nopmu_pwr_domain_on(u_register_t mpidr)35 static int32_t zynqmp_nopmu_pwr_domain_on(u_register_t mpidr)
36 {
37 int32_t cpu_id = plat_core_pos_by_mpidr(mpidr) & ~BIT(MPIDR_MT_BIT);
38 int32_t cpu = cpu_id % PLATFORM_CORE_COUNT_PER_CLUSTER;
39 int32_t cluster = cpu_id / PLATFORM_CORE_COUNT_PER_CLUSTER;
40 uintptr_t apu_cluster_base = 0, apu_pcli_base, apu_pcli_cluster = 0;
41 uintptr_t rst_apu_cluster = PSX_CRF + RST_APU0_OFFSET + ((uint64_t)cluster * 0x4U);
42 int32_t ret = PSCI_E_SUCCESS;
43
44 VERBOSE("%s: mpidr: 0x%lx, cpuid: %x, cpu: %x, cluster: %x\n",
45 __func__, mpidr, cpu_id, cpu, cluster);
46
47 if (cpu_id == -1) {
48 ret = PSCI_E_INTERN_FAIL;
49 goto exit_label;
50 }
51
52 if (cluster > 3U) {
53 panic();
54 }
55
56 apu_pcli_cluster = APU_PCLI + APU_PCLI_CLUSTER_OFFSET + ((uint64_t)cluster * APU_PCLI_CLUSTER_STEP);
57 apu_cluster_base = APU_CLUSTER0 + ((uint64_t)cluster * APU_CLUSTER_STEP);
58
59 /* Enable clock */
60 mmio_setbits_32(PSX_CRF + ACPU0_CLK_CTRL + ((uint64_t)cluster * 0x4U), ACPU_CLK_CTRL_CLKACT);
61
62 /* Enable cluster states */
63 mmio_setbits_32(apu_pcli_cluster + PCLI_PSTATE_OFFSET, PCLI_PSTATE_VAL_SET);
64 mmio_setbits_32(apu_pcli_cluster + PCLI_PREQ_OFFSET, PREQ_CHANGE_REQUEST);
65
66 /* assert core reset */
67 mmio_setbits_32(rst_apu_cluster, ((RST_APU_COLD_RESET|RST_APU_WARN_RESET) << cpu));
68
69 /* program RVBAR */
70 mmio_write_32(apu_cluster_base + APU_RVBAR_L_0 + (cpu << 3),
71 (uint32_t)_sec_entry);
72 mmio_write_32(apu_cluster_base + APU_RVBAR_H_0 + (cpu << 3),
73 (uint32_t)(_sec_entry >> 32));
74
75 /* de-assert core reset */
76 mmio_clrbits_32(rst_apu_cluster, ((RST_APU_COLD_RESET|RST_APU_WARN_RESET) << cpu));
77
78 /* clear cluster resets */
79 mmio_clrbits_32(rst_apu_cluster, RST_APU_CLUSTER_WARM_RESET);
80 mmio_clrbits_32(rst_apu_cluster, RST_APU_CLUSTER_COLD_RESET);
81
82 apu_pcli_base = APU_PCLI + (APU_PCLI_CPU_STEP * cpu) +
83 (APU_PCLI_CLUSTER_CPU_STEP * cluster);
84
85 mmio_write_32(apu_pcli_base + PCLI_PSTATE_OFFSET, PCLI_PSTATE_VAL_CLEAR);
86 mmio_write_32(apu_pcli_base + PCLI_PREQ_OFFSET, PREQ_CHANGE_REQUEST);
87
88 exit_label:
89 return ret;
90 }
91
/*
 * zynqmp_nopmu_pwr_domain_off() - Prepare the calling core for power-down.
 * @target_state: Targeted power states per affinity level (unused here).
 *
 * Only quiesces the GIC CPU interface; no explicit hardware power-down is
 * performed by this hook.
 */
static void zynqmp_nopmu_pwr_domain_off(const psci_power_state_t *target_state)
{
	/* Stop interrupt delivery to this core before it goes down. */
	plat_gic_cpuif_disable();
}
96
zynqmp_nopmu_system_reset(void)97 static void __dead2 zynqmp_nopmu_system_reset(void)
98 {
99 while (ALWAYSTRUE) {
100 wfi();
101 }
102 }
103
zynqmp_validate_ns_entrypoint(uint64_t ns_entrypoint)104 static int32_t zynqmp_validate_ns_entrypoint(uint64_t ns_entrypoint)
105 {
106 int32_t ret = PSCI_E_INVALID_ADDRESS;
107
108 VERBOSE("Validate ns_entry point %lx\n", ns_entrypoint);
109
110 if ((ns_entrypoint) != 0U) {
111 ret = PSCI_E_SUCCESS;
112 }
113
114 return ret;
115 }
116
/*
 * zynqmp_pwr_domain_on_finish() - Late init on a freshly powered-on core.
 * @target_state: Power states the core is coming out of (unused here).
 *
 * Runs on the target core itself: set up its GIC state, then enable its
 * CPU interface so it can start taking interrupts.
 */
static void zynqmp_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	plat_gic_pcpu_init();
	plat_gic_cpuif_enable();
}
122
zynqmp_system_off(void)123 static void __dead2 zynqmp_system_off(void)
124 {
125 while (ALWAYSTRUE) {
126 wfi();
127 }
128 }
129
/*
 * zynqmp_validate_power_state() - Validate a CPU_SUSPEND power state.
 * @power_state: Encoded power state from the caller (not inspected).
 * @req_state:   Output composite state; NOTE(review): left unmodified,
 *               so the generic layer's defaults apply — confirm intended.
 *
 * Return: Always PSCI_E_SUCCESS; every requested state is accepted.
 */
static int32_t zynqmp_validate_power_state(uint32_t power_state, psci_power_state_t *req_state)
{
	return PSCI_E_SUCCESS;
}
134
/* PSCI handler table for operation without PMU/PMC firmware assistance. */
static const struct plat_psci_ops _nopmc_psci_ops = {
	.cpu_standby = zynqmp_cpu_standby,
	.pwr_domain_on = zynqmp_nopmu_pwr_domain_on,
	.pwr_domain_off = zynqmp_nopmu_pwr_domain_off,
	.system_reset = zynqmp_nopmu_system_reset,
	.validate_ns_entrypoint = zynqmp_validate_ns_entrypoint,
	.pwr_domain_on_finish = zynqmp_pwr_domain_on_finish,
	.system_off = zynqmp_system_off,
	.validate_power_state = zynqmp_validate_power_state,
};
145
146 /*******************************************************************************
147 * Export the platform specific power ops.
148 ******************************************************************************/
plat_setup_psci_ops(uintptr_t sec_entrypoint,const struct plat_psci_ops ** psci_ops)149 int32_t plat_setup_psci_ops(uintptr_t sec_entrypoint,
150 const struct plat_psci_ops **psci_ops)
151 {
152 _sec_entry = sec_entrypoint;
153
154 VERBOSE("Setting up entry point %lx\n", _sec_entry);
155
156 *psci_ops = &_nopmc_psci_ops;
157
158 return 0;
159 }
160
/*
 * sip_svc_setup_init() - SiP service setup hook.
 *
 * Nothing to initialise on this platform.
 *
 * Return: Always 0 (success).
 */
int sip_svc_setup_init(void)
{
	return 0;
}
165
no_pm_ioctl(uint32_t device_id,uint32_t ioctl_id,uint32_t arg1,uint32_t arg2)166 static int32_t no_pm_ioctl(uint32_t device_id, uint32_t ioctl_id,
167 uint32_t arg1, uint32_t arg2)
168 {
169 int32_t ret = 0;
170 VERBOSE("%s: ioctl_id: %x, arg1: %x\n", __func__, ioctl_id, arg1);
171
172 switch (ioctl_id) {
173 case IOCTL_OSPI_MUX_SELECT:
174 if ((arg1 == 0) || (arg1 == 1)) {
175 mmio_clrsetbits_32(SLCR_OSPI_QSPI_IOU_AXI_MUX_SEL, LINEAR_MODE,
176 (arg1 ? LINEAR_MODE : 0));
177 } else {
178 ret = PM_RET_ERROR_ARGS;
179 }
180 break;
181 case IOCTL_UFS_TXRX_CFGRDY_GET:
182 ret = (int32_t) mmio_read_32(PMXC_IOU_SLCR_TX_RX_CONFIG_RDY);
183 break;
184 case IOCTL_UFS_SRAM_CSR_SEL:
185 if (arg1 == 1U) {
186 ret = (int32_t) mmio_read_32(PMXC_IOU_SLCR_SRAM_CSR);
187 } else if (arg1 == 0U) {
188 mmio_write_32(PMXC_IOU_SLCR_SRAM_CSR, arg2);
189 }
190 break;
191 case IOCTL_USB_SET_STATE:
192 break;
193 default:
194 ret = PM_RET_ERROR_IOCTL_NOT_SUPPORTED;
195 break;
196 }
197
198 return ret;
199 }
200
/*
 * no_pm_handler() - Service PM SMCs when no PM firmware is available.
 * @smc_fid: Full SMC function id; the PM API id lives in FUNCID_NUM_MASK.
 * @x1-@x4:  SMC arguments; PM packs two 32-bit args per 64-bit register.
 * @cookie:  Opaque cookie from the SMC framework (unused).
 * @handle:  Context handle used by the SMC_RET* macros.
 * @flags:   SMC flags (unused).
 *
 * Each case exits via SMC_RET1/SMC_RET2, which return to the caller
 * directly, so no break statements are needed after them.
 */
static uint64_t no_pm_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3,
			      uint64_t x4, const void *cookie, void *handle, uint64_t flags)
{
	int32_t ret;
	uint32_t arg[4], api_id;

	/* Unpack two 32-bit PM arguments from each 64-bit SMC register. */
	arg[0] = (uint32_t)x1;
	arg[1] = (uint32_t)(x1 >> 32);
	arg[2] = (uint32_t)x2;
	arg[3] = (uint32_t)(x2 >> 32);

	api_id = smc_fid & FUNCID_NUM_MASK;
	VERBOSE("%s: smc_fid: %x, api_id=0x%x\n", __func__, smc_fid, api_id);

	switch (api_id) {
	case PM_IOCTL:
	{
		ret = no_pm_ioctl(arg[0], arg[1], arg[2], arg[3]);
		/* Firmware driver expects return code in upper 32 bits and
		 * status in lower 32 bits.
		 * status is always SUCCESS(0) for mmio low level register
		 * r/w calls and return value is the value returned from
		 * no_pm_ioctl
		 */
		SMC_RET1(handle, ((uint64_t)ret << 32));
	}
	case PM_GET_CHIPID:
	{
		uint32_t idcode, version_type;

		/* IDCODE in the upper word, version/type in the second reg. */
		idcode = mmio_read_32(PMC_TAP);
		version_type = mmio_read_32(PMC_TAP_VERSION);
		SMC_RET2(handle, ((uint64_t)idcode << 32), version_type);
	}
	default:
		WARN("Unimplemented PM Service Call: 0x%x\n", smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}
240
/*
 * smc_handler() - Top-level SMC entry for this platform.
 *
 * Forwards every call unmodified to no_pm_handler(); see that function
 * for parameter and return semantics.
 */
uint64_t smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3, uint64_t x4,
		     const void *cookie, void *handle, uint64_t flags)
{
	return no_pm_handler(smc_fid, x1, x2, x3, x4, cookie, handle, flags);
}
246