/*
 * Versatile Express Serial Power Controller (SPC) support
 *
 * Copyright (C) 2013 ARM Ltd.
 *
 * Authors: Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
 *          Achin Gupta           <achin.gupta@arm.com>
 *          Lorenzo Pieralisi     <lorenzo.pieralisi@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/semaphore.h>

#include <asm/cacheflush.h>

#include "spc.h"

#define SPCLOG "vexpress-spc: "

#define PERF_LVL_A15 0x00
#define PERF_REQ_A15 0x04
#define PERF_LVL_A7 0x08
#define PERF_REQ_A7 0x0c
#define COMMS 0x10
#define COMMS_REQ 0x14
#define PWC_STATUS 0x18
#define PWC_FLAG 0x1c

/* SPC wake-up IRQs status and mask */
#define WAKE_INT_MASK 0x24
#define WAKE_INT_RAW 0x28
#define WAKE_INT_STAT 0x2c
/* SPC power down registers */
#define A15_PWRDN_EN 0x30
#define A7_PWRDN_EN 0x34
/* SPC per-CPU mailboxes */
#define A15_BX_ADDR0 0x68
#define A7_BX_ADDR0 0x78

/* SPC CPU/cluster reset status */
#define STANDBYWFI_STAT 0x3c
#define STANDBYWFI_STAT_A15_CPU_MASK(cpu) (1 << (cpu))
#define STANDBYWFI_STAT_A7_CPU_MASK(cpu) (1 << (3 + (cpu)))

/* SPC system config interface registers */
#define SYSCFG_WDATA 0x70
#define SYSCFG_RDATA 0x74

/* A15/A7 OPP virtual register base */
#define A15_PERFVAL_BASE 0xC10
#define A7_PERFVAL_BASE 0xC30

/* Config interface control bits */
#define SYSCFG_START BIT(31)
#define SYSCFG_SCC (6 << 20)
#define SYSCFG_STAT (14 << 20)

/* wake-up interrupt masks */
#define GBL_WAKEUP_INT_MSK (0x3 << 10)
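
/*
 * Wake-up IRQ mask layout as implied by the accessors below (a summary
 * derived from this file, not from the TRM): per-CPU A15 enables live in
 * the low bits (BIT(cpu)), the A7 ones are the same bits shifted up by 4
 * (BIT(cpu) << 4), and the two global wake-up enables sit in bits [11:10]
 * (GBL_WAKEUP_INT_MSK). E.g. enabling wake-ups for A7 CPU1 sets bit 5.
 */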

/* TC2 static dual-cluster configuration */
#define MAX_CLUSTERS 2

/*
 * Even though the SPC takes at most 3-5 ms to complete any OPP/COMMS
 * operation, the operation could start just before the jiffy counter is
 * about to be incremented, so use a 20 ms timeout (2 jiffies at HZ=100).
 */
#define TIMEOUT_US 20000
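
/*
 * Illustrative arithmetic for the value above: with HZ=100 a jiffy is
 * 10 ms, so usecs_to_jiffies(TIMEOUT_US) == usecs_to_jiffies(20000)
 * evaluates to 2 jiffies, comfortably above the 3-5 ms the SPC needs
 * even if the request lands just before a jiffy boundary.
 */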

#define MAX_OPPS 8
#define CA15_DVFS 0
#define CA7_DVFS 1
#define SPC_SYS_CFG 2
#define STAT_COMPLETE(type) ((1 << 0) << (type << 2))
#define STAT_ERR(type) ((1 << 1) << (type << 2))
#define RESPONSE_MASK(type) (STAT_COMPLETE(type) | STAT_ERR(type))
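
/*
 * Worked example of the status encoding above (arithmetic only): each
 * request type owns a nibble of PWC_STATUS, with the "complete" flag in
 * bit 0 and the "error" flag in bit 1 of that nibble. For SPC_SYS_CFG
 * (type 2) that gives STAT_COMPLETE == 0x100, STAT_ERR == 0x200 and
 * RESPONSE_MASK == 0x300.
 */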

struct ve_spc_opp {
        unsigned long freq;
        unsigned long u_volt;
};

struct ve_spc_drvdata {
        void __iomem *baseaddr;
        /*
         * A15 cluster identifier
         * It corresponds to the A15 processors' MPIDR[15:8] bitfield
         */
        u32 a15_clusid;
        uint32_t cur_rsp_mask;
        uint32_t cur_rsp_stat;
        struct semaphore sem;
        struct completion done;
        struct ve_spc_opp *opps[MAX_CLUSTERS];
        int num_opps[MAX_CLUSTERS];
};

static struct ve_spc_drvdata *info;

static inline bool cluster_is_a15(u32 cluster)
{
        return cluster == info->a15_clusid;
}

/**
 * ve_spc_global_wakeup_irq()
 *
 * Function to set/clear global wakeup IRQs. Not protected by locking since
 * it might be used in code paths where normal cacheable locks are not
 * working. Locking must be provided by the caller to ensure atomicity.
 *
 * @set: if true, global wake-up IRQs are set, if false they are cleared
 */
void ve_spc_global_wakeup_irq(bool set)
{
        u32 reg;

        reg = readl_relaxed(info->baseaddr + WAKE_INT_MASK);

        if (set)
                reg |= GBL_WAKEUP_INT_MSK;
        else
                reg &= ~GBL_WAKEUP_INT_MSK;

        writel_relaxed(reg, info->baseaddr + WAKE_INT_MASK);
}

/**
 * ve_spc_cpu_wakeup_irq()
 *
 * Function to set/clear per-CPU wake-up IRQs. Not protected by locking since
 * it might be used in code paths where normal cacheable locks are not
 * working. Locking must be provided by the caller to ensure atomicity.
 *
 * @cluster: mpidr[15:8] bitfield describing cluster affinity level
 * @cpu: mpidr[7:0] bitfield describing cpu affinity level
 * @set: if true, wake-up IRQs are set, if false they are cleared
 */
void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set)
{
        u32 mask, reg;

        if (cluster >= MAX_CLUSTERS)
                return;

        mask = BIT(cpu);

        if (!cluster_is_a15(cluster))
                mask <<= 4;

        reg = readl_relaxed(info->baseaddr + WAKE_INT_MASK);

        if (set)
                reg |= mask;
        else
                reg &= ~mask;

        writel_relaxed(reg, info->baseaddr + WAKE_INT_MASK);
}

/**
 * ve_spc_set_resume_addr() - set the jump address used for warm boot
 *
 * @cluster: mpidr[15:8] bitfield describing cluster affinity level
 * @cpu: mpidr[7:0] bitfield describing cpu affinity level
 * @addr: physical resume address
 */
void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr)
{
        void __iomem *baseaddr;

        if (cluster >= MAX_CLUSTERS)
                return;

        if (cluster_is_a15(cluster))
                baseaddr = info->baseaddr + A15_BX_ADDR0 + (cpu << 2);
        else
                baseaddr = info->baseaddr + A7_BX_ADDR0 + (cpu << 2);

        writel_relaxed(addr, baseaddr);
}

/**
 * ve_spc_powerdown()
 *
 * Function to enable/disable cluster powerdown. Not protected by locking
 * since it might be used in code paths where normal cacheable locks are not
 * working. Locking must be provided by the caller to ensure atomicity.
 *
 * @cluster: mpidr[15:8] bitfield describing cluster affinity level
 * @enable: if true enables powerdown, if false disables it
 */
void ve_spc_powerdown(u32 cluster, bool enable)
{
        u32 pwdrn_reg;

        if (cluster >= MAX_CLUSTERS)
                return;

        pwdrn_reg = cluster_is_a15(cluster) ? A15_PWRDN_EN : A7_PWRDN_EN;
        writel_relaxed(enable, info->baseaddr + pwdrn_reg);
}

static u32 standbywfi_cpu_mask(u32 cpu, u32 cluster)
{
        return cluster_is_a15(cluster) ?
                  STANDBYWFI_STAT_A15_CPU_MASK(cpu)
                : STANDBYWFI_STAT_A7_CPU_MASK(cpu);
}

/**
 * ve_spc_cpu_in_wfi(u32 cpu, u32 cluster)
 *
 * @cpu: mpidr[7:0] bitfield describing CPU affinity level within cluster
 * @cluster: mpidr[15:8] bitfield describing cluster affinity level
 *
 * @return: non-zero if and only if the specified CPU is in WFI
 *
 * Take care when interpreting the result of this function: a CPU might
 * be in WFI temporarily due to idle, and is not necessarily safely
 * parked.
 */
int ve_spc_cpu_in_wfi(u32 cpu, u32 cluster)
{
        int ret;
        u32 mask = standbywfi_cpu_mask(cpu, cluster);

        if (cluster >= MAX_CLUSTERS)
                return 1;

        ret = readl_relaxed(info->baseaddr + STANDBYWFI_STAT);

        pr_debug("%s: PCFGREG[0x%X] = 0x%08X, mask = 0x%X\n",
                 __func__, STANDBYWFI_STAT, ret, mask);

        return ret & mask;
}

static int ve_spc_get_performance(int cluster, u32 *freq)
{
        struct ve_spc_opp *opps = info->opps[cluster];
        u32 perf_cfg_reg = 0;
        u32 perf;

        perf_cfg_reg = cluster_is_a15(cluster) ? PERF_LVL_A15 : PERF_LVL_A7;

        perf = readl_relaxed(info->baseaddr + perf_cfg_reg);
        if (perf >= info->num_opps[cluster])
                return -EINVAL;

        opps += perf;
        *freq = opps->freq;

        return 0;
}

/* find closest match to given frequency in OPP table */
static int ve_spc_round_performance(int cluster, u32 freq)
{
        int idx, max_opp = info->num_opps[cluster];
        struct ve_spc_opp *opps = info->opps[cluster];
        u32 fmin = 0, fmax = ~0, ftmp;

        freq /= 1000; /* OPP entries in kHz */
        for (idx = 0; idx < max_opp; idx++, opps++) {
                ftmp = opps->freq;
                if (ftmp >= freq) {
                        if (ftmp <= fmax)
                                fmax = ftmp;
                } else {
                        if (ftmp >= fmin)
                                fmin = ftmp;
                }
        }
        if (fmax != ~0)
                return fmax * 1000;
        else
                return fmin * 1000;
}
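
/*
 * Example of the rounding policy above, with made-up OPP values (not a
 * real TC2 table): given OPPs at 500000, 700000 and 1000000 kHz, a
 * request for 800 MHz arrives as 800000000 Hz, is scaled to 800000 kHz
 * and rounds up to 1000000 kHz (the lowest OPP at or above the request),
 * returned as 1000000000 Hz. A request above every OPP falls back to the
 * highest entry via fmin.
 */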

static int ve_spc_find_performance_index(int cluster, u32 freq)
{
        int idx, max_opp = info->num_opps[cluster];
        struct ve_spc_opp *opps = info->opps[cluster];

        for (idx = 0; idx < max_opp; idx++, opps++)
                if (opps->freq == freq)
                        break;
        return (idx == max_opp) ? -EINVAL : idx;
}

static int ve_spc_waitforcompletion(int req_type)
{
        int ret = wait_for_completion_interruptible_timeout(
                        &info->done, usecs_to_jiffies(TIMEOUT_US));
        if (ret == 0)
                ret = -ETIMEDOUT;
        else if (ret > 0)
                ret = info->cur_rsp_stat & STAT_COMPLETE(req_type) ? 0 : -EIO;
        return ret;
}
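
/*
 * Request/response handshake used by the setters below: the caller takes
 * info->sem, programs cur_rsp_mask for its request type, writes the
 * request register and sleeps in ve_spc_waitforcompletion(). When the SPC
 * raises its interrupt, ve_spc_irq_handler() latches PWC_STATUS into
 * cur_rsp_stat and completes info->done; the STAT_COMPLETE/STAT_ERR bits
 * then decide between success and -EIO.
 */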

static int ve_spc_set_performance(int cluster, u32 freq)
{
        u32 perf_cfg_reg;
        int ret, perf, req_type;

        if (cluster_is_a15(cluster)) {
                req_type = CA15_DVFS;
                perf_cfg_reg = PERF_LVL_A15;
        } else {
                req_type = CA7_DVFS;
                perf_cfg_reg = PERF_LVL_A7;
        }

        perf = ve_spc_find_performance_index(cluster, freq);

        if (perf < 0)
                return perf;

        if (down_timeout(&info->sem, usecs_to_jiffies(TIMEOUT_US)))
                return -ETIME;

        init_completion(&info->done);
        info->cur_rsp_mask = RESPONSE_MASK(req_type);

        writel(perf, info->baseaddr + perf_cfg_reg);
        ret = ve_spc_waitforcompletion(req_type);

        info->cur_rsp_mask = 0;
        up(&info->sem);

        return ret;
}

static int ve_spc_read_sys_cfg(int func, int offset, uint32_t *data)
{
        int ret;

        if (down_timeout(&info->sem, usecs_to_jiffies(TIMEOUT_US)))
                return -ETIME;

        init_completion(&info->done);
        info->cur_rsp_mask = RESPONSE_MASK(SPC_SYS_CFG);

        /* Set the control value */
        writel(SYSCFG_START | func | offset >> 2, info->baseaddr + COMMS);
        ret = ve_spc_waitforcompletion(SPC_SYS_CFG);

        if (ret == 0)
                *data = readl(info->baseaddr + SYSCFG_RDATA);

        info->cur_rsp_mask = 0;
        up(&info->sem);

        return ret;
}

static irqreturn_t ve_spc_irq_handler(int irq, void *data)
{
        struct ve_spc_drvdata *drv_data = data;
        uint32_t status = readl_relaxed(drv_data->baseaddr + PWC_STATUS);

        if (info->cur_rsp_mask & status) {
                info->cur_rsp_stat = status;
                complete(&drv_data->done);
        }

        return IRQ_HANDLED;
}

/*
 *  +--------------------------+
 *  | 31      20 | 19        0 |
 *  +--------------------------+
 *  |   m_volt   |  freq(kHz)  |
 *  +--------------------------+
 */
#define MULT_FACTOR 20
#define VOLT_SHIFT 20
#define FREQ_MASK (0xFFFFF)
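/*
 * Worked example of the decode below, using a made-up register value
 * (not a real TC2 reading): data = 0x3840C350 splits into a voltage
 * field of 0x384 (900, stored as 900 * 1000 uV) and a frequency field
 * of 0xC350 (50000, scaled by MULT_FACTOR to 1000000 kHz, i.e. 1 GHz).
 */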
static int ve_spc_populate_opps(uint32_t cluster)
{
        uint32_t data = 0, off, ret, idx;
        struct ve_spc_opp *opps;

        opps = kcalloc(MAX_OPPS, sizeof(*opps), GFP_KERNEL);
        if (!opps)
                return -ENOMEM;

        info->opps[cluster] = opps;

        off = cluster_is_a15(cluster) ? A15_PERFVAL_BASE : A7_PERFVAL_BASE;
        for (idx = 0; idx < MAX_OPPS; idx++, off += 4, opps++) {
                ret = ve_spc_read_sys_cfg(SYSCFG_SCC, off, &data);
                if (!ret) {
                        opps->freq = (data & FREQ_MASK) * MULT_FACTOR;
                        opps->u_volt = (data >> VOLT_SHIFT) * 1000;
                } else {
                        break;
                }
        }
        info->num_opps[cluster] = idx;

        return ret;
}

static int ve_init_opp_table(struct device *cpu_dev)
{
        int cluster;
        int idx, ret = 0, max_opp;
        struct ve_spc_opp *opps;

        cluster = topology_physical_package_id(cpu_dev->id);
        cluster = cluster < 0 ? 0 : cluster;

        max_opp = info->num_opps[cluster];
        opps = info->opps[cluster];

        for (idx = 0; idx < max_opp; idx++, opps++) {
                ret = dev_pm_opp_add(cpu_dev, opps->freq * 1000, opps->u_volt);
                if (ret) {
                        dev_warn(cpu_dev, "failed to add opp %lu %lu\n",
                                 opps->freq, opps->u_volt);
                        return ret;
                }
        }
        return ret;
}

int __init ve_spc_init(void __iomem *baseaddr, u32 a15_clusid, int irq)
{
        int ret;

        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;

        info->baseaddr = baseaddr;
        info->a15_clusid = a15_clusid;

        if (irq <= 0) {
                pr_err(SPCLOG "Invalid IRQ %d\n", irq);
                kfree(info);
                return -EINVAL;
        }

        init_completion(&info->done);

        readl_relaxed(info->baseaddr + PWC_STATUS);

        ret = request_irq(irq, ve_spc_irq_handler, IRQF_TRIGGER_HIGH
                                | IRQF_ONESHOT, "vexpress-spc", info);
        if (ret) {
                pr_err(SPCLOG "IRQ %d request failed\n", irq);
                kfree(info);
                return -ENODEV;
        }

        sema_init(&info->sem, 1);
        /*
         * Multi-cluster systems may need this data when non-coherent, during
         * cluster power-up/power-down. Make sure driver info reaches main
         * memory.
         */
        sync_cache_w(info);
        sync_cache_w(&info);

        return 0;
}

struct clk_spc {
        struct clk_hw hw;
        int cluster;
};

#define to_clk_spc(spc) container_of(spc, struct clk_spc, hw)

static unsigned long spc_recalc_rate(struct clk_hw *hw,
                unsigned long parent_rate)
{
        struct clk_spc *spc = to_clk_spc(hw);
        u32 freq;

        if (ve_spc_get_performance(spc->cluster, &freq))
                return -EIO;

        return freq * 1000;
}

static long spc_round_rate(struct clk_hw *hw, unsigned long drate,
                unsigned long *parent_rate)
{
        struct clk_spc *spc = to_clk_spc(hw);

        return ve_spc_round_performance(spc->cluster, drate);
}

static int spc_set_rate(struct clk_hw *hw, unsigned long rate,
                unsigned long parent_rate)
{
        struct clk_spc *spc = to_clk_spc(hw);

        return ve_spc_set_performance(spc->cluster, rate / 1000);
}

static struct clk_ops clk_spc_ops = {
        .recalc_rate = spc_recalc_rate,
        .round_rate = spc_round_rate,
        .set_rate = spc_set_rate,
};

static struct clk *ve_spc_clk_register(struct device *cpu_dev)
{
        struct clk_init_data init;
        struct clk_spc *spc;

        spc = kzalloc(sizeof(*spc), GFP_KERNEL);
        if (!spc)
                return ERR_PTR(-ENOMEM);

        spc->hw.init = &init;
        spc->cluster = topology_physical_package_id(cpu_dev->id);

        spc->cluster = spc->cluster < 0 ? 0 : spc->cluster;

        init.name = dev_name(cpu_dev);
        init.ops = &clk_spc_ops;
        init.flags = CLK_GET_RATE_NOCACHE;
        init.num_parents = 0;

        return devm_clk_register(cpu_dev, &spc->hw);
}
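
/*
 * The clock registered above is looked up by device name (see the
 * clk_register_clkdev() call in ve_spc_clk_init() below), so a consumer
 * such as the vexpress-spc-cpufreq driver can drive DVFS through the
 * common clk API. A minimal sketch of that usage, with error handling
 * omitted and target_khz standing in for the consumer's chosen rate:
 *
 *      cpu_clk = clk_get(cpu_dev, NULL);
 *      clk_set_rate(cpu_clk, target_khz * 1000);
 *      rate = clk_get_rate(cpu_clk);
 *
 * clk_set_rate() lands in spc_set_rate() above, and clk_get_rate() always
 * re-reads the hardware because of CLK_GET_RATE_NOCACHE.
 */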

static int __init ve_spc_clk_init(void)
{
        int cpu, cluster;
        struct clk *clk;
        bool init_opp_table[MAX_CLUSTERS] = { false };

        if (!info)
                return 0; /* Continue only if SPC is initialised */

        if (ve_spc_populate_opps(0) || ve_spc_populate_opps(1)) {
                pr_err("failed to build OPP table\n");
                return -ENODEV;
        }

        for_each_possible_cpu(cpu) {
                struct device *cpu_dev = get_cpu_device(cpu);

                if (!cpu_dev) {
                        pr_warn("failed to get cpu%d device\n", cpu);
                        continue;
                }
                clk = ve_spc_clk_register(cpu_dev);
                if (IS_ERR(clk)) {
                        pr_warn("failed to register cpu%d clock\n", cpu);
                        continue;
                }
                if (clk_register_clkdev(clk, NULL, dev_name(cpu_dev))) {
                        pr_warn("failed to register cpu%d clock lookup\n", cpu);
                        continue;
                }

                cluster = topology_physical_package_id(cpu_dev->id);
                if (cluster < 0 || init_opp_table[cluster])
                        continue;

                if (ve_init_opp_table(cpu_dev))
                        pr_warn("failed to initialise cpu%d opp table\n", cpu);
                else if (dev_pm_opp_set_sharing_cpus(cpu_dev,
                                topology_core_cpumask(cpu_dev->id)))
                        pr_warn("failed to mark OPPs shared for cpu%d\n", cpu);
                else
                        init_opp_table[cluster] = true;
        }

        platform_device_register_simple("vexpress-spc-cpufreq", -1, NULL, 0);
        return 0;
}
device_initcall(ve_spc_clk_init);