// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt) "%s " fmt, KBUILD_MODNAME

#include <linux/atomic.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

#include <soc/qcom/cmd-db.h>
#include <soc/qcom/tcs.h>
#include <dt-bindings/soc/qcom,rpmh-rsc.h>

#include "rpmh-internal.h"

#define CREATE_TRACE_POINTS
#include "trace-rpmh.h"

#define RSC_DRV_TCS_OFFSET		672
#define RSC_DRV_CMD_OFFSET		20

/* DRV HW Solver Configuration Information Register */
#define DRV_SOLVER_CONFIG		0x04
#define DRV_HW_SOLVER_MASK		1
#define DRV_HW_SOLVER_SHIFT		24

/* DRV TCS Configuration Information Register */
#define DRV_PRNT_CHLD_CONFIG		0x0C
#define DRV_NUM_TCS_MASK		0x3F
#define DRV_NUM_TCS_SHIFT		6
#define DRV_NCPT_MASK			0x1F
#define DRV_NCPT_SHIFT			27

/* Offsets for common TCS Registers, one bit per TCS */
#define RSC_DRV_IRQ_ENABLE		0x00
#define RSC_DRV_IRQ_STATUS		0x04
#define RSC_DRV_IRQ_CLEAR		0x08	/* w/o; write 1 to clear */

/*
 * Offsets for per TCS Registers.
 *
 * TCSes start at 0x10 from tcs_base and are stored one after another.
 * Multiply tcs_id by RSC_DRV_TCS_OFFSET to find a given TCS and add one
 * of the below to find a register.
 */
#define RSC_DRV_CMD_WAIT_FOR_CMPL	0x10	/* 1 bit per command */
#define RSC_DRV_CONTROL			0x14
#define RSC_DRV_STATUS			0x18	/* zero if tcs is busy */
#define RSC_DRV_CMD_ENABLE		0x1C	/* 1 bit per command */

/*
 * Offsets for per command in a TCS.
 *
 * Commands (up to 16) start at 0x30 in a TCS; multiply command index
 * by RSC_DRV_CMD_OFFSET and add one of the below to find a register.
 */
#define RSC_DRV_CMD_MSGID		0x30
#define RSC_DRV_CMD_ADDR		0x34
#define RSC_DRV_CMD_DATA		0x38
#define RSC_DRV_CMD_STATUS		0x3C
#define RSC_DRV_CMD_RESP_DATA		0x40

#define TCS_AMC_MODE_ENABLE		BIT(16)
#define TCS_AMC_MODE_TRIGGER		BIT(24)

/* TCS CMD register bit mask */
#define CMD_MSGID_LEN			8
#define CMD_MSGID_RESP_REQ		BIT(8)
#define CMD_MSGID_WRITE			BIT(16)
#define CMD_STATUS_ISSUED		BIT(8)
#define CMD_STATUS_COMPL		BIT(16)

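/*
 * Illustrative only: how __tcs_buffer_write() composes a command's MSGID
 * from the masks above. A fire-and-forget write is
 * CMD_MSGID_LEN | CMD_MSGID_WRITE == 0x10008; a command that must signal
 * completion also ORs in CMD_MSGID_RESP_REQ, giving 0x10108.
 */
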
/*
 * Here's a high level overview of how all the registers in RPMH work
 * together:
 *
 * - The main rpmh-rsc address is the base of a register space that can
 *   be used to find overall configuration of the hardware
 *   (DRV_PRNT_CHLD_CONFIG). Also found within the rpmh-rsc register
 *   space are all the TCS blocks. The offset of the TCS blocks is
 *   specified in the device tree by "qcom,tcs-offset" and used to
 *   compute tcs_base.
 * - TCS blocks come one after another. Type, count, and order are
 *   specified by the device tree as "qcom,tcs-config".
 * - Each TCS block has some registers, then space for up to 16 commands.
 *   Note that though address space is reserved for 16 commands, fewer
 *   might be present. See ncpt (num cmds per TCS).
 *
 * Here's a picture:
 *
 *  +---------------------------------------------------+
 *  |RSC                                                |
 *  | ctrl                                              |
 *  |                                                   |
 *  | Drvs:                                             |
 *  | +-----------------------------------------------+ |
 *  | |DRV0                                           | |
 *  | | ctrl/config                                   | |
 *  | | IRQ                                           | |
 *  | |                                               | |
 *  | | TCSes:                                        | |
 *  | | +------------------------------------------+ | |
 *  | | |TCS0  |  |  |  |  |  |  |  |  |  |  |  |  | | |
 *  | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15| | |
 *  | | |      |  |  |  |  |  |  |  |  |  |  |  |  | | |
 *  | | +------------------------------------------+ | |
 *  | | +------------------------------------------+ | |
 *  | | |TCS1  |  |  |  |  |  |  |  |  |  |  |  |  | | |
 *  | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15| | |
 *  | | |      |  |  |  |  |  |  |  |  |  |  |  |  | | |
 *  | | +------------------------------------------+ | |
 *  | | +------------------------------------------+ | |
 *  | | |TCS2  |  |  |  |  |  |  |  |  |  |  |  |  | | |
 *  | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15| | |
 *  | | |      |  |  |  |  |  |  |  |  |  |  |  |  | | |
 *  | | +------------------------------------------+ | |
 *  | | ......                                        | |
 *  | +-----------------------------------------------+ |
 *  | +-----------------------------------------------+ |
 *  | |DRV1                                           | |
 *  | | (same as DRV0)                                | |
 *  | +-----------------------------------------------+ |
 *  | ......                                            |
 *  +---------------------------------------------------+
 */
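
/*
 * A worked example of the offsets above: the RSC_DRV_CMD_DATA register
 * for command 2 of TCS 3 lives at tcs_base + 3 * RSC_DRV_TCS_OFFSET +
 * 2 * RSC_DRV_CMD_OFFSET + RSC_DRV_CMD_DATA, i.e. tcs_base + 3 * 672 +
 * 2 * 20 + 0x38. This is exactly what tcs_cmd_addr() below computes.
 */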

static inline void __iomem *
tcs_reg_addr(const struct rsc_drv *drv, int reg, int tcs_id)
{
	return drv->tcs_base + RSC_DRV_TCS_OFFSET * tcs_id + reg;
}

static inline void __iomem *
tcs_cmd_addr(const struct rsc_drv *drv, int reg, int tcs_id, int cmd_id)
{
	return tcs_reg_addr(drv, reg, tcs_id) + RSC_DRV_CMD_OFFSET * cmd_id;
}

static u32 read_tcs_cmd(const struct rsc_drv *drv, int reg, int tcs_id,
			int cmd_id)
{
	return readl_relaxed(tcs_cmd_addr(drv, reg, tcs_id, cmd_id));
}

static u32 read_tcs_reg(const struct rsc_drv *drv, int reg, int tcs_id)
{
	return readl_relaxed(tcs_reg_addr(drv, reg, tcs_id));
}

static void write_tcs_cmd(const struct rsc_drv *drv, int reg, int tcs_id,
			  int cmd_id, u32 data)
{
	writel_relaxed(data, tcs_cmd_addr(drv, reg, tcs_id, cmd_id));
}

static void write_tcs_reg(const struct rsc_drv *drv, int reg, int tcs_id,
			  u32 data)
{
	writel_relaxed(data, tcs_reg_addr(drv, reg, tcs_id));
}

static void write_tcs_reg_sync(const struct rsc_drv *drv, int reg, int tcs_id,
			       u32 data)
{
	int i;

	writel(data, tcs_reg_addr(drv, reg, tcs_id));

	/*
	 * Wait until we read back the same value. Use a counter rather than
	 * ktime for timeout since this may be called after timekeeping stops.
	 */
	for (i = 0; i < USEC_PER_SEC; i++) {
		if (readl(tcs_reg_addr(drv, reg, tcs_id)) == data)
			return;
		udelay(1);
	}
	pr_err("%s: error writing %#x to %d:%#x\n", drv->name,
	       data, tcs_id, reg);
}

/**
 * tcs_is_free() - Return if a TCS is totally free.
 * @drv:    The RSC controller.
 * @tcs_id: The global ID of this TCS.
 *
 * Returns true if nobody has claimed this TCS (by setting tcs_in_use).
 *
 * Context: Must be called with the drv->lock held.
 *
 * Return: true if the given TCS is free.
 */
static bool tcs_is_free(struct rsc_drv *drv, int tcs_id)
{
	return !test_bit(tcs_id, drv->tcs_in_use);
}

/**
 * tcs_invalidate() - Invalidate all TCSes of the given type (sleep or wake).
 * @drv:  The RSC controller.
 * @type: SLEEP_TCS or WAKE_TCS
 *
 * This will clear the "slots" variable of the given tcs_group and also
 * tell the hardware to forget about all entries.
 *
 * The caller must ensure that no other RPMH actions are happening when this
 * function is called, since otherwise the device may immediately become
 * used again even before this function exits.
 */
static void tcs_invalidate(struct rsc_drv *drv, int type)
{
	int m;
	struct tcs_group *tcs = &drv->tcs[type];

	/* Caller ensures nobody else is running so no lock */
	if (bitmap_empty(tcs->slots, MAX_TCS_SLOTS))
		return;

	for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++) {
		write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, m, 0);
		write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, m, 0);
	}
	bitmap_zero(tcs->slots, MAX_TCS_SLOTS);
}

/**
 * rpmh_rsc_invalidate() - Invalidate sleep and wake TCSes.
 * @drv: The RSC controller.
 *
 * The caller must ensure that no other RPMH actions are happening when this
 * function is called, since otherwise the device may immediately become
 * used again even before this function exits.
 */
void rpmh_rsc_invalidate(struct rsc_drv *drv)
{
	tcs_invalidate(drv, SLEEP_TCS);
	tcs_invalidate(drv, WAKE_TCS);
}

/**
 * get_tcs_for_msg() - Get the tcs_group used to send the given message.
 * @drv: The RSC controller.
 * @msg: The message we want to send.
 *
 * This is normally pretty straightforward except if we are trying to send
 * an ACTIVE_ONLY message but don't have any active_only TCSes.
 *
 * Return: A pointer to a tcs_group or an ERR_PTR.
 */
static struct tcs_group *get_tcs_for_msg(struct rsc_drv *drv,
					 const struct tcs_request *msg)
{
	int type;
	struct tcs_group *tcs;

	switch (msg->state) {
	case RPMH_ACTIVE_ONLY_STATE:
		type = ACTIVE_TCS;
		break;
	case RPMH_WAKE_ONLY_STATE:
		type = WAKE_TCS;
		break;
	case RPMH_SLEEP_STATE:
		type = SLEEP_TCS;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	/*
	 * If we are making an active request on a RSC that does not have a
	 * dedicated TCS for active state use, then re-purpose a wake TCS to
	 * send active votes. This is safe because we ensure any active-only
	 * transfers have finished before we use it (maybe by running from
	 * the last CPU in PM code).
	 */
	tcs = &drv->tcs[type];
	if (msg->state == RPMH_ACTIVE_ONLY_STATE && !tcs->num_tcs)
		tcs = &drv->tcs[WAKE_TCS];

	return tcs;
}
/**
 * get_req_from_tcs() - Get a stashed request that was in flight on the given TCS.
 * @drv:    The RSC controller.
 * @tcs_id: The global ID of this TCS.
 *
 * For ACTIVE_ONLY transfers we want to call back into the client when the
 * transfer finishes. To do this we need the "request" that the client
 * originally provided us. This function grabs the request that we stashed
 * when we started the transfer.
 *
 * This only makes sense for ACTIVE_ONLY transfers since those are the only
 * ones we track sending (the only ones we enable interrupts for and the only
 * ones we call back to the client for).
 *
 * Return: The stashed request.
 */
static const struct tcs_request *get_req_from_tcs(struct rsc_drv *drv,
						  int tcs_id)
{
	struct tcs_group *tcs;
	int i;

	for (i = 0; i < TCS_TYPE_NR; i++) {
		tcs = &drv->tcs[i];
		if (tcs->mask & BIT(tcs_id))
			return tcs->req[tcs_id - tcs->offset];
	}

	return NULL;
}

/**
 * __tcs_set_trigger() - Start xfer on a TCS or unset trigger on a borrowed TCS
 * @drv:     The controller.
 * @tcs_id:  The global ID of this TCS.
 * @trigger: If true then untrigger/retrigger. If false then just untrigger.
 *
 * In the normal case we only ever call with "trigger=true" to start a
 * transfer. That will un-trigger/disable the TCS from the last transfer
 * then trigger/enable for this transfer.
 *
 * If we borrowed a wake TCS for an active-only transfer we'll also call
 * this function with "trigger=false" to just do the un-trigger/disable
 * before using the TCS for wake purposes again.
 *
 * Note that the AP is only in charge of triggering active-only transfers.
 * The AP never triggers sleep/wake values using this function.
 */
static void __tcs_set_trigger(struct rsc_drv *drv, int tcs_id, bool trigger)
{
	u32 enable;

	/*
	 * HW req: Clear the DRV_CONTROL and enable TCS again
	 * While clearing ensure that the AMC mode trigger is cleared
	 * and then the mode enable is cleared.
	 */
	enable = read_tcs_reg(drv, RSC_DRV_CONTROL, tcs_id);
	enable &= ~TCS_AMC_MODE_TRIGGER;
	write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
	enable &= ~TCS_AMC_MODE_ENABLE;
	write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);

	if (trigger) {
		/* Enable the AMC mode on the TCS and then trigger the TCS */
		enable = TCS_AMC_MODE_ENABLE;
		write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
		enable |= TCS_AMC_MODE_TRIGGER;
		write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
	}
}

/**
 * enable_tcs_irq() - Enable or disable interrupts on the given TCS.
 * @drv:    The controller.
 * @tcs_id: The global ID of this TCS.
 * @enable: If true then enable; if false then disable
 *
 * We only ever call this when we borrow a wake TCS for an active-only
 * transfer. For active-only TCSes interrupts are always left enabled.
 */
static void enable_tcs_irq(struct rsc_drv *drv, int tcs_id, bool enable)
{
	u32 data;

	data = readl_relaxed(drv->tcs_base + RSC_DRV_IRQ_ENABLE);
	if (enable)
		data |= BIT(tcs_id);
	else
		data &= ~BIT(tcs_id);
	writel_relaxed(data, drv->tcs_base + RSC_DRV_IRQ_ENABLE);
}

/**
 * tcs_tx_done() - TX Done interrupt handler.
 * @irq: The IRQ number (ignored).
 * @p:   Pointer to "struct rsc_drv".
 *
 * Called for ACTIVE_ONLY transfers (those are the only ones we enable the
 * IRQ for) when a transfer is done.
 *
 * Return: IRQ_HANDLED
 */
static irqreturn_t tcs_tx_done(int irq, void *p)
{
	struct rsc_drv *drv = p;
	int i, j, err = 0;
	unsigned long irq_status;
	const struct tcs_request *req;
	struct tcs_cmd *cmd;

	irq_status = readl_relaxed(drv->tcs_base + RSC_DRV_IRQ_STATUS);

	for_each_set_bit(i, &irq_status, BITS_PER_LONG) {
		req = get_req_from_tcs(drv, i);
		if (!req) {
			WARN_ON(1);
			goto skip;
		}

		err = 0;
		for (j = 0; j < req->num_cmds; j++) {
			u32 sts;

			cmd = &req->cmds[j];
			sts = read_tcs_cmd(drv, RSC_DRV_CMD_STATUS, i, j);
			if (!(sts & CMD_STATUS_ISSUED) ||
			    ((req->wait_for_compl || cmd->wait) &&
			     !(sts & CMD_STATUS_COMPL))) {
				pr_err("Incomplete request: %s: addr=%#x data=%#x",
				       drv->name, cmd->addr, cmd->data);
				err = -EIO;
			}
		}

		trace_rpmh_tx_done(drv, i, req, err);

		/*
		 * If wake tcs was re-purposed for sending active
		 * votes, clear AMC trigger & enable modes and
		 * disable interrupt for this TCS
		 */
		if (!drv->tcs[ACTIVE_TCS].num_tcs)
			__tcs_set_trigger(drv, i, false);
skip:
		/* Reclaim the TCS */
		write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, i, 0);
		write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, i, 0);
		writel_relaxed(BIT(i), drv->tcs_base + RSC_DRV_IRQ_CLEAR);
		spin_lock(&drv->lock);
		clear_bit(i, drv->tcs_in_use);
		/*
		 * Disable interrupt for WAKE TCS to avoid being
		 * spammed with interrupts coming when the solver
		 * sends its wake votes.
		 */
		if (!drv->tcs[ACTIVE_TCS].num_tcs)
			enable_tcs_irq(drv, i, false);
		spin_unlock(&drv->lock);
		wake_up(&drv->tcs_wait);
		if (req)
			rpmh_tx_done(req, err);
	}

	return IRQ_HANDLED;
}

/**
 * __tcs_buffer_write() - Write to TCS hardware from a request; don't trigger.
 * @drv:    The controller.
 * @tcs_id: The global ID of this TCS.
 * @cmd_id: The index within the TCS to start writing.
 * @msg:    The message we want to send, which will contain several addr/data
 *          pairs to program (but few enough that they all fit in one TCS).
 *
 * This is used for all types of transfers (active, sleep, and wake).
 */
static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id,
			       const struct tcs_request *msg)
{
	u32 msgid, cmd_msgid;
	u32 cmd_enable = 0;
	u32 cmd_complete;
	struct tcs_cmd *cmd;
	int i, j;

	cmd_msgid = CMD_MSGID_LEN;
	cmd_msgid |= msg->wait_for_compl ? CMD_MSGID_RESP_REQ : 0;
	cmd_msgid |= CMD_MSGID_WRITE;

	cmd_complete = read_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id);

	for (i = 0, j = cmd_id; i < msg->num_cmds; i++, j++) {
		cmd = &msg->cmds[i];
		cmd_enable |= BIT(j);
		cmd_complete |= cmd->wait << j;
		msgid = cmd_msgid;
		msgid |= cmd->wait ? CMD_MSGID_RESP_REQ : 0;

		write_tcs_cmd(drv, RSC_DRV_CMD_MSGID, tcs_id, j, msgid);
		write_tcs_cmd(drv, RSC_DRV_CMD_ADDR, tcs_id, j, cmd->addr);
		write_tcs_cmd(drv, RSC_DRV_CMD_DATA, tcs_id, j, cmd->data);
		/* trace_rpmh_send_msg_rcuidle(drv, tcs_id, j, msgid, cmd); */
	}

	write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, cmd_complete);
	cmd_enable |= read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id);
	write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, cmd_enable);
}

/**
 * check_for_req_inflight() - Look to see if conflicting cmds are in flight.
 * @drv: The controller.
 * @tcs: A pointer to the tcs_group used for ACTIVE_ONLY transfers.
 * @msg: The message we want to send, which will contain several addr/data
 *       pairs to program (but few enough that they all fit in one TCS).
 *
 * This will walk through the TCSes in the group and check if any of them
 * appear to be sending to addresses referenced in the message. If it finds
 * one it'll return -EBUSY.
 *
 * Only for use for active-only transfers.
 *
 * Must be called with the drv->lock held since that protects tcs_in_use.
 *
 * Return: 0 if nothing in flight or -EBUSY if we should try again later.
 *         The caller must re-enable interrupts between tries since that's
 *         the only way tcs_is_free() will ever return true and the only way
 *         RSC_DRV_CMD_ENABLE will ever be cleared.
 */
static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs,
				  const struct tcs_request *msg)
{
	unsigned long curr_enabled;
	u32 addr;
	int i, j, k;
	int tcs_id = tcs->offset;

	for (i = 0; i < tcs->num_tcs; i++, tcs_id++) {
		if (tcs_is_free(drv, tcs_id))
			continue;

		curr_enabled = read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id);

		for_each_set_bit(j, &curr_enabled, MAX_CMDS_PER_TCS) {
			addr = read_tcs_cmd(drv, RSC_DRV_CMD_ADDR, tcs_id, j);
			for (k = 0; k < msg->num_cmds; k++) {
				if (addr == msg->cmds[k].addr)
					return -EBUSY;
			}
		}
	}

	return 0;
}

/**
 * find_free_tcs() - Find free tcs in the given tcs_group; only for active.
 * @tcs: A pointer to the active-only tcs_group (or the wake tcs_group if
 *       we borrowed it because there are zero active-only ones).
 *
 * Must be called with the drv->lock held since that protects tcs_in_use.
 *
 * Return: The first tcs that's free.
 */
static int find_free_tcs(struct tcs_group *tcs)
{
	int i;

	for (i = 0; i < tcs->num_tcs; i++) {
		if (tcs_is_free(tcs->drv, tcs->offset + i))
			return tcs->offset + i;
	}

	return -EBUSY;
}

/**
 * claim_tcs_for_req() - Claim a tcs in the given tcs_group; only for active.
 * @drv: The controller.
 * @tcs: The tcs_group used for ACTIVE_ONLY transfers.
 * @msg: The data to be sent.
 *
 * Claims a tcs in the given tcs_group while making sure that no existing cmd
 * is in flight that would conflict with the one in @msg.
 *
 * Context: Must be called with the drv->lock held since that protects
 * tcs_in_use.
 *
 * Return: The id of the claimed tcs or -EBUSY if a matching msg is in flight
 * or the tcs_group is full.
 */
static int claim_tcs_for_req(struct rsc_drv *drv, struct tcs_group *tcs,
			     const struct tcs_request *msg)
{
	int ret;

	/*
	 * The h/w does not like if we send a request to the same address,
	 * when one is already in-flight or being processed.
	 */
	ret = check_for_req_inflight(drv, tcs, msg);
	if (ret)
		return ret;

	return find_free_tcs(tcs);
}
606*4882a593Smuzhiyun
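/*
 * Illustrative only: a minimal sketch of how a caller might fill a
 * request for rpmh_rsc_send_data() below. The field names come from
 * <soc/qcom/tcs.h>; the addr/data values are placeholders, not real
 * command-db addresses.
 *
 *	struct tcs_cmd cmd = { .addr = 0x30000, .data = 0x1, .wait = 1 };
 *	struct tcs_request msg = {
 *		.state = RPMH_ACTIVE_ONLY_STATE,
 *		.wait_for_compl = true,
 *		.num_cmds = 1,
 *		.cmds = &cmd,
 *	};
 *
 *	ret = rpmh_rsc_send_data(drv, &msg);
 */
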
/**
 * rpmh_rsc_send_data() - Write / trigger active-only message.
 * @drv: The controller.
 * @msg: The data to be sent.
 *
 * NOTES:
 * - This is only used for "ACTIVE_ONLY" since the limitations of this
 *   function don't make sense for sleep/wake cases.
 * - To do the transfer, we will grab a whole TCS for ourselves--we don't
 *   try to share. If there are none available we'll wait indefinitely
 *   for a free one.
 * - This function will not wait for the commands to be finished, only for
 *   data to be programmed into the RPMh. See rpmh_tx_done() which will
 *   be called when the transfer is fully complete.
 * - This function must be called with interrupts enabled. If the hardware
 *   is busy doing someone else's transfer we need that transfer to fully
 *   finish so that we can have the hardware, and to fully finish it needs
 *   the interrupt handler to run. If the interrupt is set to run on the
 *   active CPU this can never happen if interrupts are disabled.
 *
 * Return: 0 on success, -EINVAL on error.
 */
int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg)
{
	struct tcs_group *tcs;
	int tcs_id;
	unsigned long flags;

	tcs = get_tcs_for_msg(drv, msg);
	if (IS_ERR(tcs))
		return PTR_ERR(tcs);

	spin_lock_irqsave(&drv->lock, flags);

	/* Wait forever for a free tcs. It better be there eventually! */
	wait_event_lock_irq(drv->tcs_wait,
			    (tcs_id = claim_tcs_for_req(drv, tcs, msg)) >= 0,
			    drv->lock);

	tcs->req[tcs_id - tcs->offset] = msg;
	set_bit(tcs_id, drv->tcs_in_use);
	if (msg->state == RPMH_ACTIVE_ONLY_STATE && tcs->type != ACTIVE_TCS) {
		/*
		 * Clear previously programmed WAKE commands in selected
		 * repurposed TCS to avoid triggering them. tcs->slots will be
		 * cleaned from rpmh_flush() by invoking rpmh_rsc_invalidate()
		 */
		write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
		write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, 0);
		enable_tcs_irq(drv, tcs_id, true);
	}
	spin_unlock_irqrestore(&drv->lock, flags);

	/*
	 * These two can be done after the lock is released because:
	 * - We marked "tcs_in_use" under lock.
	 * - Once "tcs_in_use" has been marked nobody else could be writing
	 *   to these registers until the interrupt goes off.
	 * - The interrupt can't go off until we trigger w/ the last line
	 *   of __tcs_set_trigger() below.
	 */
	__tcs_buffer_write(drv, tcs_id, 0, msg);
	__tcs_set_trigger(drv, tcs_id, true);

	return 0;
}
673*4882a593Smuzhiyun
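/*
 * A worked example of the slot math in find_slots() below (illustrative
 * numbers, not from any spec): with ncpt == 16, a free run found at
 * slot 35 for a 3-command message maps to the third TCS of the group
 * (35 / 16 == 2, so *tcs_id == tcs->offset + 2) and starts at command
 * index 35 % 16 == 3. The do/while loop re-searches from the next TCS
 * boundary whenever a candidate run would straddle one, so a message
 * never spans two TCSes.
 */
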
/**
 * find_slots() - Find a place to write the given message.
 * @tcs:    The tcs group to search.
 * @msg:    The message we want to find room for.
 * @tcs_id: If we return 0 from the function, we return the global ID of the
 *          TCS to write to here.
 * @cmd_id: If we return 0 from the function, we return the index of
 *          the command array of the returned TCS where the client should
 *          start writing the message.
 *
 * Only for use on sleep/wake TCSes since those are the only ones we maintain
 * tcs->slots for.
 *
 * Return: -ENOMEM if there was no room, else 0.
 */
static int find_slots(struct tcs_group *tcs, const struct tcs_request *msg,
		      int *tcs_id, int *cmd_id)
{
	int slot, offset;
	int i = 0;

	/* Do over, until we can fit the full payload in a single TCS */
	do {
		slot = bitmap_find_next_zero_area(tcs->slots, MAX_TCS_SLOTS,
						  i, msg->num_cmds, 0);
		if (slot >= tcs->num_tcs * tcs->ncpt)
			return -ENOMEM;
		i += tcs->ncpt;
	} while (slot + msg->num_cmds - 1 >= i);

	bitmap_set(tcs->slots, slot, msg->num_cmds);

	offset = slot / tcs->ncpt;
	*tcs_id = offset + tcs->offset;
	*cmd_id = slot % tcs->ncpt;

	return 0;
}

/**
 * rpmh_rsc_write_ctrl_data() - Write request to controller but don't trigger.
 * @drv: The controller.
 * @msg: The data to be written to the controller.
 *
 * This should only be called for sleep/wake state, never active-only
 * state.
 *
 * The caller must ensure that no other RPMH actions are happening and the
 * controller is idle when this function is called since it runs lockless.
 *
 * Return: 0 if no error; else -error.
 */
int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv, const struct tcs_request *msg)
{
	struct tcs_group *tcs;
	int tcs_id = 0, cmd_id = 0;
	int ret;

	tcs = get_tcs_for_msg(drv, msg);
	if (IS_ERR(tcs))
		return PTR_ERR(tcs);

	/* find the TCS id and the command in the TCS to write to */
	ret = find_slots(tcs, msg, &tcs_id, &cmd_id);
	if (!ret)
		__tcs_buffer_write(drv, tcs_id, cmd_id, msg);

	return ret;
}

/**
 * rpmh_rsc_ctrlr_is_busy() - Check if any of the AMCs are busy.
 * @drv: The controller
 *
 * Checks if any of the AMCs are busy in handling ACTIVE sets.
 * This is called from the last cpu powering down before flushing
 * SLEEP and WAKE sets. If AMCs are busy, controller can not enter
 * power collapse, so deny from the last cpu's pm notification.
 *
 * Context: Must be called with the drv->lock held.
 *
 * Return:
 * * False	- AMCs are idle
 * * True	- AMCs are busy
 */
static bool rpmh_rsc_ctrlr_is_busy(struct rsc_drv *drv)
{
	int m;
	struct tcs_group *tcs = &drv->tcs[ACTIVE_TCS];

	/*
	 * If we made an active request on a RSC that does not have a
	 * dedicated TCS for active state use, then re-purposed wake TCSes
	 * should be checked for not busy, because we used wake TCSes for
	 * active requests in this case.
	 */
	if (!tcs->num_tcs)
		tcs = &drv->tcs[WAKE_TCS];

	for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++) {
		if (!tcs_is_free(drv, m))
			return true;
	}

	return false;
}

/**
 * rpmh_rsc_cpu_pm_callback() - Check if any of the AMCs are busy.
 * @nfb:    Pointer to the notifier block in struct rsc_drv.
 * @action: CPU_PM_ENTER, CPU_PM_ENTER_FAILED, or CPU_PM_EXIT.
 * @v:      Unused
 *
 * This function is given to cpu_pm_register_notifier so we can be informed
 * about when CPUs go down. When all CPUs go down we know no more active
 * transfers will be started so we write sleep/wake sets. This function gets
 * called from cpuidle code paths and also at system suspend time.
 *
 * If it's the last CPU going down and the AMCs are not busy, this writes
 * cached sleep and wake messages to the TCSes. The firmware then takes care
 * of triggering them when entering deepest low power modes.
 *
 * Return: See cpu_pm_register_notifier()
 */
static int rpmh_rsc_cpu_pm_callback(struct notifier_block *nfb,
				    unsigned long action, void *v)
{
	struct rsc_drv *drv = container_of(nfb, struct rsc_drv, rsc_pm);
	int ret = NOTIFY_OK;
	int cpus_in_pm;

	switch (action) {
	case CPU_PM_ENTER:
		cpus_in_pm = atomic_inc_return(&drv->cpus_in_pm);
		/*
		 * NOTE: comments for num_online_cpus() point out that it's
		 * only a snapshot so we need to be careful. It should be OK
		 * for us to use, though. It's important for us not to miss
		 * if we're the last CPU going down so it would only be a
		 * problem if a CPU went offline right after we did the check
		 * AND that CPU was not idle AND that CPU was the last non-idle
		 * CPU. That can't happen. CPUs would have to come out of idle
		 * before the CPU could go offline.
		 */
		if (cpus_in_pm < num_online_cpus())
			return NOTIFY_OK;
		break;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		atomic_dec(&drv->cpus_in_pm);
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}

	/*
	 * It's likely we're on the last CPU. Grab the drv->lock and write
	 * out the sleep/wake commands to RPMH hardware. Grabbing the lock
	 * means that if we race with another CPU coming up we are still
	 * guaranteed to be safe. If another CPU came up just after we checked
	 * and has grabbed the lock or started an active transfer then we'll
	 * notice we're busy and abort. If another CPU comes up after we start
	 * flushing it will be blocked from starting an active transfer until
	 * we're done flushing. If another CPU starts an active transfer after
	 * we release the lock we're still OK because we're no longer the last
	 * CPU.
	 */
	if (spin_trylock(&drv->lock)) {
		if (rpmh_rsc_ctrlr_is_busy(drv) || rpmh_flush(&drv->client))
			ret = NOTIFY_BAD;
		spin_unlock(&drv->lock);
	} else {
		/* Another CPU must be up */
		return NOTIFY_OK;
	}

	if (ret == NOTIFY_BAD) {
		/* Double-check if we're here because someone else is up */
		if (cpus_in_pm < num_online_cpus())
			ret = NOTIFY_OK;
		else
			/* We won't be called w/ CPU_PM_ENTER_FAILED */
			atomic_dec(&drv->cpus_in_pm);
	}

	return ret;
}
static int rpmh_probe_tcs_config(struct platform_device *pdev,
				 struct rsc_drv *drv, void __iomem *base)
{
	struct tcs_type_config {
		u32 type;
		u32 n;
	} tcs_cfg[TCS_TYPE_NR] = { { 0 } };
	struct device_node *dn = pdev->dev.of_node;
	u32 config, max_tcs, ncpt, offset;
	int i, ret, n, st = 0;
	struct tcs_group *tcs;

	ret = of_property_read_u32(dn, "qcom,tcs-offset", &offset);
	if (ret)
		return ret;
	drv->tcs_base = base + offset;

	config = readl_relaxed(base + DRV_PRNT_CHLD_CONFIG);

	max_tcs = config;
	max_tcs &= DRV_NUM_TCS_MASK << (DRV_NUM_TCS_SHIFT * drv->id);
	max_tcs = max_tcs >> (DRV_NUM_TCS_SHIFT * drv->id);

	ncpt = config & (DRV_NCPT_MASK << DRV_NCPT_SHIFT);
	ncpt = ncpt >> DRV_NCPT_SHIFT;

	n = of_property_count_u32_elems(dn, "qcom,tcs-config");
	if (n != 2 * TCS_TYPE_NR)
		return -EINVAL;

	for (i = 0; i < TCS_TYPE_NR; i++) {
		ret = of_property_read_u32_index(dn, "qcom,tcs-config",
						 i * 2, &tcs_cfg[i].type);
		if (ret)
			return ret;
		if (tcs_cfg[i].type >= TCS_TYPE_NR)
			return -EINVAL;

		ret = of_property_read_u32_index(dn, "qcom,tcs-config",
						 i * 2 + 1, &tcs_cfg[i].n);
		if (ret)
			return ret;
		if (tcs_cfg[i].n > MAX_TCS_PER_TYPE)
			return -EINVAL;
	}

	for (i = 0; i < TCS_TYPE_NR; i++) {
		tcs = &drv->tcs[tcs_cfg[i].type];
		if (tcs->drv)
			return -EINVAL;
		tcs->drv = drv;
		tcs->type = tcs_cfg[i].type;
		tcs->num_tcs = tcs_cfg[i].n;
		tcs->ncpt = ncpt;

		if (!tcs->num_tcs || tcs->type == CONTROL_TCS)
			continue;

		if (st + tcs->num_tcs > max_tcs ||
		    st + tcs->num_tcs >= BITS_PER_BYTE * sizeof(tcs->mask))
			return -EINVAL;

		tcs->mask = ((1 << tcs->num_tcs) - 1) << st;
		tcs->offset = st;
		st += tcs->num_tcs;
	}

	drv->num_tcs = st;

	return 0;
}
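
/*
 * For reference, a sketch of the device tree properties parsed above,
 * with illustrative values (loosely modeled on an SDM845-style apps_rsc;
 * consult the qcom,rpmh-rsc binding for the values on a real board):
 *
 *	rsc@179c0000 {
 *		compatible = "qcom,rpmh-rsc";
 *		label = "apps_rsc";
 *		qcom,drv-id = <2>;
 *		qcom,tcs-offset = <0xd00>;
 *		qcom,tcs-config = <ACTIVE_TCS 2>, <SLEEP_TCS 3>,
 *				  <WAKE_TCS 3>, <CONTROL_TCS 1>;
 *	};
 */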

static int rpmh_rsc_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct rsc_drv *drv;
	struct resource *res;
	char drv_id[10] = {0};
	int ret, irq;
	u32 solver_config;
	void __iomem *base;

	/*
	 * Even though RPMh doesn't directly use cmd-db, all of its children
	 * do. To avoid adding this check to our children we'll do it now.
	 */
	ret = cmd_db_ready();
	if (ret) {
		if (ret != -EPROBE_DEFER)
			dev_err(&pdev->dev, "Command DB not available (%d)\n",
				ret);
		return ret;
	}

	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;

	ret = of_property_read_u32(dn, "qcom,drv-id", &drv->id);
	if (ret)
		return ret;

	drv->name = of_get_property(dn, "label", NULL);
	if (!drv->name)
		drv->name = dev_name(&pdev->dev);

	snprintf(drv_id, ARRAY_SIZE(drv_id), "drv-%d", drv->id);
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, drv_id);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	ret = rpmh_probe_tcs_config(pdev, drv, base);
	if (ret)
		return ret;

	spin_lock_init(&drv->lock);
	init_waitqueue_head(&drv->tcs_wait);
	bitmap_zero(drv->tcs_in_use, MAX_TCS_NR);

	irq = platform_get_irq(pdev, drv->id);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, tcs_tx_done,
			       IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND,
			       drv->name, drv);
	if (ret)
		return ret;
	/*
	 * CPU PM notifications are not required for controllers that support
	 * 'HW solver' mode where they can be in autonomous mode executing low
	 * power mode to power down.
	 */
	solver_config = readl_relaxed(base + DRV_SOLVER_CONFIG);
	solver_config &= DRV_HW_SOLVER_MASK << DRV_HW_SOLVER_SHIFT;
	solver_config = solver_config >> DRV_HW_SOLVER_SHIFT;
	if (!solver_config) {
		drv->rsc_pm.notifier_call = rpmh_rsc_cpu_pm_callback;
		cpu_pm_register_notifier(&drv->rsc_pm);
	}

	/* Enable the active TCS to send requests immediately */
	writel_relaxed(drv->tcs[ACTIVE_TCS].mask,
		       drv->tcs_base + RSC_DRV_IRQ_ENABLE);

	spin_lock_init(&drv->client.cache_lock);
	INIT_LIST_HEAD(&drv->client.cache);
	INIT_LIST_HEAD(&drv->client.batch_cache);

	dev_set_drvdata(&pdev->dev, drv);

	return devm_of_platform_populate(&pdev->dev);
}

static const struct of_device_id rpmh_drv_match[] = {
	{ .compatible = "qcom,rpmh-rsc", },
	{ }
};
MODULE_DEVICE_TABLE(of, rpmh_drv_match);

static struct platform_driver rpmh_driver = {
	.probe = rpmh_rsc_probe,
	.driver = {
		  .name = "rpmh",
		  .of_match_table = rpmh_drv_match,
		  .suppress_bind_attrs = true,
	},
};

static int __init rpmh_driver_init(void)
{
	return platform_driver_register(&rpmh_driver);
}
arch_initcall(rpmh_driver_init);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. RPMh Driver");
MODULE_LICENSE("GPL v2");