// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

#include <soc/qcom/rpmh.h>

#include "rpmh-internal.h"

#define RPMH_TIMEOUT_MS		msecs_to_jiffies(10000)

#define DEFINE_RPMH_MSG_ONSTACK(device, s, q, name)	\
	struct rpmh_request name = {			\
		.msg = {				\
			.state = s,			\
			.cmds = name.cmd,		\
			.num_cmds = 0,			\
			.wait_for_compl = true,		\
		},					\
		.cmd = { { 0 } },			\
		.completion = q,			\
		.dev = device,				\
		.needs_free = false,			\
	}

#define ctrlr_to_drv(ctrlr) container_of(ctrlr, struct rsc_drv, client)

/**
 * struct cache_req: the request object for caching
 *
 * @addr: the address of the resource
 * @sleep_val: the sleep vote
 * @wake_val: the wake vote
 * @list: linked list obj
 */
struct cache_req {
	u32 addr;
	u32 sleep_val;
	u32 wake_val;
	struct list_head list;
};

/**
 * struct batch_cache_req - An entry in our batch cache
 *
 * @list: linked list obj
 * @count: number of messages
 * @rpm_msgs: the messages
 */
struct batch_cache_req {
	struct list_head list;
	int count;
	struct rpmh_request rpm_msgs[];
};

static struct rpmh_ctrlr *get_rpmh_ctrlr(const struct device *dev)
{
	struct rsc_drv *drv = dev_get_drvdata(dev->parent);

	return &drv->client;
}

void rpmh_tx_done(const struct tcs_request *msg, int r)
{
	struct rpmh_request *rpm_msg = container_of(msg, struct rpmh_request,
						    msg);
	struct completion *compl = rpm_msg->completion;
	bool free = rpm_msg->needs_free;

	rpm_msg->err = r;

	if (r)
		dev_err(rpm_msg->dev, "RPMH TX fail in msg addr=%#x, err=%d\n",
			rpm_msg->msg.cmds[0].addr, r);

	if (!compl)
		goto exit;

	/* Signal the blocking thread we are done */
	complete(compl);

exit:
	if (free)
		kfree(rpm_msg);
}

static struct cache_req *__find_req(struct rpmh_ctrlr *ctrlr, u32 addr)
{
	struct cache_req *p, *req = NULL;

	list_for_each_entry(p, &ctrlr->cache, list) {
		if (p->addr == addr) {
			req = p;
			break;
		}
	}

	return req;
}

static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr,
					   enum rpmh_state state,
					   struct tcs_cmd *cmd)
{
	struct cache_req *req;
	unsigned long flags;
	u32 old_sleep_val, old_wake_val;

	spin_lock_irqsave(&ctrlr->cache_lock, flags);
	req = __find_req(ctrlr, cmd->addr);
	if (req)
		goto existing;

	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		req = ERR_PTR(-ENOMEM);
		goto unlock;
	}

	req->addr = cmd->addr;
	req->sleep_val = req->wake_val = UINT_MAX;
	list_add_tail(&req->list, &ctrlr->cache);

existing:
	old_sleep_val = req->sleep_val;
	old_wake_val = req->wake_val;

	switch (state) {
	case RPMH_ACTIVE_ONLY_STATE:
	case RPMH_WAKE_ONLY_STATE:
		req->wake_val = cmd->data;
		break;
	case RPMH_SLEEP_STATE:
		req->sleep_val = cmd->data;
		break;
	}

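	/*
	 * Mark the controller dirty only when this request actually changed
	 * a vote and both the sleep and wake values are now known; an entry
	 * with either vote still at UINT_MAX is incomplete and is skipped by
	 * rpmh_flush() anyway.
	 */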
	ctrlr->dirty |= (req->sleep_val != old_sleep_val ||
			 req->wake_val != old_wake_val) &&
			 req->sleep_val != UINT_MAX &&
			 req->wake_val != UINT_MAX;

unlock:
	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);

	return req;
}

/**
 * __rpmh_write: Cache and send the RPMH request
 *
 * @dev: The device making the request
 * @state: Active/Sleep request type
 * @rpm_msg: The data that needs to be sent (cmds).
 *
 * Cache the RPMH request and send if the state is ACTIVE_ONLY.
 * SLEEP/WAKE_ONLY requests are not sent to the controller at
 * this time. Use rpmh_flush() to send them to the controller.
 */
static int __rpmh_write(const struct device *dev, enum rpmh_state state,
			struct rpmh_request *rpm_msg)
{
	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
	int ret = -EINVAL;
	struct cache_req *req;
	int i;

	rpm_msg->msg.state = state;

	/* Cache the request in our store and link the payload */
	for (i = 0; i < rpm_msg->msg.num_cmds; i++) {
		req = cache_rpm_request(ctrlr, state, &rpm_msg->msg.cmds[i]);
		if (IS_ERR(req))
			return PTR_ERR(req);
	}

	rpm_msg->msg.state = state;

	if (state == RPMH_ACTIVE_ONLY_STATE) {
		WARN_ON(irqs_disabled());
		ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg);
	} else {
		/* Clean up our call by spoofing tx_done */
		ret = 0;
		rpmh_tx_done(&rpm_msg->msg, ret);
	}

	return ret;
}

static int __fill_rpmh_msg(struct rpmh_request *req, enum rpmh_state state,
			   const struct tcs_cmd *cmd, u32 n)
{
	if (!cmd || !n || n > MAX_RPMH_PAYLOAD)
		return -EINVAL;

	memcpy(req->cmd, cmd, n * sizeof(*cmd));

	req->msg.state = state;
	req->msg.cmds = req->cmd;
	req->msg.num_cmds = n;

	return 0;
}

/**
 * rpmh_write_async: Write a set of RPMH commands
 *
 * @dev: The device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The number of elements in payload
 *
 * Write a set of RPMH commands, the order of commands is maintained
 * and will be sent as a single shot.
 */
int rpmh_write_async(const struct device *dev, enum rpmh_state state,
		     const struct tcs_cmd *cmd, u32 n)
{
	struct rpmh_request *rpm_msg;
	int ret;

	rpm_msg = kzalloc(sizeof(*rpm_msg), GFP_ATOMIC);
	if (!rpm_msg)
		return -ENOMEM;
	rpm_msg->needs_free = true;

	ret = __fill_rpmh_msg(rpm_msg, state, cmd, n);
	if (ret) {
		kfree(rpm_msg);
		return ret;
	}

	return __rpmh_write(dev, state, rpm_msg);
}
EXPORT_SYMBOL(rpmh_write_async);

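/*
 * Illustrative sketch (not part of the driver): a client might queue a
 * fire-and-forget active vote roughly like the snippet below. The address
 * and data values are hypothetical; real clients take them from their own
 * resource definitions.
 *
 *	struct tcs_cmd cmd = {
 *		.addr = 0x30000,	// hypothetical resource address
 *		.data = 0x1,		// hypothetical vote value
 *	};
 *	int ret = rpmh_write_async(dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
 *	if (ret)
 *		dev_err(dev, "rpmh_write_async failed: %d\n", ret);
 */
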
/**
 * rpmh_write: Write a set of RPMH commands and block until response
 *
 * @dev: The device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The number of elements in @cmd
 *
 * May sleep. Do not call from atomic contexts.
 */
int rpmh_write(const struct device *dev, enum rpmh_state state,
	       const struct tcs_cmd *cmd, u32 n)
{
	DECLARE_COMPLETION_ONSTACK(compl);
	DEFINE_RPMH_MSG_ONSTACK(dev, state, &compl, rpm_msg);
	int ret;

	if (!cmd || !n || n > MAX_RPMH_PAYLOAD)
		return -EINVAL;

	memcpy(rpm_msg.cmd, cmd, n * sizeof(*cmd));
	rpm_msg.msg.num_cmds = n;

	ret = __rpmh_write(dev, state, &rpm_msg);
	if (ret)
		return ret;

	ret = wait_for_completion_timeout(&compl, RPMH_TIMEOUT_MS);
	WARN_ON(!ret);
	return (ret > 0) ? 0 : -ETIMEDOUT;
}
EXPORT_SYMBOL(rpmh_write);

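/*
 * Illustrative sketch (not part of the driver): a blocking, ordered write of
 * more than one command. The addresses and values below are hypothetical;
 * the constraints visible here are that the array holds at most
 * MAX_RPMH_PAYLOAD entries and the caller can sleep.
 *
 *	struct tcs_cmd cmds[] = {
 *		{ .addr = 0x30000, .data = 0x1 },	// hypothetical
 *		{ .addr = 0x30010, .data = 0x0 },	// hypothetical
 *	};
 *	int ret = rpmh_write(dev, RPMH_ACTIVE_ONLY_STATE, cmds,
 *			     ARRAY_SIZE(cmds));
 */
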
static void cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrlr->cache_lock, flags);
	list_add_tail(&req->list, &ctrlr->batch_cache);
	ctrlr->dirty = true;
	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
}

static int flush_batch(struct rpmh_ctrlr *ctrlr)
{
	struct batch_cache_req *req;
	const struct rpmh_request *rpm_msg;
	int ret = 0;
	int i;

	/* Send Sleep/Wake requests to the controller, expect no response */
	list_for_each_entry(req, &ctrlr->batch_cache, list) {
		for (i = 0; i < req->count; i++) {
			rpm_msg = req->rpm_msgs + i;
			ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
						       &rpm_msg->msg);
			if (ret)
				break;
		}
	}

	return ret;
}

/**
 * rpmh_write_batch: Write multiple sets of RPMH commands and wait for the
 * batch to finish.
 *
 * @dev: the device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The array of count of elements in each batch, 0 terminated.
 *
 * Write a request to the RSC controller without caching. If the request
 * state is ACTIVE, then the requests are treated as completion requests
 * and sent to the controller immediately. The function waits until all the
 * commands are complete. If the request was to SLEEP or WAKE_ONLY, then the
 * request is sent as fire-n-forget and no ack is expected.
 *
 * May sleep. Do not call from atomic contexts for ACTIVE_ONLY requests.
 */
int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
		     const struct tcs_cmd *cmd, u32 *n)
{
	struct batch_cache_req *req;
	struct rpmh_request *rpm_msgs;
	struct completion *compls;
	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
	unsigned long time_left;
	int count = 0;
	int ret, i;
	void *ptr;

	if (!cmd || !n)
		return -EINVAL;

	while (n[count] > 0)
		count++;
	if (!count)
		return -EINVAL;

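	/*
	 * Single allocation for the whole batch: the batch_cache_req header,
	 * then 'count' rpmh_request entries in its flexible rpm_msgs[] array,
	 * then 'count' completions that are only used for ACTIVE_ONLY
	 * requests. 'compls' is pointed just past the rpm_msgs[] array below.
	 */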
	ptr = kzalloc(sizeof(*req) +
		      count * (sizeof(req->rpm_msgs[0]) + sizeof(*compls)),
		      GFP_ATOMIC);
	if (!ptr)
		return -ENOMEM;

	req = ptr;
	compls = ptr + sizeof(*req) + count * sizeof(*rpm_msgs);

	req->count = count;
	rpm_msgs = req->rpm_msgs;

	for (i = 0; i < count; i++) {
		__fill_rpmh_msg(rpm_msgs + i, state, cmd, n[i]);
		cmd += n[i];
	}

	if (state != RPMH_ACTIVE_ONLY_STATE) {
		cache_batch(ctrlr, req);
		return 0;
	}

	for (i = 0; i < count; i++) {
		struct completion *compl = &compls[i];

		init_completion(compl);
		rpm_msgs[i].completion = compl;
		ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msgs[i].msg);
		if (ret) {
			pr_err("Error(%d) sending RPMH message addr=%#x\n",
			       ret, rpm_msgs[i].msg.cmds[0].addr);
			break;
		}
	}

	time_left = RPMH_TIMEOUT_MS;
	while (i--) {
		time_left = wait_for_completion_timeout(&compls[i], time_left);
		if (!time_left) {
			/*
			 * Better hope they never finish because they'll signal
			 * the completion that we're going to free once
			 * we've returned from this function.
			 */
			WARN_ON(1);
			ret = -ETIMEDOUT;
			goto exit;
		}
	}

exit:
	kfree(ptr);

	return ret;
}
EXPORT_SYMBOL(rpmh_write_batch);

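/*
 * Illustrative sketch (not part of the driver): sending two batches in one
 * call. The payload is a flat array of tcs_cmd and the zero-terminated 'num'
 * array says how many commands belong to each batch (here 2 and 1). All
 * addresses and data values are hypothetical.
 *
 *	struct tcs_cmd cmds[] = {
 *		{ .addr = 0x30000, .data = 0x1 },	// batch 0
 *		{ .addr = 0x30010, .data = 0x2 },	// batch 0
 *		{ .addr = 0x30020, .data = 0x0 },	// batch 1
 *	};
 *	u32 num[] = { 2, 1, 0 };
 *
 *	int ret = rpmh_write_batch(dev, RPMH_ACTIVE_ONLY_STATE, cmds, num);
 */
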
static int is_req_valid(struct cache_req *req)
{
	return (req->sleep_val != UINT_MAX &&
		req->wake_val != UINT_MAX &&
		req->sleep_val != req->wake_val);
}

static int send_single(struct rpmh_ctrlr *ctrlr, enum rpmh_state state,
		       u32 addr, u32 data)
{
	DEFINE_RPMH_MSG_ONSTACK(NULL, state, NULL, rpm_msg);

	/* Wake sets are always complete and sleep sets are not */
	rpm_msg.msg.wait_for_compl = (state == RPMH_WAKE_ONLY_STATE);
	rpm_msg.cmd[0].addr = addr;
	rpm_msg.cmd[0].data = data;
	rpm_msg.msg.num_cmds = 1;

	return rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr), &rpm_msg.msg);
}

/**
 * rpmh_flush() - Flushes the buffered sleep and wake sets to TCSes
 *
 * @ctrlr: Controller making request to flush cached data
 *
 * Return:
 * * 0          - Success
 * * Error code - Otherwise
 */
int rpmh_flush(struct rpmh_ctrlr *ctrlr)
{
	struct cache_req *p;
	int ret = 0;

	lockdep_assert_irqs_disabled();

	/*
	 * Currently rpmh_flush() is only called when we think we're running
	 * on the last processor. If the lock is busy it means another
	 * processor is up and it's better to abort than spin.
	 */
	if (!spin_trylock(&ctrlr->cache_lock))
		return -EBUSY;

	if (!ctrlr->dirty) {
		pr_debug("Skipping flush, TCS has latest data.\n");
		goto exit;
	}

	/* Invalidate the TCSes first to avoid stale data */
	rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));

	/* First flush the cached batch requests */
	ret = flush_batch(ctrlr);
	if (ret)
		goto exit;

	list_for_each_entry(p, &ctrlr->cache, list) {
		if (!is_req_valid(p)) {
			pr_debug("%s: skipping RPMH req: a:%#x s:%#x w:%#x",
				 __func__, p->addr, p->sleep_val, p->wake_val);
			continue;
		}
		ret = send_single(ctrlr, RPMH_SLEEP_STATE, p->addr,
				  p->sleep_val);
		if (ret)
			goto exit;
		ret = send_single(ctrlr, RPMH_WAKE_ONLY_STATE, p->addr,
				  p->wake_val);
		if (ret)
			goto exit;
	}

	ctrlr->dirty = false;

exit:
	spin_unlock(&ctrlr->cache_lock);
	return ret;
}

/**
 * rpmh_invalidate: Invalidate sleep and wake sets in batch_cache
 *
 * @dev: The device making the request
 *
 * Invalidate the sleep and wake values in batch_cache.
 */
void rpmh_invalidate(const struct device *dev)
{
	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
	struct batch_cache_req *req, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&ctrlr->cache_lock, flags);
	list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list)
		kfree(req);
	INIT_LIST_HEAD(&ctrlr->batch_cache);
	ctrlr->dirty = true;
	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
}
EXPORT_SYMBOL(rpmh_invalidate);