1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * System Control and Management Interface (SCMI) Message Protocol driver
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * SCMI Message Protocol is used between the System Control Processor(SCP)
6*4882a593Smuzhiyun * and the Application Processors(AP). The Message Handling Unit(MHU)
7*4882a593Smuzhiyun * provides a mechanism for inter-processor communication between SCP's
8*4882a593Smuzhiyun * Cortex M3 and AP.
9*4882a593Smuzhiyun *
10*4882a593Smuzhiyun * SCP offers control and management of the core/cluster power states,
11*4882a593Smuzhiyun * various power domain DVFS including the core/cluster, certain system
12*4882a593Smuzhiyun * clocks configuration, thermal sensors and many others.
13*4882a593Smuzhiyun *
14*4882a593Smuzhiyun * Copyright (C) 2018-2020 ARM Ltd.
15*4882a593Smuzhiyun */
16*4882a593Smuzhiyun
17*4882a593Smuzhiyun #include <linux/bitmap.h>
18*4882a593Smuzhiyun #include <linux/device.h>
19*4882a593Smuzhiyun #include <linux/export.h>
20*4882a593Smuzhiyun #include <linux/idr.h>
21*4882a593Smuzhiyun #include <linux/io.h>
22*4882a593Smuzhiyun #include <linux/kernel.h>
23*4882a593Smuzhiyun #include <linux/ktime.h>
24*4882a593Smuzhiyun #include <linux/list.h>
25*4882a593Smuzhiyun #include <linux/module.h>
26*4882a593Smuzhiyun #include <linux/of_address.h>
27*4882a593Smuzhiyun #include <linux/of_device.h>
28*4882a593Smuzhiyun #include <linux/processor.h>
29*4882a593Smuzhiyun #include <linux/refcount.h>
30*4882a593Smuzhiyun #include <linux/slab.h>
31*4882a593Smuzhiyun
32*4882a593Smuzhiyun #include "common.h"
33*4882a593Smuzhiyun #include "notify.h"
34*4882a593Smuzhiyun
35*4882a593Smuzhiyun #define CREATE_TRACE_POINTS
36*4882a593Smuzhiyun #include <trace/events/scmi.h>
37*4882a593Smuzhiyun #undef CREATE_TRACE_POINTS
38*4882a593Smuzhiyun #include <trace/hooks/scmi.h>
39*4882a593Smuzhiyun
/*
 * enum scmi_error_codes - Status codes returned by the SCMI platform
 * firmware in a message status field, as defined by the SCMI
 * specification. Translated to Linux errnos by scmi_to_linux_errno().
 */
enum scmi_error_codes {
	SCMI_SUCCESS = 0,	/* Success */
	SCMI_ERR_SUPPORT = -1,	/* Not supported */
	SCMI_ERR_PARAMS = -2,	/* Invalid Parameters */
	SCMI_ERR_ACCESS = -3,	/* Invalid access/permission denied */
	SCMI_ERR_ENTRY = -4,	/* Not found */
	SCMI_ERR_RANGE = -5,	/* Value out of range */
	SCMI_ERR_BUSY = -6,	/* Device busy */
	SCMI_ERR_COMMS = -7,	/* Communication Error */
	SCMI_ERR_GENERIC = -8,	/* Generic Error */
	SCMI_ERR_HARDWARE = -9,	/* Hardware Error */
	SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
};
53*4882a593Smuzhiyun
/* List of all SCMI devices active in system */
static LIST_HEAD(scmi_list);
/* Protection for the entire list */
static DEFINE_MUTEX(scmi_list_mutex);
/* Track the unique id for the transfers for debug & profiling purpose */
static atomic_t transfer_last_id;

/*
 * IDR tracking devices requested by SCMI drivers (see struct
 * scmi_requested_dev) — presumably keyed by protocol id; confirm against
 * the users of scmi_requested_devices elsewhere in this file.
 */
static DEFINE_IDR(scmi_requested_devices);
/* Protection for scmi_requested_devices */
static DEFINE_MUTEX(scmi_requested_devices_mtx);
63*4882a593Smuzhiyun
/**
 * struct scmi_requested_dev - Bookkeeping for a device requested by a driver
 * @id_table: The device/protocol id table entry of the requesting driver.
 * @node: List head used to chain multiple requests together.
 */
struct scmi_requested_dev {
	const struct scmi_device_id *id_table;
	struct list_head node;
};
68*4882a593Smuzhiyun
/**
 * struct scmi_xfers_info - Structure to manage transfer information
 *
 * @xfer_block: Preallocated Message array
 * @xfer_alloc_table: Bitmap table for allocated messages.
 *	Index of this bitmap table is also used for message
 *	sequence identifier.
 * @xfer_lock: Protection for message allocation: guards both the bitmap
 *	and, transitively, the slot in @xfer_block it maps to.
 */
struct scmi_xfers_info {
	struct scmi_xfer *xfer_block;
	unsigned long *xfer_alloc_table;
	spinlock_t xfer_lock;
};
83*4882a593Smuzhiyun
/**
 * struct scmi_protocol_instance - Describe an initialized protocol instance.
 * @handle: Reference to the SCMI handle associated to this protocol instance.
 * @proto: A reference to the protocol descriptor.
 * @gid: A reference for per-protocol devres management.
 * @users: A refcount to track effective users of this protocol.
 * @priv: Reference for optional protocol private data.
 * @ph: An embedded protocol handle that will be passed down to protocol
 *	initialization code to identify this instance.
 *
 * Each protocol is initialized independently once for each SCMI platform in
 * which is defined by DT and implemented by the SCMI server fw.
 */
struct scmi_protocol_instance {
	const struct scmi_handle	*handle;
	const struct scmi_protocol	*proto;
	void				*gid;
	refcount_t			users;
	void				*priv;
	struct scmi_protocol_handle	ph;
};

/* Retrieve the enclosing protocol instance from an embedded protocol handle */
#define ph_to_pi(h)	container_of(h, struct scmi_protocol_instance, ph)
107*4882a593Smuzhiyun
/**
 * struct scmi_info - Structure representing a SCMI instance
 *
 * @dev: Device pointer
 * @desc: SoC description for this instance
 * @version: SCMI revision information containing protocol version,
 *	implementation version and (sub-)vendor identification.
 * @handle: Instance of SCMI handle to send to clients
 * @tx_minfo: Universal Transmit Message management info
 * @rx_minfo: Universal Receive Message management info
 * @tx_idr: IDR object to map protocol id to Tx channel info pointer
 * @rx_idr: IDR object to map protocol id to Rx channel info pointer
 * @protocols: IDR for protocols' instance descriptors initialized for
 *	this SCMI instance: populated on protocol's first attempted
 *	usage.
 * @protocols_mtx: A mutex to protect protocols instances initialization.
 * @protocols_imp: List of protocols implemented, currently maximum of
 *	MAX_PROTOCOLS_IMP elements allocated by the base protocol
 * @active_protocols: IDR storing device_nodes for protocols actually defined
 *	in the DT and confirmed as implemented by fw.
 * @notify_priv: Pointer to private data structure specific to notifications.
 * @node: List head linking this instance into the global scmi_list
 * @users: Number of users of this instance
 */
struct scmi_info {
	struct device *dev;
	const struct scmi_desc *desc;
	struct scmi_revision_info version;
	struct scmi_handle handle;
	struct scmi_xfers_info tx_minfo;
	struct scmi_xfers_info rx_minfo;
	struct idr tx_idr;
	struct idr rx_idr;
	struct idr protocols;
	/* Ensure mutual exclusive access to protocols instance array */
	struct mutex protocols_mtx;
	u8 *protocols_imp;
	struct idr active_protocols;
	void *notify_priv;
	struct list_head node;
	int users;
};

/* Retrieve the enclosing scmi_info from the client-visible SCMI handle */
#define handle_to_scmi_info(h)	container_of(h, struct scmi_info, handle)
152*4882a593Smuzhiyun
static const int scmi_linux_errmap[] = {
	/* better than switch case as long as return value is continuous */
	0,			/* SCMI_SUCCESS */
	-EOPNOTSUPP,		/* SCMI_ERR_SUPPORT */
	-EINVAL,		/* SCMI_ERR_PARAMS */
	-EACCES,		/* SCMI_ERR_ACCESS */
	-ENOENT,		/* SCMI_ERR_ENTRY */
	-ERANGE,		/* SCMI_ERR_RANGE */
	-EBUSY,			/* SCMI_ERR_BUSY */
	-ECOMM,			/* SCMI_ERR_COMMS */
	-EIO,			/* SCMI_ERR_GENERIC */
	-EREMOTEIO,		/* SCMI_ERR_HARDWARE */
	-EPROTO,		/* SCMI_ERR_PROTOCOL */
};

/**
 * scmi_to_linux_errno() - Map an SCMI status code to a Linux errno
 * @scmi_error: Status reported by the platform: 0 or a negative value from
 *              enum scmi_error_codes.
 *
 * Any code outside the known range — including positive values, which the
 * SCMI specification does not define — maps to -EIO.
 *
 * Return: 0 for SCMI_SUCCESS, otherwise a negative Linux errno.
 */
static inline int scmi_to_linux_errno(int scmi_error)
{
	int err_idx = -scmi_error;

	/*
	 * Compare in signed arithmetic: the array element count is cast to
	 * int so that a negative err_idx (positive @scmi_error) is not
	 * silently promoted to a huge unsigned value by the comparison.
	 */
	if (err_idx >= 0 /* SCMI_SUCCESS */ &&
	    err_idx < (int)(sizeof(scmi_linux_errmap) /
			    sizeof(scmi_linux_errmap[0])))
		return scmi_linux_errmap[err_idx];
	return -EIO;
}
176*4882a593Smuzhiyun
/**
 * scmi_dump_header_dbg() - Helper to dump a message header.
 *
 * @dev: Device pointer corresponding to the SCMI entity
 * @hdr: pointer to header.
 *
 * Emits the message id, sequence number (token) and protocol id at debug
 * log level.
 */
static inline void scmi_dump_header_dbg(struct device *dev,
					struct scmi_msg_hdr *hdr)
{
	dev_dbg(dev, "Message ID: %x Sequence ID: %x Protocol: %x\n",
		hdr->id, hdr->seq, hdr->protocol_id);
}
189*4882a593Smuzhiyun
scmi_set_notification_instance_data(const struct scmi_handle * handle,void * priv)190*4882a593Smuzhiyun void scmi_set_notification_instance_data(const struct scmi_handle *handle,
191*4882a593Smuzhiyun void *priv)
192*4882a593Smuzhiyun {
193*4882a593Smuzhiyun struct scmi_info *info = handle_to_scmi_info(handle);
194*4882a593Smuzhiyun
195*4882a593Smuzhiyun info->notify_priv = priv;
196*4882a593Smuzhiyun /* Ensure updated protocol private date are visible */
197*4882a593Smuzhiyun smp_wmb();
198*4882a593Smuzhiyun }
199*4882a593Smuzhiyun
scmi_get_notification_instance_data(const struct scmi_handle * handle)200*4882a593Smuzhiyun void *scmi_get_notification_instance_data(const struct scmi_handle *handle)
201*4882a593Smuzhiyun {
202*4882a593Smuzhiyun struct scmi_info *info = handle_to_scmi_info(handle);
203*4882a593Smuzhiyun
204*4882a593Smuzhiyun /* Ensure protocols_private_data has been updated */
205*4882a593Smuzhiyun smp_rmb();
206*4882a593Smuzhiyun return info->notify_priv;
207*4882a593Smuzhiyun }
208*4882a593Smuzhiyun
209*4882a593Smuzhiyun /**
210*4882a593Smuzhiyun * scmi_xfer_get() - Allocate one message
211*4882a593Smuzhiyun *
212*4882a593Smuzhiyun * @handle: Pointer to SCMI entity handle
213*4882a593Smuzhiyun * @minfo: Pointer to Tx/Rx Message management info based on channel type
214*4882a593Smuzhiyun *
215*4882a593Smuzhiyun * Helper function which is used by various message functions that are
216*4882a593Smuzhiyun * exposed to clients of this driver for allocating a message traffic event.
217*4882a593Smuzhiyun *
218*4882a593Smuzhiyun * This function can sleep depending on pending requests already in the system
219*4882a593Smuzhiyun * for the SCMI entity. Further, this also holds a spinlock to maintain
220*4882a593Smuzhiyun * integrity of internal data structures.
221*4882a593Smuzhiyun *
222*4882a593Smuzhiyun * Return: 0 if all went fine, else corresponding error.
223*4882a593Smuzhiyun */
scmi_xfer_get(const struct scmi_handle * handle,struct scmi_xfers_info * minfo)224*4882a593Smuzhiyun static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
225*4882a593Smuzhiyun struct scmi_xfers_info *minfo)
226*4882a593Smuzhiyun {
227*4882a593Smuzhiyun u16 xfer_id;
228*4882a593Smuzhiyun struct scmi_xfer *xfer;
229*4882a593Smuzhiyun unsigned long flags, bit_pos;
230*4882a593Smuzhiyun struct scmi_info *info = handle_to_scmi_info(handle);
231*4882a593Smuzhiyun
232*4882a593Smuzhiyun /* Keep the locked section as small as possible */
233*4882a593Smuzhiyun spin_lock_irqsave(&minfo->xfer_lock, flags);
234*4882a593Smuzhiyun bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
235*4882a593Smuzhiyun info->desc->max_msg);
236*4882a593Smuzhiyun if (bit_pos == info->desc->max_msg) {
237*4882a593Smuzhiyun spin_unlock_irqrestore(&minfo->xfer_lock, flags);
238*4882a593Smuzhiyun return ERR_PTR(-ENOMEM);
239*4882a593Smuzhiyun }
240*4882a593Smuzhiyun set_bit(bit_pos, minfo->xfer_alloc_table);
241*4882a593Smuzhiyun spin_unlock_irqrestore(&minfo->xfer_lock, flags);
242*4882a593Smuzhiyun
243*4882a593Smuzhiyun xfer_id = bit_pos;
244*4882a593Smuzhiyun
245*4882a593Smuzhiyun xfer = &minfo->xfer_block[xfer_id];
246*4882a593Smuzhiyun xfer->hdr.seq = xfer_id;
247*4882a593Smuzhiyun reinit_completion(&xfer->done);
248*4882a593Smuzhiyun xfer->transfer_id = atomic_inc_return(&transfer_last_id);
249*4882a593Smuzhiyun
250*4882a593Smuzhiyun return xfer;
251*4882a593Smuzhiyun }
252*4882a593Smuzhiyun
253*4882a593Smuzhiyun /**
254*4882a593Smuzhiyun * __scmi_xfer_put() - Release a message
255*4882a593Smuzhiyun *
256*4882a593Smuzhiyun * @minfo: Pointer to Tx/Rx Message management info based on channel type
257*4882a593Smuzhiyun * @xfer: message that was reserved by scmi_xfer_get
258*4882a593Smuzhiyun *
259*4882a593Smuzhiyun * This holds a spinlock to maintain integrity of internal data structures.
260*4882a593Smuzhiyun */
261*4882a593Smuzhiyun static void
__scmi_xfer_put(struct scmi_xfers_info * minfo,struct scmi_xfer * xfer)262*4882a593Smuzhiyun __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
263*4882a593Smuzhiyun {
264*4882a593Smuzhiyun unsigned long flags;
265*4882a593Smuzhiyun
266*4882a593Smuzhiyun /*
267*4882a593Smuzhiyun * Keep the locked section as small as possible
268*4882a593Smuzhiyun * NOTE: we might escape with smp_mb and no lock here..
269*4882a593Smuzhiyun * but just be conservative and symmetric.
270*4882a593Smuzhiyun */
271*4882a593Smuzhiyun spin_lock_irqsave(&minfo->xfer_lock, flags);
272*4882a593Smuzhiyun clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
273*4882a593Smuzhiyun spin_unlock_irqrestore(&minfo->xfer_lock, flags);
274*4882a593Smuzhiyun }
275*4882a593Smuzhiyun
/*
 * scmi_handle_notification() - Process one incoming notification message.
 *
 * Timestamps the event, grabs a free Rx xfer, decodes the header and
 * payload, dispatches to the notification core and finally releases both
 * the xfer and the channel. The channel is cleared even when no xfer slot
 * is available, so the platform can keep transmitting.
 */
static void scmi_handle_notification(struct scmi_chan_info *cinfo, u32 msg_hdr)
{
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *rx_minfo = &info->rx_minfo;
	struct device *dev = cinfo->dev;
	struct scmi_xfer *rx_xfer;
	ktime_t recv_ts = ktime_get_boottime();

	rx_xfer = scmi_xfer_get(cinfo->handle, rx_minfo);
	if (IS_ERR(rx_xfer)) {
		dev_err(dev, "failed to get free message slot (%ld)\n",
			PTR_ERR(rx_xfer));
		info->desc->ops->clear_channel(cinfo);
		return;
	}

	unpack_scmi_header(msg_hdr, &rx_xfer->hdr);
	scmi_dump_header_dbg(dev, &rx_xfer->hdr);
	info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
					    rx_xfer);
	scmi_notify(cinfo->handle, rx_xfer->hdr.protocol_id,
		    rx_xfer->hdr.id, rx_xfer->rx.buf, rx_xfer->rx.len,
		    recv_ts);

	trace_scmi_rx_done(rx_xfer->transfer_id, rx_xfer->hdr.id,
			   rx_xfer->hdr.protocol_id, rx_xfer->hdr.seq,
			   MSG_TYPE_NOTIFICATION);

	__scmi_xfer_put(rx_minfo, rx_xfer);

	info->desc->ops->clear_channel(cinfo);
}
308*4882a593Smuzhiyun
/**
 * scmi_handle_response() - Process an incoming command/delayed response.
 *
 * @cinfo: SCMI channel info the reply arrived on
 * @xfer_id: Token identifying the pending transfer this reply refers to
 * @msg_type: MSG_TYPE_COMMAND or MSG_TYPE_DELAYED_RESP
 *
 * Matches the reply against the outstanding Tx xfer identified by
 * @xfer_id, fetches the payload and completes the appropriate waiter.
 * NOTE: statement ordering here is deliberate — the channel is cleared
 * before completing a delayed response so the waiter cannot race with a
 * still-busy channel.
 */
static void scmi_handle_response(struct scmi_chan_info *cinfo,
				 u16 xfer_id, u8 msg_type)
{
	struct scmi_xfer *xfer;
	struct device *dev = cinfo->dev;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *minfo = &info->tx_minfo;

	/* Are we even expecting this? */
	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
		dev_err(dev, "message for %d is not expected!\n", xfer_id);
		info->desc->ops->clear_channel(cinfo);
		return;
	}

	xfer = &minfo->xfer_block[xfer_id];
	/*
	 * Even if a response was indeed expected on this slot at this point,
	 * a buggy platform could wrongly reply feeding us an unexpected
	 * delayed response we're not prepared to handle: bail-out safely
	 * blaming firmware.
	 */
	if (unlikely(msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done)) {
		dev_err(dev,
			"Delayed Response for %d not expected! Buggy F/W ?\n",
			xfer_id);
		info->desc->ops->clear_channel(cinfo);
		/* It was unexpected, so nobody will clear the xfer if not us */
		__scmi_xfer_put(minfo, xfer);
		return;
	}

	/* rx.len could be shrunk in the sync do_xfer, so reset to maxsz */
	if (msg_type == MSG_TYPE_DELAYED_RESP)
		xfer->rx.len = info->desc->max_msg_size;

	scmi_dump_header_dbg(dev, &xfer->hdr);

	info->desc->ops->fetch_response(cinfo, xfer);

	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
			   xfer->hdr.protocol_id, xfer->hdr.seq,
			   msg_type);

	/*
	 * Delayed responses complete the on-stack completion installed by
	 * do_xfer_with_response(); immediate ones complete xfer->done
	 * awaited in do_xfer(). Channel clearing for the immediate case is
	 * handled by the sender path, not here.
	 */
	if (msg_type == MSG_TYPE_DELAYED_RESP) {
		info->desc->ops->clear_channel(cinfo);
		complete(xfer->async_done);
	} else {
		complete(&xfer->done);
	}
}
360*4882a593Smuzhiyun
361*4882a593Smuzhiyun /**
362*4882a593Smuzhiyun * scmi_rx_callback() - callback for receiving messages
363*4882a593Smuzhiyun *
364*4882a593Smuzhiyun * @cinfo: SCMI channel info
365*4882a593Smuzhiyun * @msg_hdr: Message header
366*4882a593Smuzhiyun *
367*4882a593Smuzhiyun * Processes one received message to appropriate transfer information and
368*4882a593Smuzhiyun * signals completion of the transfer.
369*4882a593Smuzhiyun *
370*4882a593Smuzhiyun * NOTE: This function will be invoked in IRQ context, hence should be
371*4882a593Smuzhiyun * as optimal as possible.
372*4882a593Smuzhiyun */
scmi_rx_callback(struct scmi_chan_info * cinfo,u32 msg_hdr)373*4882a593Smuzhiyun void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr)
374*4882a593Smuzhiyun {
375*4882a593Smuzhiyun u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
376*4882a593Smuzhiyun u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
377*4882a593Smuzhiyun
378*4882a593Smuzhiyun switch (msg_type) {
379*4882a593Smuzhiyun case MSG_TYPE_NOTIFICATION:
380*4882a593Smuzhiyun scmi_handle_notification(cinfo, msg_hdr);
381*4882a593Smuzhiyun break;
382*4882a593Smuzhiyun case MSG_TYPE_COMMAND:
383*4882a593Smuzhiyun case MSG_TYPE_DELAYED_RESP:
384*4882a593Smuzhiyun scmi_handle_response(cinfo, xfer_id, msg_type);
385*4882a593Smuzhiyun break;
386*4882a593Smuzhiyun default:
387*4882a593Smuzhiyun WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
388*4882a593Smuzhiyun break;
389*4882a593Smuzhiyun }
390*4882a593Smuzhiyun }
391*4882a593Smuzhiyun
392*4882a593Smuzhiyun /**
393*4882a593Smuzhiyun * xfer_put() - Release a transmit message
394*4882a593Smuzhiyun *
395*4882a593Smuzhiyun * @ph: Pointer to SCMI protocol handle
396*4882a593Smuzhiyun * @xfer: message that was reserved by scmi_xfer_get
397*4882a593Smuzhiyun */
xfer_put(const struct scmi_protocol_handle * ph,struct scmi_xfer * xfer)398*4882a593Smuzhiyun static void xfer_put(const struct scmi_protocol_handle *ph,
399*4882a593Smuzhiyun struct scmi_xfer *xfer)
400*4882a593Smuzhiyun {
401*4882a593Smuzhiyun const struct scmi_protocol_instance *pi = ph_to_pi(ph);
402*4882a593Smuzhiyun struct scmi_info *info = handle_to_scmi_info(pi->handle);
403*4882a593Smuzhiyun
404*4882a593Smuzhiyun __scmi_xfer_put(&info->tx_minfo, xfer);
405*4882a593Smuzhiyun }
406*4882a593Smuzhiyun
407*4882a593Smuzhiyun #define SCMI_MAX_POLL_TO_NS (100 * NSEC_PER_USEC)
408*4882a593Smuzhiyun
scmi_xfer_done_no_timeout(struct scmi_chan_info * cinfo,struct scmi_xfer * xfer,ktime_t stop)409*4882a593Smuzhiyun static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
410*4882a593Smuzhiyun struct scmi_xfer *xfer, ktime_t stop)
411*4882a593Smuzhiyun {
412*4882a593Smuzhiyun struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
413*4882a593Smuzhiyun
414*4882a593Smuzhiyun return info->desc->ops->poll_done(cinfo, xfer) ||
415*4882a593Smuzhiyun ktime_after(ktime_get(), stop);
416*4882a593Smuzhiyun }
417*4882a593Smuzhiyun
/**
 * do_xfer() - Do one transfer
 *
 * @ph: Pointer to SCMI protocol handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Sends @xfer on the Tx channel mapped to this protocol, then waits for
 * the reply either by busy-polling (xfer->hdr.poll_completion) or by
 * sleeping on xfer->done with the transport's configured timeout.
 *
 * Return: -ETIMEDOUT in case of no response, if transmit error,
 *	return corresponding error, else if all goes well,
 *	return 0.
 */
static int do_xfer(const struct scmi_protocol_handle *ph,
		   struct scmi_xfer *xfer)
{
	int ret;
	int timeout;
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);
	struct device *dev = info->dev;
	struct scmi_chan_info *cinfo;

	/*
	 * Re-instate protocol id here from protocol handle so that cannot be
	 * overridden by mistake (or malice) by the protocol code mangling with
	 * the scmi_xfer structure.
	 */
	xfer->hdr.protocol_id = pi->proto->id;

	/* Tx channels are registered per protocol id in tx_idr */
	cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id);
	if (unlikely(!cinfo))
		return -EINVAL;

	trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
			      xfer->hdr.protocol_id, xfer->hdr.seq,
			      xfer->hdr.poll_completion);

	ret = info->desc->ops->send_message(cinfo, xfer);
	if (ret < 0) {
		dev_dbg(dev, "Failed to send message %d\n", ret);
		return ret;
	}

	if (xfer->hdr.poll_completion) {
		/*
		 * Busy-wait up to SCMI_MAX_POLL_TO_NS; the response is only
		 * fetched if the deadline has not yet passed.
		 * NOTE(review): if poll_done() turned true exactly at the
		 * deadline, the time recheck below can still report
		 * -ETIMEDOUT despite a completed transfer — confirm whether
		 * this edge case matters for the transports in use.
		 */
		ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS);

		spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));

		if (ktime_before(ktime_get(), stop))
			info->desc->ops->fetch_response(cinfo, xfer);
		else
			ret = -ETIMEDOUT;
	} else {
		/* And we wait for the response. */
		timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
		/* Vendor hook: lets Android vendors tune the timeout */
		trace_android_vh_scmi_timeout_sync(&timeout);
		if (!wait_for_completion_timeout(&xfer->done, timeout)) {
			dev_err(dev, "timed out in resp(caller: %pS)\n",
				(void *)_RET_IP_);
			ret = -ETIMEDOUT;
		}
	}

	/* A transport-level success may still carry an SCMI error status */
	if (!ret && xfer->hdr.status)
		ret = scmi_to_linux_errno(xfer->hdr.status);

	if (info->desc->ops->mark_txdone)
		info->desc->ops->mark_txdone(cinfo, ret);

	trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
			    xfer->hdr.protocol_id, xfer->hdr.seq, ret);

	return ret;
}
490*4882a593Smuzhiyun
reset_rx_to_maxsz(const struct scmi_protocol_handle * ph,struct scmi_xfer * xfer)491*4882a593Smuzhiyun static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph,
492*4882a593Smuzhiyun struct scmi_xfer *xfer)
493*4882a593Smuzhiyun {
494*4882a593Smuzhiyun const struct scmi_protocol_instance *pi = ph_to_pi(ph);
495*4882a593Smuzhiyun struct scmi_info *info = handle_to_scmi_info(pi->handle);
496*4882a593Smuzhiyun
497*4882a593Smuzhiyun xfer->rx.len = info->desc->max_msg_size;
498*4882a593Smuzhiyun }
499*4882a593Smuzhiyun
500*4882a593Smuzhiyun #define SCMI_MAX_RESPONSE_TIMEOUT (2 * MSEC_PER_SEC)
501*4882a593Smuzhiyun
502*4882a593Smuzhiyun /**
503*4882a593Smuzhiyun * do_xfer_with_response() - Do one transfer and wait until the delayed
504*4882a593Smuzhiyun * response is received
505*4882a593Smuzhiyun *
506*4882a593Smuzhiyun * @ph: Pointer to SCMI protocol handle
507*4882a593Smuzhiyun * @xfer: Transfer to initiate and wait for response
508*4882a593Smuzhiyun *
509*4882a593Smuzhiyun * Return: -ETIMEDOUT in case of no delayed response, if transmit error,
510*4882a593Smuzhiyun * return corresponding error, else if all goes well, return 0.
511*4882a593Smuzhiyun */
do_xfer_with_response(const struct scmi_protocol_handle * ph,struct scmi_xfer * xfer)512*4882a593Smuzhiyun static int do_xfer_with_response(const struct scmi_protocol_handle *ph,
513*4882a593Smuzhiyun struct scmi_xfer *xfer)
514*4882a593Smuzhiyun {
515*4882a593Smuzhiyun int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
516*4882a593Smuzhiyun const struct scmi_protocol_instance *pi = ph_to_pi(ph);
517*4882a593Smuzhiyun DECLARE_COMPLETION_ONSTACK(async_response);
518*4882a593Smuzhiyun
519*4882a593Smuzhiyun xfer->hdr.protocol_id = pi->proto->id;
520*4882a593Smuzhiyun
521*4882a593Smuzhiyun xfer->async_done = &async_response;
522*4882a593Smuzhiyun
523*4882a593Smuzhiyun ret = do_xfer(ph, xfer);
524*4882a593Smuzhiyun if (!ret) {
525*4882a593Smuzhiyun if (!wait_for_completion_timeout(xfer->async_done, timeout))
526*4882a593Smuzhiyun ret = -ETIMEDOUT;
527*4882a593Smuzhiyun else if (xfer->hdr.status)
528*4882a593Smuzhiyun ret = scmi_to_linux_errno(xfer->hdr.status);
529*4882a593Smuzhiyun }
530*4882a593Smuzhiyun
531*4882a593Smuzhiyun xfer->async_done = NULL;
532*4882a593Smuzhiyun return ret;
533*4882a593Smuzhiyun }
534*4882a593Smuzhiyun
535*4882a593Smuzhiyun /**
536*4882a593Smuzhiyun * xfer_get_init() - Allocate and initialise one message for transmit
537*4882a593Smuzhiyun *
538*4882a593Smuzhiyun * @ph: Pointer to SCMI protocol handle
539*4882a593Smuzhiyun * @msg_id: Message identifier
540*4882a593Smuzhiyun * @tx_size: transmit message size
541*4882a593Smuzhiyun * @rx_size: receive message size
542*4882a593Smuzhiyun * @p: pointer to the allocated and initialised message
543*4882a593Smuzhiyun *
544*4882a593Smuzhiyun * This function allocates the message using @scmi_xfer_get and
545*4882a593Smuzhiyun * initialise the header.
546*4882a593Smuzhiyun *
547*4882a593Smuzhiyun * Return: 0 if all went fine with @p pointing to message, else
548*4882a593Smuzhiyun * corresponding error.
549*4882a593Smuzhiyun */
xfer_get_init(const struct scmi_protocol_handle * ph,u8 msg_id,size_t tx_size,size_t rx_size,struct scmi_xfer ** p)550*4882a593Smuzhiyun static int xfer_get_init(const struct scmi_protocol_handle *ph,
551*4882a593Smuzhiyun u8 msg_id, size_t tx_size, size_t rx_size,
552*4882a593Smuzhiyun struct scmi_xfer **p)
553*4882a593Smuzhiyun {
554*4882a593Smuzhiyun int ret;
555*4882a593Smuzhiyun struct scmi_xfer *xfer;
556*4882a593Smuzhiyun const struct scmi_protocol_instance *pi = ph_to_pi(ph);
557*4882a593Smuzhiyun struct scmi_info *info = handle_to_scmi_info(pi->handle);
558*4882a593Smuzhiyun struct scmi_xfers_info *minfo = &info->tx_minfo;
559*4882a593Smuzhiyun struct device *dev = info->dev;
560*4882a593Smuzhiyun
561*4882a593Smuzhiyun /* Ensure we have sane transfer sizes */
562*4882a593Smuzhiyun if (rx_size > info->desc->max_msg_size ||
563*4882a593Smuzhiyun tx_size > info->desc->max_msg_size)
564*4882a593Smuzhiyun return -ERANGE;
565*4882a593Smuzhiyun
566*4882a593Smuzhiyun xfer = scmi_xfer_get(pi->handle, minfo);
567*4882a593Smuzhiyun if (IS_ERR(xfer)) {
568*4882a593Smuzhiyun ret = PTR_ERR(xfer);
569*4882a593Smuzhiyun dev_err(dev, "failed to get free message slot(%d)\n", ret);
570*4882a593Smuzhiyun return ret;
571*4882a593Smuzhiyun }
572*4882a593Smuzhiyun
573*4882a593Smuzhiyun xfer->tx.len = tx_size;
574*4882a593Smuzhiyun xfer->rx.len = rx_size ? : info->desc->max_msg_size;
575*4882a593Smuzhiyun xfer->hdr.id = msg_id;
576*4882a593Smuzhiyun xfer->hdr.protocol_id = pi->proto->id;
577*4882a593Smuzhiyun xfer->hdr.poll_completion = false;
578*4882a593Smuzhiyun
579*4882a593Smuzhiyun *p = xfer;
580*4882a593Smuzhiyun
581*4882a593Smuzhiyun return 0;
582*4882a593Smuzhiyun }
583*4882a593Smuzhiyun
584*4882a593Smuzhiyun /**
585*4882a593Smuzhiyun * version_get() - command to get the revision of the SCMI entity
586*4882a593Smuzhiyun *
587*4882a593Smuzhiyun * @ph: Pointer to SCMI protocol handle
588*4882a593Smuzhiyun * @version: Holds returned version of protocol.
589*4882a593Smuzhiyun *
590*4882a593Smuzhiyun * Updates the SCMI information in the internal data structure.
591*4882a593Smuzhiyun *
592*4882a593Smuzhiyun * Return: 0 if all went fine, else return appropriate error.
593*4882a593Smuzhiyun */
version_get(const struct scmi_protocol_handle * ph,u32 * version)594*4882a593Smuzhiyun static int version_get(const struct scmi_protocol_handle *ph, u32 *version)
595*4882a593Smuzhiyun {
596*4882a593Smuzhiyun int ret;
597*4882a593Smuzhiyun __le32 *rev_info;
598*4882a593Smuzhiyun struct scmi_xfer *t;
599*4882a593Smuzhiyun
600*4882a593Smuzhiyun ret = xfer_get_init(ph, PROTOCOL_VERSION, 0, sizeof(*version), &t);
601*4882a593Smuzhiyun if (ret)
602*4882a593Smuzhiyun return ret;
603*4882a593Smuzhiyun
604*4882a593Smuzhiyun ret = do_xfer(ph, t);
605*4882a593Smuzhiyun if (!ret) {
606*4882a593Smuzhiyun rev_info = t->rx.buf;
607*4882a593Smuzhiyun *version = le32_to_cpu(*rev_info);
608*4882a593Smuzhiyun }
609*4882a593Smuzhiyun
610*4882a593Smuzhiyun xfer_put(ph, t);
611*4882a593Smuzhiyun return ret;
612*4882a593Smuzhiyun }
613*4882a593Smuzhiyun
/**
 * scmi_set_protocol_priv - Set protocol specific data at init time
 *
 * @ph: A reference to the protocol handle.
 * @priv: The private data to set.
 *
 * Stashes @priv in the protocol instance backing @ph so that it can be
 * retrieved later via scmi_get_protocol_priv(). Ownership of the pointed-to
 * data remains with the caller.
 *
 * Return: 0 on Success
 */
static int scmi_set_protocol_priv(const struct scmi_protocol_handle *ph,
				  void *priv)
{
	struct scmi_protocol_instance *pi = ph_to_pi(ph);

	pi->priv = priv;

	return 0;
}
631*4882a593Smuzhiyun
/**
 * scmi_get_protocol_priv - Get protocol specific data set at init time
 *
 * @ph: A reference to the protocol handle.
 *
 * Counterpart of scmi_set_protocol_priv().
 *
 * Return: Protocol private data if any was set.
 */
static void *scmi_get_protocol_priv(const struct scmi_protocol_handle *ph)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);

	return pi->priv;
}
645*4882a593Smuzhiyun
/*
 * Common transfer operations exposed to every protocol implementation
 * through scmi_protocol_handle->xops (see scmi_get_protocol_instance()).
 */
static const struct scmi_xfer_ops xfer_ops = {
	.version_get = version_get,
	.xfer_get_init = xfer_get_init,
	.reset_rx_to_maxsz = reset_rx_to_maxsz,
	.do_xfer = do_xfer,
	.do_xfer_with_response = do_xfer_with_response,
	.xfer_put = xfer_put,
};
654*4882a593Smuzhiyun
/**
 * scmi_get_revision_area() - Retrieve version memory area.
 *
 * @ph: A reference to the protocol handle.
 *
 * A helper to grab the version memory area reference during SCMI Base protocol
 * initialization.
 *
 * Return: A reference to the version memory area associated to the SCMI
 * instance underlying this protocol handle.
 */
struct scmi_revision_info *
scmi_get_revision_area(const struct scmi_protocol_handle *ph)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);

	return pi->handle->version;
}
673*4882a593Smuzhiyun
674*4882a593Smuzhiyun /**
675*4882a593Smuzhiyun * scmi_get_protocol_instance - Protocol initialization helper.
676*4882a593Smuzhiyun * @handle: A reference to the SCMI platform instance.
677*4882a593Smuzhiyun * @protocol_id: The protocol being requested.
678*4882a593Smuzhiyun *
679*4882a593Smuzhiyun * In case the required protocol has never been requested before for this
680*4882a593Smuzhiyun * instance, allocate and initialize all the needed structures while handling
681*4882a593Smuzhiyun * resource allocation with a dedicated per-protocol devres subgroup.
682*4882a593Smuzhiyun *
683*4882a593Smuzhiyun * Return: A reference to an initialized protocol instance or error on failure.
684*4882a593Smuzhiyun */
685*4882a593Smuzhiyun static struct scmi_protocol_instance * __must_check
scmi_get_protocol_instance(const struct scmi_handle * handle,u8 protocol_id)686*4882a593Smuzhiyun scmi_get_protocol_instance(const struct scmi_handle *handle, u8 protocol_id)
687*4882a593Smuzhiyun {
688*4882a593Smuzhiyun int ret = -ENOMEM;
689*4882a593Smuzhiyun void *gid;
690*4882a593Smuzhiyun struct scmi_protocol_instance *pi;
691*4882a593Smuzhiyun struct scmi_info *info = handle_to_scmi_info(handle);
692*4882a593Smuzhiyun
693*4882a593Smuzhiyun mutex_lock(&info->protocols_mtx);
694*4882a593Smuzhiyun pi = idr_find(&info->protocols, protocol_id);
695*4882a593Smuzhiyun
696*4882a593Smuzhiyun if (pi) {
697*4882a593Smuzhiyun refcount_inc(&pi->users);
698*4882a593Smuzhiyun } else {
699*4882a593Smuzhiyun const struct scmi_protocol *proto;
700*4882a593Smuzhiyun
701*4882a593Smuzhiyun /* Fail if protocol not registered on bus */
702*4882a593Smuzhiyun proto = scmi_get_protocol(protocol_id);
703*4882a593Smuzhiyun if (!proto) {
704*4882a593Smuzhiyun ret = -EPROBE_DEFER;
705*4882a593Smuzhiyun goto out;
706*4882a593Smuzhiyun }
707*4882a593Smuzhiyun
708*4882a593Smuzhiyun /* Protocol specific devres group */
709*4882a593Smuzhiyun gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
710*4882a593Smuzhiyun if (!gid)
711*4882a593Smuzhiyun goto out;
712*4882a593Smuzhiyun
713*4882a593Smuzhiyun pi = devm_kzalloc(handle->dev, sizeof(*pi), GFP_KERNEL);
714*4882a593Smuzhiyun if (!pi)
715*4882a593Smuzhiyun goto clean;
716*4882a593Smuzhiyun
717*4882a593Smuzhiyun pi->gid = gid;
718*4882a593Smuzhiyun pi->proto = proto;
719*4882a593Smuzhiyun pi->handle = handle;
720*4882a593Smuzhiyun pi->ph.dev = handle->dev;
721*4882a593Smuzhiyun pi->ph.xops = &xfer_ops;
722*4882a593Smuzhiyun pi->ph.set_priv = scmi_set_protocol_priv;
723*4882a593Smuzhiyun pi->ph.get_priv = scmi_get_protocol_priv;
724*4882a593Smuzhiyun refcount_set(&pi->users, 1);
725*4882a593Smuzhiyun /* proto->init is assured NON NULL by scmi_protocol_register */
726*4882a593Smuzhiyun ret = pi->proto->init_instance(&pi->ph);
727*4882a593Smuzhiyun if (ret)
728*4882a593Smuzhiyun goto clean;
729*4882a593Smuzhiyun
730*4882a593Smuzhiyun ret = idr_alloc(&info->protocols, pi,
731*4882a593Smuzhiyun protocol_id, protocol_id + 1, GFP_KERNEL);
732*4882a593Smuzhiyun if (ret != protocol_id)
733*4882a593Smuzhiyun goto clean;
734*4882a593Smuzhiyun
735*4882a593Smuzhiyun if (pi->proto->events)
736*4882a593Smuzhiyun scmi_register_protocol_events(handle, pi->proto->id,
737*4882a593Smuzhiyun &pi->ph,
738*4882a593Smuzhiyun pi->proto->events);
739*4882a593Smuzhiyun
740*4882a593Smuzhiyun devres_close_group(handle->dev, pi->gid);
741*4882a593Smuzhiyun dev_dbg(handle->dev, "Initialized protocol: 0x%X\n",
742*4882a593Smuzhiyun protocol_id);
743*4882a593Smuzhiyun }
744*4882a593Smuzhiyun mutex_unlock(&info->protocols_mtx);
745*4882a593Smuzhiyun
746*4882a593Smuzhiyun return pi;
747*4882a593Smuzhiyun
748*4882a593Smuzhiyun clean:
749*4882a593Smuzhiyun scmi_put_protocol(protocol_id);
750*4882a593Smuzhiyun devres_release_group(handle->dev, gid);
751*4882a593Smuzhiyun out:
752*4882a593Smuzhiyun mutex_unlock(&info->protocols_mtx);
753*4882a593Smuzhiyun return ERR_PTR(ret);
754*4882a593Smuzhiyun }
755*4882a593Smuzhiyun
/**
 * scmi_acquire_protocol - Protocol acquire
 * @handle: A reference to the SCMI platform instance.
 * @protocol_id: The protocol being requested.
 *
 * Register a new user for the requested protocol on the specified SCMI
 * platform instance, possibly triggering its initialization on first user.
 *
 * Return: 0 if protocol was acquired successfully.
 */
int scmi_acquire_protocol(const struct scmi_handle *handle, u8 protocol_id)
{
	/* Only success/failure matters here, not the instance itself */
	return PTR_ERR_OR_ZERO(scmi_get_protocol_instance(handle, protocol_id));
}
770*4882a593Smuzhiyun
/**
 * scmi_release_protocol - Protocol de-initialization helper.
 * @handle: A reference to the SCMI platform instance.
 * @protocol_id: The protocol being requested.
 *
 * Remove one user for the specified protocol and triggers de-initialization
 * and resources de-allocation once the last user has gone.
 */
void scmi_release_protocol(const struct scmi_handle *handle, u8 protocol_id)
{
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct scmi_protocol_instance *pi;

	mutex_lock(&info->protocols_mtx);
	pi = idr_find(&info->protocols, protocol_id);
	/* A release without a prior acquire is a caller bug */
	if (WARN_ON(!pi))
		goto out;

	if (refcount_dec_and_test(&pi->users)) {
		/* Last user gone: tear down in reverse setup order */
		void *gid = pi->gid;

		if (pi->proto->events)
			scmi_deregister_protocol_events(handle, protocol_id);

		/* deinit_instance is optional, unlike init_instance */
		if (pi->proto->deinit_instance)
			pi->proto->deinit_instance(&pi->ph);

		idr_remove(&info->protocols, protocol_id);

		/* Drop the bus reference taken at acquire time */
		scmi_put_protocol(protocol_id);

		/* Frees pi too, since it was devm-allocated inside gid */
		devres_release_group(handle->dev, gid);
		dev_dbg(handle->dev, "De-Initialized protocol: 0x%X\n",
			protocol_id);
	}

out:
	mutex_unlock(&info->protocols_mtx);
}
810*4882a593Smuzhiyun
scmi_setup_protocol_implemented(const struct scmi_protocol_handle * ph,u8 * prot_imp)811*4882a593Smuzhiyun void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
812*4882a593Smuzhiyun u8 *prot_imp)
813*4882a593Smuzhiyun {
814*4882a593Smuzhiyun const struct scmi_protocol_instance *pi = ph_to_pi(ph);
815*4882a593Smuzhiyun struct scmi_info *info = handle_to_scmi_info(pi->handle);
816*4882a593Smuzhiyun
817*4882a593Smuzhiyun info->protocols_imp = prot_imp;
818*4882a593Smuzhiyun }
819*4882a593Smuzhiyun
820*4882a593Smuzhiyun static bool
scmi_is_protocol_implemented(const struct scmi_handle * handle,u8 prot_id)821*4882a593Smuzhiyun scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
822*4882a593Smuzhiyun {
823*4882a593Smuzhiyun int i;
824*4882a593Smuzhiyun struct scmi_info *info = handle_to_scmi_info(handle);
825*4882a593Smuzhiyun
826*4882a593Smuzhiyun if (!info->protocols_imp)
827*4882a593Smuzhiyun return false;
828*4882a593Smuzhiyun
829*4882a593Smuzhiyun for (i = 0; i < MAX_PROTOCOLS_IMP; i++)
830*4882a593Smuzhiyun if (info->protocols_imp[i] == prot_id)
831*4882a593Smuzhiyun return true;
832*4882a593Smuzhiyun return false;
833*4882a593Smuzhiyun }
834*4882a593Smuzhiyun
/**
 * struct scmi_protocol_devres - Devres-tracked record of a protocol hold
 *
 * @handle: The SCMI platform instance the protocol was acquired from.
 * @protocol_id: The identifier of the acquired protocol.
 */
struct scmi_protocol_devres {
	const struct scmi_handle *handle;
	u8 protocol_id;
};
839*4882a593Smuzhiyun
scmi_devm_release_protocol(struct device * dev,void * res)840*4882a593Smuzhiyun static void scmi_devm_release_protocol(struct device *dev, void *res)
841*4882a593Smuzhiyun {
842*4882a593Smuzhiyun struct scmi_protocol_devres *dres = res;
843*4882a593Smuzhiyun
844*4882a593Smuzhiyun scmi_release_protocol(dres->handle, dres->protocol_id);
845*4882a593Smuzhiyun }
846*4882a593Smuzhiyun
847*4882a593Smuzhiyun static struct scmi_protocol_instance __must_check *
__scmi_devres_get_protocol_instance(struct scmi_device * sdev,u8 protocol_id)848*4882a593Smuzhiyun __scmi_devres_get_protocol_instance(struct scmi_device *sdev, u8 protocol_id)
849*4882a593Smuzhiyun {
850*4882a593Smuzhiyun struct scmi_protocol_devres *dres;
851*4882a593Smuzhiyun struct scmi_protocol_instance *pi;
852*4882a593Smuzhiyun
853*4882a593Smuzhiyun dres = devres_alloc(scmi_devm_release_protocol,
854*4882a593Smuzhiyun sizeof(*dres), GFP_KERNEL);
855*4882a593Smuzhiyun if (!dres)
856*4882a593Smuzhiyun return ERR_PTR(-ENOMEM);
857*4882a593Smuzhiyun
858*4882a593Smuzhiyun pi = scmi_get_protocol_instance(sdev->handle, protocol_id);
859*4882a593Smuzhiyun if (IS_ERR(pi)) {
860*4882a593Smuzhiyun devres_free(dres);
861*4882a593Smuzhiyun return pi;
862*4882a593Smuzhiyun }
863*4882a593Smuzhiyun
864*4882a593Smuzhiyun dres->handle = sdev->handle;
865*4882a593Smuzhiyun dres->protocol_id = protocol_id;
866*4882a593Smuzhiyun devres_add(&sdev->dev, dres);
867*4882a593Smuzhiyun
868*4882a593Smuzhiyun return pi;
869*4882a593Smuzhiyun }
870*4882a593Smuzhiyun
871*4882a593Smuzhiyun /**
872*4882a593Smuzhiyun * scmi_devm_get_protocol - Devres managed get protocol operations and handle
873*4882a593Smuzhiyun * @sdev: A reference to an scmi_device whose embedded struct device is to
874*4882a593Smuzhiyun * be used for devres accounting.
875*4882a593Smuzhiyun * @protocol_id: The protocol being requested.
876*4882a593Smuzhiyun * @ph: A pointer reference used to pass back the associated protocol handle.
877*4882a593Smuzhiyun *
878*4882a593Smuzhiyun * Get hold of a protocol accounting for its usage, eventually triggering its
879*4882a593Smuzhiyun * initialization, and returning the protocol specific operations and related
880*4882a593Smuzhiyun * protocol handle which will be used as first argument in most of the
881*4882a593Smuzhiyun * protocols operations methods.
882*4882a593Smuzhiyun * Being a devres based managed method, protocol hold will be automatically
883*4882a593Smuzhiyun * released, and possibly de-initialized on last user, once the SCMI driver
884*4882a593Smuzhiyun * owning the scmi_device is unbound from it.
885*4882a593Smuzhiyun *
886*4882a593Smuzhiyun * Return: A reference to the requested protocol operations or error.
887*4882a593Smuzhiyun * Must be checked for errors by caller.
888*4882a593Smuzhiyun */
889*4882a593Smuzhiyun static const void __must_check *
scmi_devm_get_protocol(struct scmi_device * sdev,u8 protocol_id,struct scmi_protocol_handle ** ph)890*4882a593Smuzhiyun scmi_devm_get_protocol(struct scmi_device *sdev, u8 protocol_id,
891*4882a593Smuzhiyun struct scmi_protocol_handle **ph)
892*4882a593Smuzhiyun {
893*4882a593Smuzhiyun struct scmi_protocol_instance *pi;
894*4882a593Smuzhiyun
895*4882a593Smuzhiyun if (!ph)
896*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
897*4882a593Smuzhiyun
898*4882a593Smuzhiyun pi = __scmi_devres_get_protocol_instance(sdev, protocol_id);
899*4882a593Smuzhiyun if (IS_ERR(pi))
900*4882a593Smuzhiyun return pi;
901*4882a593Smuzhiyun
902*4882a593Smuzhiyun *ph = &pi->ph;
903*4882a593Smuzhiyun
904*4882a593Smuzhiyun return pi->proto->ops;
905*4882a593Smuzhiyun }
906*4882a593Smuzhiyun
907*4882a593Smuzhiyun /**
908*4882a593Smuzhiyun * scmi_devm_acquire_protocol - Devres managed helper to get hold of a protocol
909*4882a593Smuzhiyun * @sdev: A reference to an scmi_device whose embedded struct device is to
910*4882a593Smuzhiyun * be used for devres accounting.
911*4882a593Smuzhiyun * @protocol_id: The protocol being requested.
912*4882a593Smuzhiyun *
913*4882a593Smuzhiyun * Get hold of a protocol accounting for its usage, possibly triggering its
914*4882a593Smuzhiyun * initialization but without getting access to its protocol specific operations
915*4882a593Smuzhiyun * and handle.
916*4882a593Smuzhiyun *
917*4882a593Smuzhiyun * Being a devres based managed method, protocol hold will be automatically
918*4882a593Smuzhiyun * released, and possibly de-initialized on last user, once the SCMI driver
919*4882a593Smuzhiyun * owning the scmi_device is unbound from it.
920*4882a593Smuzhiyun *
921*4882a593Smuzhiyun * Return: 0 on SUCCESS
922*4882a593Smuzhiyun */
scmi_devm_acquire_protocol(struct scmi_device * sdev,u8 protocol_id)923*4882a593Smuzhiyun static int __must_check scmi_devm_acquire_protocol(struct scmi_device *sdev,
924*4882a593Smuzhiyun u8 protocol_id)
925*4882a593Smuzhiyun {
926*4882a593Smuzhiyun struct scmi_protocol_instance *pi;
927*4882a593Smuzhiyun
928*4882a593Smuzhiyun pi = __scmi_devres_get_protocol_instance(sdev, protocol_id);
929*4882a593Smuzhiyun if (IS_ERR(pi))
930*4882a593Smuzhiyun return PTR_ERR(pi);
931*4882a593Smuzhiyun
932*4882a593Smuzhiyun return 0;
933*4882a593Smuzhiyun }
934*4882a593Smuzhiyun
scmi_devm_protocol_match(struct device * dev,void * res,void * data)935*4882a593Smuzhiyun static int scmi_devm_protocol_match(struct device *dev, void *res, void *data)
936*4882a593Smuzhiyun {
937*4882a593Smuzhiyun struct scmi_protocol_devres *dres = res;
938*4882a593Smuzhiyun
939*4882a593Smuzhiyun if (WARN_ON(!dres || !data))
940*4882a593Smuzhiyun return 0;
941*4882a593Smuzhiyun
942*4882a593Smuzhiyun return dres->protocol_id == *((u8 *)data);
943*4882a593Smuzhiyun }
944*4882a593Smuzhiyun
/**
 * scmi_devm_put_protocol - Devres managed put protocol operations and handle
 * @sdev: A reference to an scmi_device whose embedded struct device is to
 * be used for devres accounting.
 * @protocol_id: The protocol being requested.
 *
 * Explicitly release a protocol hold previously obtained calling the above
 * @scmi_devm_get_protocol_ops.
 */
static void scmi_devm_put_protocol(struct scmi_device *sdev, u8 protocol_id)
{
	int ret;

	ret = devres_release(&sdev->dev, scmi_devm_release_protocol,
			     scmi_devm_protocol_match, &protocol_id);
	/* A failure here means an unbalanced put (no matching get) */
	WARN_ON(ret);
}
962*4882a593Smuzhiyun
/*
 * Bump the user count on @info and hand back its embedded handle.
 *
 * NOTE(review): users is a plain int; the visible caller scmi_handle_get()
 * serializes via scmi_list_mutex — confirm any other call sites do too.
 */
static inline
struct scmi_handle *scmi_handle_get_from_info(struct scmi_info *info)
{
	info->users++;
	return &info->handle;
}
969*4882a593Smuzhiyun
970*4882a593Smuzhiyun /**
971*4882a593Smuzhiyun * scmi_handle_get() - Get the SCMI handle for a device
972*4882a593Smuzhiyun *
973*4882a593Smuzhiyun * @dev: pointer to device for which we want SCMI handle
974*4882a593Smuzhiyun *
975*4882a593Smuzhiyun * NOTE: The function does not track individual clients of the framework
976*4882a593Smuzhiyun * and is expected to be maintained by caller of SCMI protocol library.
977*4882a593Smuzhiyun * scmi_handle_put must be balanced with successful scmi_handle_get
978*4882a593Smuzhiyun *
979*4882a593Smuzhiyun * Return: pointer to handle if successful, NULL on error
980*4882a593Smuzhiyun */
scmi_handle_get(struct device * dev)981*4882a593Smuzhiyun struct scmi_handle *scmi_handle_get(struct device *dev)
982*4882a593Smuzhiyun {
983*4882a593Smuzhiyun struct list_head *p;
984*4882a593Smuzhiyun struct scmi_info *info;
985*4882a593Smuzhiyun struct scmi_handle *handle = NULL;
986*4882a593Smuzhiyun
987*4882a593Smuzhiyun mutex_lock(&scmi_list_mutex);
988*4882a593Smuzhiyun list_for_each(p, &scmi_list) {
989*4882a593Smuzhiyun info = list_entry(p, struct scmi_info, node);
990*4882a593Smuzhiyun if (dev->parent == info->dev) {
991*4882a593Smuzhiyun handle = scmi_handle_get_from_info(info);
992*4882a593Smuzhiyun break;
993*4882a593Smuzhiyun }
994*4882a593Smuzhiyun }
995*4882a593Smuzhiyun mutex_unlock(&scmi_list_mutex);
996*4882a593Smuzhiyun
997*4882a593Smuzhiyun return handle;
998*4882a593Smuzhiyun }
999*4882a593Smuzhiyun
/**
 * scmi_handle_put() - Release the handle acquired by scmi_handle_get
 *
 * @handle: handle acquired by scmi_handle_get
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by caller of SCMI protocol library.
 * scmi_handle_put must be balanced with successful scmi_handle_get
 *
 * Return: 0 is successfully released
 *	if null was passed, it returns -EINVAL;
 */
int scmi_handle_put(const struct scmi_handle *handle)
{
	struct scmi_info *info;

	if (!handle)
		return -EINVAL;

	info = handle_to_scmi_info(handle);
	mutex_lock(&scmi_list_mutex);
	/* Warn on unbalanced put, but never let users go negative */
	if (!WARN_ON(!info->users))
		info->users--;
	mutex_unlock(&scmi_list_mutex);

	return 0;
}
1027*4882a593Smuzhiyun
__scmi_xfer_info_init(struct scmi_info * sinfo,struct scmi_xfers_info * info)1028*4882a593Smuzhiyun static int __scmi_xfer_info_init(struct scmi_info *sinfo,
1029*4882a593Smuzhiyun struct scmi_xfers_info *info)
1030*4882a593Smuzhiyun {
1031*4882a593Smuzhiyun int i;
1032*4882a593Smuzhiyun struct scmi_xfer *xfer;
1033*4882a593Smuzhiyun struct device *dev = sinfo->dev;
1034*4882a593Smuzhiyun const struct scmi_desc *desc = sinfo->desc;
1035*4882a593Smuzhiyun
1036*4882a593Smuzhiyun /* Pre-allocated messages, no more than what hdr.seq can support */
1037*4882a593Smuzhiyun if (WARN_ON(!desc->max_msg || desc->max_msg > MSG_TOKEN_MAX)) {
1038*4882a593Smuzhiyun dev_err(dev,
1039*4882a593Smuzhiyun "Invalid maximum messages %d, not in range [1 - %lu]\n",
1040*4882a593Smuzhiyun desc->max_msg, MSG_TOKEN_MAX);
1041*4882a593Smuzhiyun return -EINVAL;
1042*4882a593Smuzhiyun }
1043*4882a593Smuzhiyun
1044*4882a593Smuzhiyun info->xfer_block = devm_kcalloc(dev, desc->max_msg,
1045*4882a593Smuzhiyun sizeof(*info->xfer_block), GFP_KERNEL);
1046*4882a593Smuzhiyun if (!info->xfer_block)
1047*4882a593Smuzhiyun return -ENOMEM;
1048*4882a593Smuzhiyun
1049*4882a593Smuzhiyun info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(desc->max_msg),
1050*4882a593Smuzhiyun sizeof(long), GFP_KERNEL);
1051*4882a593Smuzhiyun if (!info->xfer_alloc_table)
1052*4882a593Smuzhiyun return -ENOMEM;
1053*4882a593Smuzhiyun
1054*4882a593Smuzhiyun /* Pre-initialize the buffer pointer to pre-allocated buffers */
1055*4882a593Smuzhiyun for (i = 0, xfer = info->xfer_block; i < desc->max_msg; i++, xfer++) {
1056*4882a593Smuzhiyun xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
1057*4882a593Smuzhiyun GFP_KERNEL);
1058*4882a593Smuzhiyun if (!xfer->rx.buf)
1059*4882a593Smuzhiyun return -ENOMEM;
1060*4882a593Smuzhiyun
1061*4882a593Smuzhiyun xfer->tx.buf = xfer->rx.buf;
1062*4882a593Smuzhiyun init_completion(&xfer->done);
1063*4882a593Smuzhiyun }
1064*4882a593Smuzhiyun
1065*4882a593Smuzhiyun spin_lock_init(&info->xfer_lock);
1066*4882a593Smuzhiyun
1067*4882a593Smuzhiyun return 0;
1068*4882a593Smuzhiyun }
1069*4882a593Smuzhiyun
scmi_xfer_info_init(struct scmi_info * sinfo)1070*4882a593Smuzhiyun static int scmi_xfer_info_init(struct scmi_info *sinfo)
1071*4882a593Smuzhiyun {
1072*4882a593Smuzhiyun int ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
1073*4882a593Smuzhiyun
1074*4882a593Smuzhiyun if (!ret && idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE))
1075*4882a593Smuzhiyun ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);
1076*4882a593Smuzhiyun
1077*4882a593Smuzhiyun return ret;
1078*4882a593Smuzhiyun }
1079*4882a593Smuzhiyun
scmi_chan_setup(struct scmi_info * info,struct device * dev,int prot_id,bool tx)1080*4882a593Smuzhiyun static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
1081*4882a593Smuzhiyun int prot_id, bool tx)
1082*4882a593Smuzhiyun {
1083*4882a593Smuzhiyun int ret, idx;
1084*4882a593Smuzhiyun struct scmi_chan_info *cinfo;
1085*4882a593Smuzhiyun struct idr *idr;
1086*4882a593Smuzhiyun
1087*4882a593Smuzhiyun /* Transmit channel is first entry i.e. index 0 */
1088*4882a593Smuzhiyun idx = tx ? 0 : 1;
1089*4882a593Smuzhiyun idr = tx ? &info->tx_idr : &info->rx_idr;
1090*4882a593Smuzhiyun
1091*4882a593Smuzhiyun /* check if already allocated, used for multiple device per protocol */
1092*4882a593Smuzhiyun cinfo = idr_find(idr, prot_id);
1093*4882a593Smuzhiyun if (cinfo)
1094*4882a593Smuzhiyun return 0;
1095*4882a593Smuzhiyun
1096*4882a593Smuzhiyun if (!info->desc->ops->chan_available(dev, idx)) {
1097*4882a593Smuzhiyun cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
1098*4882a593Smuzhiyun if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
1099*4882a593Smuzhiyun return -EINVAL;
1100*4882a593Smuzhiyun goto idr_alloc;
1101*4882a593Smuzhiyun }
1102*4882a593Smuzhiyun
1103*4882a593Smuzhiyun cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
1104*4882a593Smuzhiyun if (!cinfo)
1105*4882a593Smuzhiyun return -ENOMEM;
1106*4882a593Smuzhiyun
1107*4882a593Smuzhiyun cinfo->dev = dev;
1108*4882a593Smuzhiyun
1109*4882a593Smuzhiyun ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
1110*4882a593Smuzhiyun if (ret)
1111*4882a593Smuzhiyun return ret;
1112*4882a593Smuzhiyun
1113*4882a593Smuzhiyun idr_alloc:
1114*4882a593Smuzhiyun ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
1115*4882a593Smuzhiyun if (ret != prot_id) {
1116*4882a593Smuzhiyun dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
1117*4882a593Smuzhiyun return ret;
1118*4882a593Smuzhiyun }
1119*4882a593Smuzhiyun
1120*4882a593Smuzhiyun cinfo->handle = &info->handle;
1121*4882a593Smuzhiyun return 0;
1122*4882a593Smuzhiyun }
1123*4882a593Smuzhiyun
1124*4882a593Smuzhiyun static inline int
scmi_txrx_setup(struct scmi_info * info,struct device * dev,int prot_id)1125*4882a593Smuzhiyun scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
1126*4882a593Smuzhiyun {
1127*4882a593Smuzhiyun int ret = scmi_chan_setup(info, dev, prot_id, true);
1128*4882a593Smuzhiyun
1129*4882a593Smuzhiyun if (!ret) {
1130*4882a593Smuzhiyun /* Rx is optional, report only memory errors */
1131*4882a593Smuzhiyun ret = scmi_chan_setup(info, dev, prot_id, false);
1132*4882a593Smuzhiyun if (ret && ret != -ENOMEM)
1133*4882a593Smuzhiyun ret = 0;
1134*4882a593Smuzhiyun }
1135*4882a593Smuzhiyun
1136*4882a593Smuzhiyun return ret;
1137*4882a593Smuzhiyun }
1138*4882a593Smuzhiyun
/**
 * scmi_get_protocol_device - Helper to get/create an SCMI device.
 *
 * @np: A device node representing a valid active protocols for the referred
 * SCMI instance.
 * @info: The referred SCMI instance for which we are getting/creating this
 * device.
 * @prot_id: The protocol ID.
 * @name: The device name.
 *
 * Referring to the specific SCMI instance identified by @info, this helper
 * takes care to return a properly initialized device matching the requested
 * @proto_id and @name: if device was still not existent it is created as a
 * child of the specified SCMI instance @info and its transport properly
 * initialized as usual.
 *
 * Return: A reference to the matching device, or NULL on creation or
 * transport-setup failure.
 */
static inline struct scmi_device *
scmi_get_protocol_device(struct device_node *np, struct scmi_info *info,
			 int prot_id, const char *name)
{
	struct scmi_device *sdev;

	/* Already created for this parent SCMI instance ? */
	sdev = scmi_find_child_dev(info->dev, prot_id, name);
	if (sdev)
		return sdev;

	pr_debug("Creating SCMI device (%s) for protocol %x\n", name, prot_id);

	sdev = scmi_device_create(np, info->dev, prot_id, name);
	if (!sdev) {
		dev_err(info->dev, "failed to create %d protocol device\n",
			prot_id);
		return NULL;
	}

	if (scmi_txrx_setup(info, &sdev->dev, prot_id)) {
		dev_err(&sdev->dev, "failed to setup transport\n");
		/* Undo the creation above on transport failure */
		scmi_device_destroy(sdev);
		return NULL;
	}

	return sdev;
}
1183*4882a593Smuzhiyun
/*
 * Get/create the SCMI device for @prot_id and bind its SCMI handle;
 * silently bails out if the device cannot be obtained.
 */
static inline void
scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
			    int prot_id, const char *name)
{
	struct scmi_device *sdev;

	sdev = scmi_get_protocol_device(np, info, prot_id, name);
	if (!sdev)
		return;

	/* setup handle now as the transport is ready */
	scmi_set_handle(sdev);
}
1197*4882a593Smuzhiyun
/**
 * scmi_create_protocol_devices - Create devices for all pending requests for
 * this SCMI instance.
 *
 * @np: The device node describing the protocol
 * @info: The SCMI instance descriptor
 * @prot_id: The protocol ID
 *
 * All devices previously requested for this instance (if any) are found and
 * created by scanning the proper @&scmi_requested_devices entry.
 */
static void scmi_create_protocol_devices(struct device_node *np,
					 struct scmi_info *info, int prot_id)
{
	struct list_head *phead;

	mutex_lock(&scmi_requested_devices_mtx);
	phead = idr_find(&scmi_requested_devices, prot_id);
	if (phead) {
		struct scmi_requested_dev *rdev;

		/* One device per pending request annotated for this protocol */
		list_for_each_entry(rdev, phead, node)
			scmi_create_protocol_device(np, info, prot_id,
						    rdev->id_table->name);
	}
	mutex_unlock(&scmi_requested_devices_mtx);
}
1225*4882a593Smuzhiyun
1226*4882a593Smuzhiyun /**
1227*4882a593Smuzhiyun * scmi_request_protocol_device - Helper to request a device
1228*4882a593Smuzhiyun *
1229*4882a593Smuzhiyun * @id_table: A protocol/name pair descriptor for the device to be created.
1230*4882a593Smuzhiyun *
1231*4882a593Smuzhiyun * This helper let an SCMI driver request specific devices identified by the
1232*4882a593Smuzhiyun * @id_table to be created for each active SCMI instance.
1233*4882a593Smuzhiyun *
1234*4882a593Smuzhiyun * The requested device name MUST NOT be already existent for any protocol;
1235*4882a593Smuzhiyun * at first the freshly requested @id_table is annotated in the IDR table
1236*4882a593Smuzhiyun * @scmi_requested_devices, then a matching device is created for each already
1237*4882a593Smuzhiyun * active SCMI instance. (if any)
1238*4882a593Smuzhiyun *
1239*4882a593Smuzhiyun * This way the requested device is created straight-away for all the already
1240*4882a593Smuzhiyun * initialized(probed) SCMI instances (handles) and it remains also annotated
1241*4882a593Smuzhiyun * as pending creation if the requesting SCMI driver was loaded before some
1242*4882a593Smuzhiyun * SCMI instance and related transports were available: when such late instance
1243*4882a593Smuzhiyun * is probed, its probe will take care to scan the list of pending requested
1244*4882a593Smuzhiyun * devices and create those on its own (see @scmi_create_protocol_devices and
1245*4882a593Smuzhiyun * its enclosing loop)
1246*4882a593Smuzhiyun *
1247*4882a593Smuzhiyun * Return: 0 on Success
1248*4882a593Smuzhiyun */
int scmi_request_protocol_device(const struct scmi_device_id *id_table)
{
	int ret = 0;
	unsigned int id = 0;
	struct list_head *head, *phead = NULL;
	struct scmi_requested_dev *rdev;
	struct scmi_info *info;

	pr_debug("Requesting SCMI device (%s) for protocol %x\n",
		 id_table->name, id_table->protocol_id);

	/*
	 * Search for the matching protocol rdev list and then search
	 * of any existent equally named device...fails if any duplicate found.
	 */
	mutex_lock(&scmi_requested_devices_mtx);
	idr_for_each_entry(&scmi_requested_devices, head, id) {
		if (!phead) {
			/* A list found registered in the IDR is never empty */
			rdev = list_first_entry(head, struct scmi_requested_dev,
						node);
			if (rdev->id_table->protocol_id ==
			    id_table->protocol_id)
				phead = head;
		}
		/*
		 * Device names must be unique across ALL protocols, so this
		 * inner scan runs on every list, not only the matching one.
		 */
		list_for_each_entry(rdev, head, node) {
			if (!strcmp(rdev->id_table->name, id_table->name)) {
				pr_err("Ignoring duplicate request [%d] %s\n",
				       rdev->id_table->protocol_id,
				       rdev->id_table->name);
				ret = -EINVAL;
				goto out;
			}
		}
	}

	/*
	 * No duplicate found for requested id_table, so let's create a new
	 * requested device entry for this new valid request.
	 */
	rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
	if (!rdev) {
		ret = -ENOMEM;
		goto out;
	}
	rdev->id_table = id_table;

	/*
	 * Append the new requested device table descriptor to the head of the
	 * related protocol list, eventually creating such head if not already
	 * there.
	 */
	if (!phead) {
		phead = kzalloc(sizeof(*phead), GFP_KERNEL);
		if (!phead) {
			kfree(rdev);
			ret = -ENOMEM;
			goto out;
		}
		INIT_LIST_HEAD(phead);

		/* Key the new list by protocol_id in the global IDR */
		ret = idr_alloc(&scmi_requested_devices, (void *)phead,
				id_table->protocol_id,
				id_table->protocol_id + 1, GFP_KERNEL);
		if (ret != id_table->protocol_id) {
			pr_err("Failed to save SCMI device - ret:%d\n", ret);
			kfree(rdev);
			kfree(phead);
			ret = -EINVAL;
			goto out;
		}
		ret = 0;
	}
	list_add(&rdev->node, phead);

	/*
	 * Now effectively create and initialize the requested device for every
	 * already initialized SCMI instance which has registered the requested
	 * protocol as a valid active one: i.e. defined in DT and supported by
	 * current platform FW.
	 *
	 * Lock nesting here is scmi_requested_devices_mtx -> scmi_list_mutex;
	 * any other path taking both must follow the same order.
	 */
	mutex_lock(&scmi_list_mutex);
	list_for_each_entry(info, &scmi_list, node) {
		struct device_node *child;

		child = idr_find(&info->active_protocols,
				 id_table->protocol_id);
		if (child) {
			struct scmi_device *sdev;

			sdev = scmi_get_protocol_device(child, info,
							id_table->protocol_id,
							id_table->name);
			/* Set handle if not already set: device existed */
			if (sdev && !sdev->handle)
				sdev->handle = scmi_handle_get_from_info(info);
		} else {
			dev_err(info->dev,
				"Failed. SCMI protocol %d not active.\n",
				id_table->protocol_id);
		}
	}
	mutex_unlock(&scmi_list_mutex);

out:
	mutex_unlock(&scmi_requested_devices_mtx);

	return ret;
}
1358*4882a593Smuzhiyun
1359*4882a593Smuzhiyun /**
1360*4882a593Smuzhiyun * scmi_unrequest_protocol_device - Helper to unrequest a device
1361*4882a593Smuzhiyun *
1362*4882a593Smuzhiyun * @id_table: A protocol/name pair descriptor for the device to be unrequested.
1363*4882a593Smuzhiyun *
 * A helper to let an SCMI driver release its request about devices; note that
 * devices are created and initialized once the first SCMI driver requests them
 * but they are destroyed only on SCMI core unloading/unbinding.
1367*4882a593Smuzhiyun *
1368*4882a593Smuzhiyun * The current SCMI transport layer uses such devices as internal references and
1369*4882a593Smuzhiyun * as such they could be shared as same transport between multiple drivers so
1370*4882a593Smuzhiyun * that cannot be safely destroyed till the whole SCMI stack is removed.
1371*4882a593Smuzhiyun * (unless adding further burden of refcounting.)
1372*4882a593Smuzhiyun */
scmi_unrequest_protocol_device(const struct scmi_device_id * id_table)1373*4882a593Smuzhiyun void scmi_unrequest_protocol_device(const struct scmi_device_id *id_table)
1374*4882a593Smuzhiyun {
1375*4882a593Smuzhiyun struct list_head *phead;
1376*4882a593Smuzhiyun
1377*4882a593Smuzhiyun pr_debug("Unrequesting SCMI device (%s) for protocol %x\n",
1378*4882a593Smuzhiyun id_table->name, id_table->protocol_id);
1379*4882a593Smuzhiyun
1380*4882a593Smuzhiyun mutex_lock(&scmi_requested_devices_mtx);
1381*4882a593Smuzhiyun phead = idr_find(&scmi_requested_devices, id_table->protocol_id);
1382*4882a593Smuzhiyun if (phead) {
1383*4882a593Smuzhiyun struct scmi_requested_dev *victim, *tmp;
1384*4882a593Smuzhiyun
1385*4882a593Smuzhiyun list_for_each_entry_safe(victim, tmp, phead, node) {
1386*4882a593Smuzhiyun if (!strcmp(victim->id_table->name, id_table->name)) {
1387*4882a593Smuzhiyun list_del(&victim->node);
1388*4882a593Smuzhiyun kfree(victim);
1389*4882a593Smuzhiyun break;
1390*4882a593Smuzhiyun }
1391*4882a593Smuzhiyun }
1392*4882a593Smuzhiyun
1393*4882a593Smuzhiyun if (list_empty(phead)) {
1394*4882a593Smuzhiyun idr_remove(&scmi_requested_devices,
1395*4882a593Smuzhiyun id_table->protocol_id);
1396*4882a593Smuzhiyun kfree(phead);
1397*4882a593Smuzhiyun }
1398*4882a593Smuzhiyun }
1399*4882a593Smuzhiyun mutex_unlock(&scmi_requested_devices_mtx);
1400*4882a593Smuzhiyun }
1401*4882a593Smuzhiyun
scmi_cleanup_txrx_channels(struct scmi_info * info)1402*4882a593Smuzhiyun static int scmi_cleanup_txrx_channels(struct scmi_info *info)
1403*4882a593Smuzhiyun {
1404*4882a593Smuzhiyun int ret;
1405*4882a593Smuzhiyun struct idr *idr = &info->tx_idr;
1406*4882a593Smuzhiyun
1407*4882a593Smuzhiyun ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
1408*4882a593Smuzhiyun idr_destroy(&info->tx_idr);
1409*4882a593Smuzhiyun
1410*4882a593Smuzhiyun idr = &info->rx_idr;
1411*4882a593Smuzhiyun ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
1412*4882a593Smuzhiyun idr_destroy(&info->rx_idr);
1413*4882a593Smuzhiyun
1414*4882a593Smuzhiyun return ret;
1415*4882a593Smuzhiyun }
1416*4882a593Smuzhiyun
scmi_probe(struct platform_device * pdev)1417*4882a593Smuzhiyun static int scmi_probe(struct platform_device *pdev)
1418*4882a593Smuzhiyun {
1419*4882a593Smuzhiyun int ret;
1420*4882a593Smuzhiyun struct scmi_handle *handle;
1421*4882a593Smuzhiyun const struct scmi_desc *desc;
1422*4882a593Smuzhiyun struct scmi_info *info;
1423*4882a593Smuzhiyun struct device *dev = &pdev->dev;
1424*4882a593Smuzhiyun struct device_node *child, *np = dev->of_node;
1425*4882a593Smuzhiyun
1426*4882a593Smuzhiyun desc = of_device_get_match_data(dev);
1427*4882a593Smuzhiyun if (!desc)
1428*4882a593Smuzhiyun return -EINVAL;
1429*4882a593Smuzhiyun
1430*4882a593Smuzhiyun info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
1431*4882a593Smuzhiyun if (!info)
1432*4882a593Smuzhiyun return -ENOMEM;
1433*4882a593Smuzhiyun
1434*4882a593Smuzhiyun info->dev = dev;
1435*4882a593Smuzhiyun info->desc = desc;
1436*4882a593Smuzhiyun INIT_LIST_HEAD(&info->node);
1437*4882a593Smuzhiyun idr_init(&info->protocols);
1438*4882a593Smuzhiyun mutex_init(&info->protocols_mtx);
1439*4882a593Smuzhiyun idr_init(&info->active_protocols);
1440*4882a593Smuzhiyun
1441*4882a593Smuzhiyun platform_set_drvdata(pdev, info);
1442*4882a593Smuzhiyun idr_init(&info->tx_idr);
1443*4882a593Smuzhiyun idr_init(&info->rx_idr);
1444*4882a593Smuzhiyun
1445*4882a593Smuzhiyun handle = &info->handle;
1446*4882a593Smuzhiyun handle->dev = info->dev;
1447*4882a593Smuzhiyun handle->version = &info->version;
1448*4882a593Smuzhiyun handle->devm_acquire_protocol = scmi_devm_acquire_protocol;
1449*4882a593Smuzhiyun handle->devm_get_protocol = scmi_devm_get_protocol;
1450*4882a593Smuzhiyun handle->devm_put_protocol = scmi_devm_put_protocol;
1451*4882a593Smuzhiyun
1452*4882a593Smuzhiyun ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
1453*4882a593Smuzhiyun if (ret)
1454*4882a593Smuzhiyun return ret;
1455*4882a593Smuzhiyun
1456*4882a593Smuzhiyun ret = scmi_xfer_info_init(info);
1457*4882a593Smuzhiyun if (ret)
1458*4882a593Smuzhiyun goto clear_txrx_setup;
1459*4882a593Smuzhiyun
1460*4882a593Smuzhiyun if (scmi_notification_init(handle))
1461*4882a593Smuzhiyun dev_err(dev, "SCMI Notifications NOT available.\n");
1462*4882a593Smuzhiyun
1463*4882a593Smuzhiyun /*
1464*4882a593Smuzhiyun * Trigger SCMI Base protocol initialization.
1465*4882a593Smuzhiyun * It's mandatory and won't be ever released/deinit until the
1466*4882a593Smuzhiyun * SCMI stack is shutdown/unloaded as a whole.
1467*4882a593Smuzhiyun */
1468*4882a593Smuzhiyun ret = scmi_acquire_protocol(handle, SCMI_PROTOCOL_BASE);
1469*4882a593Smuzhiyun if (ret) {
1470*4882a593Smuzhiyun dev_err(dev, "unable to communicate with SCMI\n");
1471*4882a593Smuzhiyun goto notification_exit;
1472*4882a593Smuzhiyun }
1473*4882a593Smuzhiyun
1474*4882a593Smuzhiyun mutex_lock(&scmi_list_mutex);
1475*4882a593Smuzhiyun list_add_tail(&info->node, &scmi_list);
1476*4882a593Smuzhiyun mutex_unlock(&scmi_list_mutex);
1477*4882a593Smuzhiyun
1478*4882a593Smuzhiyun for_each_available_child_of_node(np, child) {
1479*4882a593Smuzhiyun u32 prot_id;
1480*4882a593Smuzhiyun
1481*4882a593Smuzhiyun if (of_property_read_u32(child, "reg", &prot_id))
1482*4882a593Smuzhiyun continue;
1483*4882a593Smuzhiyun
1484*4882a593Smuzhiyun if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
1485*4882a593Smuzhiyun dev_err(dev, "Out of range protocol %d\n", prot_id);
1486*4882a593Smuzhiyun
1487*4882a593Smuzhiyun if (!scmi_is_protocol_implemented(handle, prot_id)) {
1488*4882a593Smuzhiyun dev_err(dev, "SCMI protocol %d not implemented\n",
1489*4882a593Smuzhiyun prot_id);
1490*4882a593Smuzhiyun continue;
1491*4882a593Smuzhiyun }
1492*4882a593Smuzhiyun
1493*4882a593Smuzhiyun /*
1494*4882a593Smuzhiyun * Save this valid DT protocol descriptor amongst
1495*4882a593Smuzhiyun * @active_protocols for this SCMI instance/
1496*4882a593Smuzhiyun */
1497*4882a593Smuzhiyun ret = idr_alloc(&info->active_protocols, child,
1498*4882a593Smuzhiyun prot_id, prot_id + 1, GFP_KERNEL);
1499*4882a593Smuzhiyun if (ret != prot_id) {
1500*4882a593Smuzhiyun dev_err(dev, "SCMI protocol %d already activated. Skip\n",
1501*4882a593Smuzhiyun prot_id);
1502*4882a593Smuzhiyun continue;
1503*4882a593Smuzhiyun }
1504*4882a593Smuzhiyun
1505*4882a593Smuzhiyun of_node_get(child);
1506*4882a593Smuzhiyun scmi_create_protocol_devices(child, info, prot_id);
1507*4882a593Smuzhiyun }
1508*4882a593Smuzhiyun
1509*4882a593Smuzhiyun return 0;
1510*4882a593Smuzhiyun
1511*4882a593Smuzhiyun notification_exit:
1512*4882a593Smuzhiyun scmi_notification_exit(&info->handle);
1513*4882a593Smuzhiyun clear_txrx_setup:
1514*4882a593Smuzhiyun scmi_cleanup_txrx_channels(info);
1515*4882a593Smuzhiyun return ret;
1516*4882a593Smuzhiyun }
1517*4882a593Smuzhiyun
/*
 * scmi_free_channel - Remove a channel's entry from the given IDR
 * @cinfo: channel info being freed (unused here; kept for the chan_free
 *         callback signature used by transports)
 * @idr: the TX or RX IDR the channel was registered in
 * @id: the protocol id the channel was keyed by
 */
void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id)
{
	idr_remove(idr, id);
}
1522*4882a593Smuzhiyun
/*
 * scmi_remove - Tear down one SCMI platform instance
 *
 * Removal is refused with -EBUSY while any user still holds a reference
 * (info->users); otherwise the instance is unlinked from the global list,
 * notifications are shut down, protocol bookkeeping is dropped and, last,
 * the transport channels are freed.
 */
static int scmi_remove(struct platform_device *pdev)
{
	int ret = 0, id;
	struct scmi_info *info = platform_get_drvdata(pdev);
	struct device_node *child;

	mutex_lock(&scmi_list_mutex);
	if (info->users)
		ret = -EBUSY;	/* some protocol user is still active */
	else
		list_del(&info->node);
	mutex_unlock(&scmi_list_mutex);

	if (ret)
		return ret;

	scmi_notification_exit(&info->handle);

	mutex_lock(&info->protocols_mtx);
	idr_destroy(&info->protocols);
	mutex_unlock(&info->protocols_mtx);

	/* Balance the of_node_get() taken per protocol at probe time */
	idr_for_each_entry(&info->active_protocols, child, id)
		of_node_put(child);
	idr_destroy(&info->active_protocols);

	/* Safe to free channels since no more users */
	return scmi_cleanup_txrx_channels(info);
}
1552*4882a593Smuzhiyun
protocol_version_show(struct device * dev,struct device_attribute * attr,char * buf)1553*4882a593Smuzhiyun static ssize_t protocol_version_show(struct device *dev,
1554*4882a593Smuzhiyun struct device_attribute *attr, char *buf)
1555*4882a593Smuzhiyun {
1556*4882a593Smuzhiyun struct scmi_info *info = dev_get_drvdata(dev);
1557*4882a593Smuzhiyun
1558*4882a593Smuzhiyun return sprintf(buf, "%u.%u\n", info->version.major_ver,
1559*4882a593Smuzhiyun info->version.minor_ver);
1560*4882a593Smuzhiyun }
1561*4882a593Smuzhiyun static DEVICE_ATTR_RO(protocol_version);
1562*4882a593Smuzhiyun
firmware_version_show(struct device * dev,struct device_attribute * attr,char * buf)1563*4882a593Smuzhiyun static ssize_t firmware_version_show(struct device *dev,
1564*4882a593Smuzhiyun struct device_attribute *attr, char *buf)
1565*4882a593Smuzhiyun {
1566*4882a593Smuzhiyun struct scmi_info *info = dev_get_drvdata(dev);
1567*4882a593Smuzhiyun
1568*4882a593Smuzhiyun return sprintf(buf, "0x%x\n", info->version.impl_ver);
1569*4882a593Smuzhiyun }
1570*4882a593Smuzhiyun static DEVICE_ATTR_RO(firmware_version);
1571*4882a593Smuzhiyun
vendor_id_show(struct device * dev,struct device_attribute * attr,char * buf)1572*4882a593Smuzhiyun static ssize_t vendor_id_show(struct device *dev,
1573*4882a593Smuzhiyun struct device_attribute *attr, char *buf)
1574*4882a593Smuzhiyun {
1575*4882a593Smuzhiyun struct scmi_info *info = dev_get_drvdata(dev);
1576*4882a593Smuzhiyun
1577*4882a593Smuzhiyun return sprintf(buf, "%s\n", info->version.vendor_id);
1578*4882a593Smuzhiyun }
1579*4882a593Smuzhiyun static DEVICE_ATTR_RO(vendor_id);
1580*4882a593Smuzhiyun
sub_vendor_id_show(struct device * dev,struct device_attribute * attr,char * buf)1581*4882a593Smuzhiyun static ssize_t sub_vendor_id_show(struct device *dev,
1582*4882a593Smuzhiyun struct device_attribute *attr, char *buf)
1583*4882a593Smuzhiyun {
1584*4882a593Smuzhiyun struct scmi_info *info = dev_get_drvdata(dev);
1585*4882a593Smuzhiyun
1586*4882a593Smuzhiyun return sprintf(buf, "%s\n", info->version.sub_vendor_id);
1587*4882a593Smuzhiyun }
1588*4882a593Smuzhiyun static DEVICE_ATTR_RO(sub_vendor_id);
1589*4882a593Smuzhiyun
/* sysfs attributes exposing the Base protocol revision info per instance */
static struct attribute *versions_attrs[] = {
	&dev_attr_firmware_version.attr,
	&dev_attr_protocol_version.attr,
	&dev_attr_vendor_id.attr,
	&dev_attr_sub_vendor_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(versions);
1598*4882a593Smuzhiyun
/* Each compatible listed below must have descriptor associated with it */
static const struct of_device_id scmi_of_match[] = {
#ifdef CONFIG_MAILBOX
	/* Shared-memory transport driven through the mailbox framework */
	{ .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
#endif
#ifdef CONFIG_HAVE_ARM_SMCCC_DISCOVERY
	/* SMC/HVC based transport */
	{ .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
#endif
	{ /* Sentinel */ },
};

MODULE_DEVICE_TABLE(of, scmi_of_match);
1611*4882a593Smuzhiyun
/* Platform driver glue for the whole SCMI instance lifecycle */
static struct platform_driver scmi_driver = {
	.driver = {
		   .name = "arm-scmi",
		   /* Disallow manual bind/unbind via sysfs */
		   .suppress_bind_attrs = true,
		   .of_match_table = scmi_of_match,
		   .dev_groups = versions_groups,
		   },
	.probe = scmi_probe,
	.remove = scmi_remove,
};
1622*4882a593Smuzhiyun
static int __init scmi_driver_init(void)
{
	/* The SCMI bus type must exist before any driver/device registration */
	scmi_bus_init();

	/* Base protocol is mandatory; register it ahead of all the others */
	scmi_base_register();

	/* Register the standard protocol implementations shipped in-tree */
	scmi_clock_register();
	scmi_perf_register();
	scmi_power_register();
	scmi_reset_register();
	scmi_sensors_register();
	scmi_voltage_register();
	scmi_system_register();

	/* Only now can instances start probing against scmi_of_match */
	return platform_driver_register(&scmi_driver);
}
subsys_initcall(scmi_driver_init);
1640*4882a593Smuzhiyun
static void __exit scmi_driver_exit(void)
{
	/*
	 * NOTE(review): protocols and the bus are torn down BEFORE
	 * platform_driver_unregister(), i.e. the reverse of init happens
	 * last — confirm no late instance probe/remove can race with the
	 * protocol deregistrations below.
	 */
	scmi_base_unregister();

	scmi_clock_unregister();
	scmi_perf_unregister();
	scmi_power_unregister();
	scmi_reset_unregister();
	scmi_sensors_unregister();
	scmi_voltage_unregister();
	scmi_system_unregister();

	scmi_bus_exit();

	platform_driver_unregister(&scmi_driver);
}
module_exit(scmi_driver_exit);
1658*4882a593Smuzhiyun
1659*4882a593Smuzhiyun MODULE_ALIAS("platform:arm-scmi");
1660*4882a593Smuzhiyun MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
1661*4882a593Smuzhiyun MODULE_DESCRIPTION("ARM SCMI protocol driver");
1662*4882a593Smuzhiyun MODULE_LICENSE("GPL v2");
1663