// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-2017, Linaro Ltd
 */

#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/rpmsg.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/mailbox_client.h>

#include "rpmsg_internal.h"
#include "qcom_glink_native.h"

#define GLINK_NAME_SIZE		32
#define GLINK_VERSION_1		1

#define RPM_GLINK_CID_MIN	1
#define RPM_GLINK_CID_MAX	65536

struct glink_msg {
	__le16 cmd;
	__le16 param1;
	__le32 param2;
	u8 data[];
} __packed;
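
/*
 * Every command on the FIFO starts with this little-endian header; @cmd is
 * one of the RPM_CMD_* values defined below and the meaning of @param1 and
 * @param2 depends on the command (typically a channel id plus a length,
 * intent id or feature mask). Commands are padded to 8-byte boundaries on
 * the pipe, hence the ALIGN(..., 8) in the rx/tx paths.
 */
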
/**
 * struct glink_defer_cmd - deferred incoming control message
 * @node: list node
 * @msg: message header
 * @data: payload of the message
 *
 * Copy of a received control message, to be added to @rx_queue and processed
 * by @rx_work of @qcom_glink.
 */
struct glink_defer_cmd {
	struct list_head node;

	struct glink_msg msg;
	u8 data[];
};

/**
 * struct glink_core_rx_intent - RX intent
 * @data: pointer to the data (may be NULL for zero-copy)
 * @id: remote or local intent ID
 * @size: size of the original intent (do not modify)
 * @reuse: mark if the intent can be reused after the first use
 * @in_use: mark if the intent is currently in use on the channel
 * @offset: next write offset (initially 0)
 * @node: list node
 */
struct glink_core_rx_intent {
	void *data;
	u32 id;
	size_t size;
	bool reuse;
	bool in_use;
	u32 offset;

	struct list_head node;
};

/**
 * struct qcom_glink - driver context, relates to one remote subsystem
 * @dev: reference to the associated struct device
 * @name: name of this glink edge
 * @mbox_client: mailbox client
 * @mbox_chan: mailbox channel
 * @rx_pipe: pipe object for receive FIFO
 * @tx_pipe: pipe object for transmit FIFO
 * @irq: IRQ for signaling incoming events
 * @rx_work: worker for handling received control messages
 * @rx_lock: protects the @rx_queue
 * @rx_queue: queue of received control messages to be processed in @rx_work
 * @tx_lock: synchronizes operations on the tx fifo
 * @idr_lock: synchronizes @lcids and @rcids modifications
 * @lcids: idr of all channels with a known local channel id
 * @rcids: idr of all channels with a known remote channel id
 * @features: remote features
 * @intentless: flag to indicate that the edge operates without RX intents
 */
struct qcom_glink {
	struct device *dev;

	const char *name;

	struct mbox_client mbox_client;
	struct mbox_chan *mbox_chan;

	struct qcom_glink_pipe *rx_pipe;
	struct qcom_glink_pipe *tx_pipe;

	int irq;

	struct work_struct rx_work;
	spinlock_t rx_lock;
	struct list_head rx_queue;

	spinlock_t tx_lock;

	spinlock_t idr_lock;
	struct idr lcids;
	struct idr rcids;
	unsigned long features;

	bool intentless;
};

enum {
	GLINK_STATE_CLOSED,
	GLINK_STATE_OPENING,
	GLINK_STATE_OPEN,
	GLINK_STATE_CLOSING,
};
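
/*
 * A channel comes up through a handshake: one side sends RPM_CMD_OPEN, the
 * peer responds with RPM_CMD_OPEN_ACK and, when it opens its own end, its
 * own RPM_CMD_OPEN. The @open_req and @open_ack completions below track the
 * two halves of this exchange.
 */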

/**
 * struct glink_channel - internal representation of a channel
 * @rpdev: rpdev reference, only used for primary endpoints
 * @ept: rpmsg endpoint this channel is associated with
 * @glink: qcom_glink context handle
 * @refcount: refcount for the channel object
 * @recv_lock: guard for @ept.cb
 * @name: unique channel name/identifier
 * @lcid: channel id, in local space
 * @rcid: channel id, in remote space
 * @intent_lock: lock for protection of @liids, @riids
 * @liids: idr of all local intents
 * @riids: idr of all remote intents
 * @intent_work: worker responsible for transmitting rx_done packets
 * @done_intents: list of intents that need to be announced rx_done
 * @buf: receive buffer, for gathering fragments
 * @buf_offset: write offset in @buf
 * @buf_size: size of current @buf
 * @open_ack: completed once remote has acked the open-request
 * @open_req: completed once open-request has been received
 * @intent_req_lock: Synchronises multiple intent requests
 * @intent_req_result: Result of intent request
 * @intent_req_comp: Completion for intent_req signalling
 */
struct glink_channel {
	struct rpmsg_endpoint ept;

	struct rpmsg_device *rpdev;
	struct qcom_glink *glink;

	struct kref refcount;

	spinlock_t recv_lock;

	char *name;
	unsigned int lcid;
	unsigned int rcid;

	spinlock_t intent_lock;
	struct idr liids;
	struct idr riids;
	struct work_struct intent_work;
	struct list_head done_intents;

	struct glink_core_rx_intent *buf;
	int buf_offset;
	int buf_size;

	struct completion open_ack;
	struct completion open_req;

	struct mutex intent_req_lock;
	bool intent_req_result;
	struct completion intent_req_comp;
};

#define to_glink_channel(_ept) container_of(_ept, struct glink_channel, ept)

static const struct rpmsg_endpoint_ops glink_endpoint_ops;

#define RPM_CMD_VERSION			0
#define RPM_CMD_VERSION_ACK		1
#define RPM_CMD_OPEN			2
#define RPM_CMD_CLOSE			3
#define RPM_CMD_OPEN_ACK		4
#define RPM_CMD_INTENT			5
#define RPM_CMD_RX_DONE			6
#define RPM_CMD_RX_INTENT_REQ		7
#define RPM_CMD_RX_INTENT_REQ_ACK	8
#define RPM_CMD_TX_DATA			9
#define RPM_CMD_CLOSE_ACK		11
#define RPM_CMD_TX_DATA_CONT		12
#define RPM_CMD_READ_NOTIF		13
#define RPM_CMD_RX_DONE_W_REUSE		14

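/*
 * Feature bit carried in the @features word of the RPM_CMD_VERSION
 * exchange; presumably what selects the intentless mode of operation,
 * although nothing in this file tests the bit directly.
 */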
#define GLINK_FEATURE_INTENTLESS	BIT(1)

static void qcom_glink_rx_done_work(struct work_struct *work);

static struct glink_channel *qcom_glink_alloc_channel(struct qcom_glink *glink,
						      const char *name)
{
	struct glink_channel *channel;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return ERR_PTR(-ENOMEM);

	/* Set up glink internal glink_channel data */
	spin_lock_init(&channel->recv_lock);
	spin_lock_init(&channel->intent_lock);
	mutex_init(&channel->intent_req_lock);

	channel->glink = glink;
	channel->name = kstrdup(name, GFP_KERNEL);
	if (!channel->name) {
		kfree(channel);
		return ERR_PTR(-ENOMEM);
	}

	init_completion(&channel->open_req);
	init_completion(&channel->open_ack);
	init_completion(&channel->intent_req_comp);

	INIT_LIST_HEAD(&channel->done_intents);
	INIT_WORK(&channel->intent_work, qcom_glink_rx_done_work);

	idr_init(&channel->liids);
	idr_init(&channel->riids);
	kref_init(&channel->refcount);

	return channel;
}

static void qcom_glink_channel_release(struct kref *ref)
{
	struct glink_channel *channel = container_of(ref, struct glink_channel,
						     refcount);
	struct glink_core_rx_intent *intent;
	struct glink_core_rx_intent *tmp;
	unsigned long flags;
	int iid;

	/* cancel pending rx_done work */
	cancel_work_sync(&channel->intent_work);

	spin_lock_irqsave(&channel->intent_lock, flags);
	/* Free all non-reuse intents pending rx_done work */
	list_for_each_entry_safe(intent, tmp, &channel->done_intents, node) {
		if (!intent->reuse) {
			kfree(intent->data);
			kfree(intent);
		}
	}

	idr_for_each_entry(&channel->liids, tmp, iid) {
		kfree(tmp->data);
		kfree(tmp);
	}
	idr_destroy(&channel->liids);

	idr_for_each_entry(&channel->riids, tmp, iid)
		kfree(tmp);
	idr_destroy(&channel->riids);
	spin_unlock_irqrestore(&channel->intent_lock, flags);

	kfree(channel->name);
	kfree(channel);
}

static size_t qcom_glink_rx_avail(struct qcom_glink *glink)
{
	return glink->rx_pipe->avail(glink->rx_pipe);
}

static void qcom_glink_rx_peak(struct qcom_glink *glink,
			       void *data, unsigned int offset, size_t count)
{
	glink->rx_pipe->peak(glink->rx_pipe, data, offset, count);
}

static void qcom_glink_rx_advance(struct qcom_glink *glink, size_t count)
{
	glink->rx_pipe->advance(glink->rx_pipe, count);
}

static size_t qcom_glink_tx_avail(struct qcom_glink *glink)
{
	return glink->tx_pipe->avail(glink->tx_pipe);
}

static void qcom_glink_tx_write(struct qcom_glink *glink,
				const void *hdr, size_t hlen,
				const void *data, size_t dlen)
{
	glink->tx_pipe->write(glink->tx_pipe, hdr, hlen, data, dlen);
}
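
/*
 * The qcom_glink_pipe callbacks wrapped above abstract the underlying FIFO
 * (e.g. RPM message RAM or SMEM): avail() reports the number of readable or
 * writable bytes, peak() copies data out at an offset without consuming it,
 * advance() moves the read index and write() pushes header and payload
 * contiguously. The rx path peeks first and only advances once a complete
 * command has been handled.
 */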

static int qcom_glink_tx(struct qcom_glink *glink,
			 const void *hdr, size_t hlen,
			 const void *data, size_t dlen, bool wait)
{
	unsigned int tlen = hlen + dlen;
	unsigned long flags;
	int ret = 0;

	/* Reject packets that are too big */
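	/*
	 * Note that tlen == pipe length is also rejected: a completely
	 * full FIFO would be indistinguishable from an empty one.
	 */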
	if (tlen >= glink->tx_pipe->length)
		return -EINVAL;

	spin_lock_irqsave(&glink->tx_lock, flags);

	while (qcom_glink_tx_avail(glink) < tlen) {
		if (!wait) {
			ret = -EAGAIN;
			goto out;
		}

		/* Wait without holding the tx_lock */
		spin_unlock_irqrestore(&glink->tx_lock, flags);

		usleep_range(10000, 15000);

		spin_lock_irqsave(&glink->tx_lock, flags);
	}

	qcom_glink_tx_write(glink, hdr, hlen, data, dlen);

	mbox_send_message(glink->mbox_chan, NULL);
	mbox_client_txdone(glink->mbox_chan, 0);

out:
	spin_unlock_irqrestore(&glink->tx_lock, flags);

	return ret;
}

static int qcom_glink_send_version(struct qcom_glink *glink)
{
	struct glink_msg msg;

	msg.cmd = cpu_to_le16(RPM_CMD_VERSION);
	msg.param1 = cpu_to_le16(GLINK_VERSION_1);
	msg.param2 = cpu_to_le32(glink->features);

	return qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
}

static void qcom_glink_send_version_ack(struct qcom_glink *glink)
{
	struct glink_msg msg;

	msg.cmd = cpu_to_le16(RPM_CMD_VERSION_ACK);
	msg.param1 = cpu_to_le16(GLINK_VERSION_1);
	msg.param2 = cpu_to_le32(glink->features);

	qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
}

static void qcom_glink_send_open_ack(struct qcom_glink *glink,
				     struct glink_channel *channel)
{
	struct glink_msg msg;

	msg.cmd = cpu_to_le16(RPM_CMD_OPEN_ACK);
	msg.param1 = cpu_to_le16(channel->rcid);
	msg.param2 = cpu_to_le32(0);

	qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
}

static void qcom_glink_handle_intent_req_ack(struct qcom_glink *glink,
					     unsigned int cid, bool granted)
{
	struct glink_channel *channel;
	unsigned long flags;

	spin_lock_irqsave(&glink->idr_lock, flags);
	channel = idr_find(&glink->rcids, cid);
	spin_unlock_irqrestore(&glink->idr_lock, flags);
	if (!channel) {
		dev_err(glink->dev, "unable to find channel\n");
		return;
	}

	channel->intent_req_result = granted;
	complete(&channel->intent_req_comp);
}

/**
 * qcom_glink_send_open_req() - send a RPM_CMD_OPEN request to the remote
 * @glink: Ptr to the glink edge
 * @channel: Ptr to the channel the open request is sent for
 *
 * Allocates a local channel id and sends a RPM_CMD_OPEN message to the remote.
 * Will return with refcount held, regardless of outcome.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int qcom_glink_send_open_req(struct qcom_glink *glink,
				    struct glink_channel *channel)
{
	struct {
		struct glink_msg msg;
		u8 name[GLINK_NAME_SIZE];
	} __packed req;
	int name_len = strlen(channel->name) + 1;
	int req_len = ALIGN(sizeof(req.msg) + name_len, 8);
	int ret;
	unsigned long flags;

	kref_get(&channel->refcount);

	spin_lock_irqsave(&glink->idr_lock, flags);
	ret = idr_alloc_cyclic(&glink->lcids, channel,
			       RPM_GLINK_CID_MIN, RPM_GLINK_CID_MAX,
			       GFP_ATOMIC);
	spin_unlock_irqrestore(&glink->idr_lock, flags);
	if (ret < 0)
		return ret;

	channel->lcid = ret;

	req.msg.cmd = cpu_to_le16(RPM_CMD_OPEN);
	req.msg.param1 = cpu_to_le16(channel->lcid);
	req.msg.param2 = cpu_to_le32(name_len);
	strcpy(req.name, channel->name);

	ret = qcom_glink_tx(glink, &req, req_len, NULL, 0, true);
	if (ret)
		goto remove_idr;

	return 0;

remove_idr:
	spin_lock_irqsave(&glink->idr_lock, flags);
	idr_remove(&glink->lcids, channel->lcid);
	channel->lcid = 0;
	spin_unlock_irqrestore(&glink->idr_lock, flags);

	return ret;
}

static void qcom_glink_send_close_req(struct qcom_glink *glink,
				      struct glink_channel *channel)
{
	struct glink_msg req;

	req.cmd = cpu_to_le16(RPM_CMD_CLOSE);
	req.param1 = cpu_to_le16(channel->lcid);
	req.param2 = 0;

	qcom_glink_tx(glink, &req, sizeof(req), NULL, 0, true);
}

static void qcom_glink_send_close_ack(struct qcom_glink *glink,
				      unsigned int rcid)
{
	struct glink_msg req;

	req.cmd = cpu_to_le16(RPM_CMD_CLOSE_ACK);
	req.param1 = cpu_to_le16(rcid);
	req.param2 = 0;

	qcom_glink_tx(glink, &req, sizeof(req), NULL, 0, true);
}

static void qcom_glink_rx_done_work(struct work_struct *work)
{
	struct glink_channel *channel = container_of(work, struct glink_channel,
						     intent_work);
	struct qcom_glink *glink = channel->glink;
	struct glink_core_rx_intent *intent, *tmp;
	struct {
		u16 id;
		u16 lcid;
		u32 liid;
	} __packed cmd;

	unsigned int cid = channel->lcid;
	unsigned int iid;
	bool reuse;
	unsigned long flags;

	spin_lock_irqsave(&channel->intent_lock, flags);
	list_for_each_entry_safe(intent, tmp, &channel->done_intents, node) {
		list_del(&intent->node);
		spin_unlock_irqrestore(&channel->intent_lock, flags);
		iid = intent->id;
		reuse = intent->reuse;

		cmd.id = reuse ? RPM_CMD_RX_DONE_W_REUSE : RPM_CMD_RX_DONE;
		cmd.lcid = cid;
		cmd.liid = iid;

		qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true);
		if (!reuse) {
			kfree(intent->data);
			kfree(intent);
		}
		spin_lock_irqsave(&channel->intent_lock, flags);
	}
	spin_unlock_irqrestore(&channel->intent_lock, flags);
}

static void qcom_glink_rx_done(struct qcom_glink *glink,
			       struct glink_channel *channel,
			       struct glink_core_rx_intent *intent)
{
	/* We don't send RX_DONE to intentless systems */
	if (glink->intentless) {
		kfree(intent->data);
		kfree(intent);
		return;
	}

	/* Take it off the tree of receive intents */
	if (!intent->reuse) {
		spin_lock(&channel->intent_lock);
		idr_remove(&channel->liids, intent->id);
		spin_unlock(&channel->intent_lock);
	}

	/* Schedule the sending of a rx_done indication */
	spin_lock(&channel->intent_lock);
	list_add_tail(&intent->node, &channel->done_intents);
	spin_unlock(&channel->intent_lock);

	schedule_work(&channel->intent_work);
}

/**
 * qcom_glink_receive_version() - receive version/features from remote system
 *
 * @glink: pointer to transport interface
 * @version: remote version
 * @features: remote features
 *
 * This function is called in response to a remote-initiated version/feature
 * negotiation sequence.
 */
static void qcom_glink_receive_version(struct qcom_glink *glink,
				       u32 version,
				       u32 features)
{
	switch (version) {
	case 0:
		break;
	case GLINK_VERSION_1:
		glink->features &= features;
		fallthrough;
	default:
		qcom_glink_send_version_ack(glink);
		break;
	}
}

/**
 * qcom_glink_receive_version_ack() - receive negotiation ack from remote system
 *
 * @glink: pointer to transport interface
 * @version: remote version response
 * @features: remote features response
 *
 * This function is called in response to a local-initiated version/feature
 * negotiation sequence and is the counter-offer from the remote side based
 * upon the initial version and feature set requested.
 */
static void qcom_glink_receive_version_ack(struct qcom_glink *glink,
					   u32 version,
					   u32 features)
{
	switch (version) {
	case 0:
		/* Version negotiation failed */
		break;
	case GLINK_VERSION_1:
		if (features == glink->features)
			break;

		glink->features &= features;
		fallthrough;
	default:
		qcom_glink_send_version(glink);
		break;
	}
}

/**
 * qcom_glink_send_intent_req_ack() - convert an rx intent request ack cmd to
 *				      wire format and transmit
 * @glink: The transport to transmit on.
 * @channel: The glink channel
 * @granted: The request response to encode.
 *
 * Return: 0 on success or standard Linux error code.
 */
static int qcom_glink_send_intent_req_ack(struct qcom_glink *glink,
					  struct glink_channel *channel,
					  bool granted)
{
	struct glink_msg msg;

	msg.cmd = cpu_to_le16(RPM_CMD_RX_INTENT_REQ_ACK);
	msg.param1 = cpu_to_le16(channel->lcid);
	msg.param2 = cpu_to_le32(granted);

	qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);

	return 0;
}

/**
 * qcom_glink_advertise_intent() - convert an rx intent cmd to wire format and
 *				   transmit
 * @glink: The transport to transmit on.
 * @channel: The local channel
 * @intent: The intent to pass on to remote.
 *
 * Return: 0 on success or standard Linux error code.
 */
static int qcom_glink_advertise_intent(struct qcom_glink *glink,
				       struct glink_channel *channel,
				       struct glink_core_rx_intent *intent)
{
	struct command {
		__le16 id;
		__le16 lcid;
		__le32 count;
		__le32 size;
		__le32 liid;
	} __packed;
	struct command cmd;

	cmd.id = cpu_to_le16(RPM_CMD_INTENT);
	cmd.lcid = cpu_to_le16(channel->lcid);
	cmd.count = cpu_to_le32(1);
	cmd.size = cpu_to_le32(intent->size);
	cmd.liid = cpu_to_le32(intent->id);

	qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true);

	return 0;
}

static struct glink_core_rx_intent *
qcom_glink_alloc_intent(struct qcom_glink *glink,
			struct glink_channel *channel,
			size_t size,
			bool reuseable)
{
	struct glink_core_rx_intent *intent;
	int ret;
	unsigned long flags;

	intent = kzalloc(sizeof(*intent), GFP_KERNEL);
	if (!intent)
		return NULL;

	intent->data = kzalloc(size, GFP_KERNEL);
	if (!intent->data)
		goto free_intent;

	spin_lock_irqsave(&channel->intent_lock, flags);
	ret = idr_alloc_cyclic(&channel->liids, intent, 1, -1, GFP_ATOMIC);
	if (ret < 0) {
		spin_unlock_irqrestore(&channel->intent_lock, flags);
		goto free_data;
	}
	spin_unlock_irqrestore(&channel->intent_lock, flags);

	intent->id = ret;
	intent->size = size;
	intent->reuse = reuseable;

	return intent;

free_data:
	kfree(intent->data);
free_intent:
	kfree(intent);
	return NULL;
}

static void qcom_glink_handle_rx_done(struct qcom_glink *glink,
				      u32 cid, u32 iid,
				      bool reuse)
{
	struct glink_core_rx_intent *intent;
	struct glink_channel *channel;
	unsigned long flags;

	spin_lock_irqsave(&glink->idr_lock, flags);
	channel = idr_find(&glink->rcids, cid);
	spin_unlock_irqrestore(&glink->idr_lock, flags);
	if (!channel) {
		dev_err(glink->dev, "invalid channel id received\n");
		return;
	}

	spin_lock_irqsave(&channel->intent_lock, flags);
	intent = idr_find(&channel->riids, iid);

	if (!intent) {
		spin_unlock_irqrestore(&channel->intent_lock, flags);
		dev_err(glink->dev, "invalid intent id received\n");
		return;
	}

	intent->in_use = false;

	if (!reuse) {
		idr_remove(&channel->riids, intent->id);
		kfree(intent);
	}
	spin_unlock_irqrestore(&channel->intent_lock, flags);
}

/**
 * qcom_glink_handle_intent_req() - Receive a request for rx_intent
 *				    from remote side
 * @glink: Pointer to the transport interface
 * @cid: Remote channel ID
 * @size: size of the intent
 *
 * The function searches for the local channel on which the rx_intent request
 * has arrived, allocates and advertises a matching intent, and acks the
 * request back to the remote (granted only if the allocation succeeded).
 */
static void qcom_glink_handle_intent_req(struct qcom_glink *glink,
					 u32 cid, size_t size)
{
	struct glink_core_rx_intent *intent;
	struct glink_channel *channel;
	unsigned long flags;

	spin_lock_irqsave(&glink->idr_lock, flags);
	channel = idr_find(&glink->rcids, cid);
	spin_unlock_irqrestore(&glink->idr_lock, flags);

	if (!channel) {
		pr_err("%s channel not found for cid %d\n", __func__, cid);
		return;
	}

	intent = qcom_glink_alloc_intent(glink, channel, size, false);
	if (intent)
		qcom_glink_advertise_intent(glink, channel, intent);

	qcom_glink_send_intent_req_ack(glink, channel, !!intent);
}

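/*
 * Copy a control command that cannot be handled in IRQ context off the rx
 * FIFO, together with @extra bytes of payload, and queue it on @rx_queue for
 * the rx_work worker to process. Returns -ENXIO if the full command is not
 * yet in the FIFO and -ENOMEM if the copy cannot be allocated.
 */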
static int qcom_glink_rx_defer(struct qcom_glink *glink, size_t extra)
{
	struct glink_defer_cmd *dcmd;

	extra = ALIGN(extra, 8);

	if (qcom_glink_rx_avail(glink) < sizeof(struct glink_msg) + extra) {
		dev_dbg(glink->dev, "Insufficient data in rx fifo");
		return -ENXIO;
	}

	dcmd = kzalloc(sizeof(*dcmd) + extra, GFP_ATOMIC);
	if (!dcmd)
		return -ENOMEM;

	INIT_LIST_HEAD(&dcmd->node);

	qcom_glink_rx_peak(glink, &dcmd->msg, 0, sizeof(dcmd->msg) + extra);

	spin_lock(&glink->rx_lock);
	list_add_tail(&dcmd->node, &glink->rx_queue);
	spin_unlock(&glink->rx_lock);

	schedule_work(&glink->rx_work);
	qcom_glink_rx_advance(glink, sizeof(dcmd->msg) + extra);

	return 0;
}

static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail)
{
	struct glink_core_rx_intent *intent;
	struct glink_channel *channel;
	struct {
		struct glink_msg msg;
		__le32 chunk_size;
		__le32 left_size;
	} __packed hdr;
	unsigned int chunk_size;
	unsigned int left_size;
	unsigned int rcid;
	unsigned int liid;
	int ret = 0;
	unsigned long flags;

	if (avail < sizeof(hdr)) {
		dev_dbg(glink->dev, "Not enough data in fifo\n");
		return -EAGAIN;
	}

	qcom_glink_rx_peak(glink, &hdr, 0, sizeof(hdr));
	chunk_size = le32_to_cpu(hdr.chunk_size);
	left_size = le32_to_cpu(hdr.left_size);

	if (avail < sizeof(hdr) + chunk_size) {
		dev_dbg(glink->dev, "Payload not yet in fifo\n");
		return -EAGAIN;
	}

	rcid = le16_to_cpu(hdr.msg.param1);
	spin_lock_irqsave(&glink->idr_lock, flags);
	channel = idr_find(&glink->rcids, rcid);
	spin_unlock_irqrestore(&glink->idr_lock, flags);
	if (!channel) {
		dev_dbg(glink->dev, "Data on non-existing channel\n");

		/* Drop the message */
		goto advance_rx;
	}

	if (glink->intentless) {
		/* Might have an ongoing, fragmented, message to append */
		if (!channel->buf) {
			intent = kzalloc(sizeof(*intent), GFP_ATOMIC);
			if (!intent)
				return -ENOMEM;

			intent->data = kmalloc(chunk_size + left_size,
					       GFP_ATOMIC);
			if (!intent->data) {
				kfree(intent);
				return -ENOMEM;
			}

			intent->id = 0xbabababa;
			intent->size = chunk_size + left_size;
			intent->offset = 0;

			channel->buf = intent;
		} else {
			intent = channel->buf;
		}
	} else {
		liid = le32_to_cpu(hdr.msg.param2);

		spin_lock_irqsave(&channel->intent_lock, flags);
		intent = idr_find(&channel->liids, liid);
		spin_unlock_irqrestore(&channel->intent_lock, flags);

		if (!intent) {
			dev_err(glink->dev,
				"no intent found for channel %s intent %d",
				channel->name, liid);
			ret = -ENOENT;
			goto advance_rx;
		}
	}

	if (intent->size - intent->offset < chunk_size) {
		dev_err(glink->dev, "Insufficient space in intent\n");

		/* The packet header lied, drop payload */
		goto advance_rx;
	}

	qcom_glink_rx_peak(glink, intent->data + intent->offset,
			   sizeof(hdr), chunk_size);
	intent->offset += chunk_size;

	/* Handle message when no fragments remain to be received */
	if (!left_size) {
		spin_lock(&channel->recv_lock);
		if (channel->ept.cb) {
			channel->ept.cb(channel->ept.rpdev,
					intent->data,
					intent->offset,
					channel->ept.priv,
					RPMSG_ADDR_ANY);
		}
		spin_unlock(&channel->recv_lock);

		intent->offset = 0;
		channel->buf = NULL;

		qcom_glink_rx_done(glink, channel, intent);
	}

advance_rx:
	qcom_glink_rx_advance(glink, ALIGN(sizeof(hdr) + chunk_size, 8));

	return ret;
}

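/*
 * An RPM_CMD_INTENT advertisement carries @count (size, iid) pairs, each
 * describing a buffer the remote has queued for us to transmit into; they
 * are recorded in the channel's riids idr so the tx path can claim them.
 */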
static void qcom_glink_handle_intent(struct qcom_glink *glink,
				     unsigned int cid,
				     unsigned int count,
				     size_t avail)
{
	struct glink_core_rx_intent *intent;
	struct glink_channel *channel;
	struct intent_pair {
		__le32 size;
		__le32 iid;
	};

	struct {
		struct glink_msg msg;
		struct intent_pair intents[];
	} __packed *msg;

	const size_t msglen = struct_size(msg, intents, count);
	int ret;
	int i;
	unsigned long flags;

	if (avail < msglen) {
		dev_dbg(glink->dev, "Not enough data in fifo\n");
		return;
	}

	spin_lock_irqsave(&glink->idr_lock, flags);
	channel = idr_find(&glink->rcids, cid);
	spin_unlock_irqrestore(&glink->idr_lock, flags);
	if (!channel) {
		dev_err(glink->dev, "intents for non-existing channel\n");
		return;
	}

	msg = kmalloc(msglen, GFP_ATOMIC);
	if (!msg)
		return;

	qcom_glink_rx_peak(glink, msg, 0, msglen);

	for (i = 0; i < count; ++i) {
		intent = kzalloc(sizeof(*intent), GFP_ATOMIC);
		if (!intent)
			break;

		intent->id = le32_to_cpu(msg->intents[i].iid);
		intent->size = le32_to_cpu(msg->intents[i].size);

		spin_lock_irqsave(&channel->intent_lock, flags);
		ret = idr_alloc(&channel->riids, intent,
				intent->id, intent->id + 1, GFP_ATOMIC);
		spin_unlock_irqrestore(&channel->intent_lock, flags);

		if (ret < 0)
			dev_err(glink->dev, "failed to store remote intent\n");
	}

	kfree(msg);
	qcom_glink_rx_advance(glink, ALIGN(msglen, 8));
}

static int qcom_glink_rx_open_ack(struct qcom_glink *glink, unsigned int lcid)
{
	struct glink_channel *channel;

	spin_lock(&glink->idr_lock);
	channel = idr_find(&glink->lcids, lcid);
	spin_unlock(&glink->idr_lock);
	if (!channel) {
		dev_err(glink->dev, "Invalid open ack packet\n");
		return -EINVAL;
	}

	complete_all(&channel->open_ack);

	return 0;
}

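/*
 * IRQ handler: drain the rx FIFO one command at a time. Commands that are
 * safe to handle in atomic context are processed inline and the FIFO
 * advanced here; the rest are deferred to rx_work via qcom_glink_rx_defer().
 */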
static irqreturn_t qcom_glink_native_intr(int irq, void *data)
{
	struct qcom_glink *glink = data;
	struct glink_msg msg;
	unsigned int param1;
	unsigned int param2;
	unsigned int avail;
	unsigned int cmd;
	int ret = 0;

	for (;;) {
		avail = qcom_glink_rx_avail(glink);
		if (avail < sizeof(msg))
			break;

		qcom_glink_rx_peak(glink, &msg, 0, sizeof(msg));

		cmd = le16_to_cpu(msg.cmd);
		param1 = le16_to_cpu(msg.param1);
		param2 = le32_to_cpu(msg.param2);

		switch (cmd) {
		case RPM_CMD_VERSION:
		case RPM_CMD_VERSION_ACK:
		case RPM_CMD_CLOSE:
		case RPM_CMD_CLOSE_ACK:
		case RPM_CMD_RX_INTENT_REQ:
			ret = qcom_glink_rx_defer(glink, 0);
			break;
		case RPM_CMD_OPEN_ACK:
			ret = qcom_glink_rx_open_ack(glink, param1);
			qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
			break;
		case RPM_CMD_OPEN:
			ret = qcom_glink_rx_defer(glink, param2);
			break;
		case RPM_CMD_TX_DATA:
		case RPM_CMD_TX_DATA_CONT:
			ret = qcom_glink_rx_data(glink, avail);
			break;
		case RPM_CMD_READ_NOTIF:
			qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));

			mbox_send_message(glink->mbox_chan, NULL);
			mbox_client_txdone(glink->mbox_chan, 0);
			break;
		case RPM_CMD_INTENT:
			qcom_glink_handle_intent(glink, param1, param2, avail);
			break;
		case RPM_CMD_RX_DONE:
			qcom_glink_handle_rx_done(glink, param1, param2, false);
			qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
			break;
		case RPM_CMD_RX_DONE_W_REUSE:
			qcom_glink_handle_rx_done(glink, param1, param2, true);
			qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
			break;
		case RPM_CMD_RX_INTENT_REQ_ACK:
			qcom_glink_handle_intent_req_ack(glink, param1, param2);
			qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
			break;
		default:
			dev_err(glink->dev, "unhandled rx cmd: %d\n", cmd);
			ret = -EINVAL;
			break;
		}

		if (ret)
			break;
	}

	return IRQ_HANDLED;
}

/* Locally initiated rpmsg_create_ept */
static struct glink_channel *qcom_glink_create_local(struct qcom_glink *glink,
						     const char *name)
{
	struct glink_channel *channel;
	int ret;
	unsigned long flags;

	channel = qcom_glink_alloc_channel(glink, name);
	if (IS_ERR(channel))
		return ERR_CAST(channel);

	ret = qcom_glink_send_open_req(glink, channel);
	if (ret)
		goto release_channel;

	ret = wait_for_completion_timeout(&channel->open_ack, 5 * HZ);
	if (!ret)
		goto err_timeout;

	ret = wait_for_completion_timeout(&channel->open_req, 5 * HZ);
	if (!ret)
		goto err_timeout;

	qcom_glink_send_open_ack(glink, channel);

	return channel;

err_timeout:
	/* qcom_glink_send_open_req() did register the channel in lcids */
	spin_lock_irqsave(&glink->idr_lock, flags);
	idr_remove(&glink->lcids, channel->lcid);
	spin_unlock_irqrestore(&glink->idr_lock, flags);

release_channel:
	/* Release qcom_glink_send_open_req() reference */
	kref_put(&channel->refcount, qcom_glink_channel_release);
	/* Release qcom_glink_alloc_channel() reference */
	kref_put(&channel->refcount, qcom_glink_channel_release);

	return ERR_PTR(-ETIMEDOUT);
}

/* Remote initiated rpmsg_create_ept */
static int qcom_glink_create_remote(struct qcom_glink *glink,
				    struct glink_channel *channel)
{
	int ret;

	qcom_glink_send_open_ack(glink, channel);

	ret = qcom_glink_send_open_req(glink, channel);
	if (ret)
		goto close_link;

	ret = wait_for_completion_timeout(&channel->open_ack, 5 * HZ);
	if (!ret) {
		ret = -ETIMEDOUT;
		goto close_link;
	}

	return 0;

close_link:
	/*
	 * Send a close request to "undo" our open-ack. The close-ack will
	 * release the qcom_glink_send_open_req() reference and the last
	 * reference will be released after receiving remote_close or
	 * transport unregister by calling qcom_glink_native_remove().
	 */
	qcom_glink_send_close_req(glink, channel);

	return ret;
}

static struct rpmsg_endpoint *qcom_glink_create_ept(struct rpmsg_device *rpdev,
						    rpmsg_rx_cb_t cb,
						    void *priv,
						    struct rpmsg_channel_info
						    chinfo)
{
	struct glink_channel *parent = to_glink_channel(rpdev->ept);
	struct glink_channel *channel;
	struct qcom_glink *glink = parent->glink;
	struct rpmsg_endpoint *ept;
	const char *name = chinfo.name;
	int cid;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&glink->idr_lock, flags);
	idr_for_each_entry(&glink->rcids, channel, cid) {
		if (!strcmp(channel->name, name))
			break;
	}
	spin_unlock_irqrestore(&glink->idr_lock, flags);

	if (!channel) {
		channel = qcom_glink_create_local(glink, name);
		if (IS_ERR(channel))
			return NULL;
	} else {
		ret = qcom_glink_create_remote(glink, channel);
		if (ret)
			return NULL;
	}

	ept = &channel->ept;
	ept->rpdev = rpdev;
	ept->cb = cb;
	ept->priv = priv;
	ept->ops = &glink_endpoint_ops;

	return ept;
}

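/*
 * The optional "qcom,intents" DT property is parsed below as a list of
 * <size count> pairs; without it, a single group of five 1 KiB intents is
 * advertised once the channel is fully open.
 */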
static int qcom_glink_announce_create(struct rpmsg_device *rpdev)
{
	struct glink_channel *channel = to_glink_channel(rpdev->ept);
	struct device_node *np = rpdev->dev.of_node;
	struct qcom_glink *glink = channel->glink;
	struct glink_core_rx_intent *intent;
	const struct property *prop = NULL;
	__be32 defaults[] = { cpu_to_be32(SZ_1K), cpu_to_be32(5) };
	int num_intents;
	int num_groups = 1;
	__be32 *val = defaults;
	int size;

	if (glink->intentless || !completion_done(&channel->open_ack))
		return 0;

	prop = of_find_property(np, "qcom,intents", NULL);
	if (prop) {
		val = prop->value;
		num_groups = prop->length / sizeof(u32) / 2;
	}

	/* Channel is now open, advertise base set of intents */
	while (num_groups--) {
		size = be32_to_cpup(val++);
		num_intents = be32_to_cpup(val++);
		while (num_intents--) {
			intent = qcom_glink_alloc_intent(glink, channel, size,
							 true);
			if (!intent)
				break;

			qcom_glink_advertise_intent(glink, channel, intent);
		}
	}
	return 0;
}

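/**
 * qcom_glink_destroy_ept() - tear down an rpmsg endpoint
 * @ept:	endpoint to destroy
 *
 * Clears the rx callback under @recv_lock so no further messages are
 * delivered, decouples the rpmsg device from the channel and sends a
 * close request to the remote.
 */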
static void qcom_glink_destroy_ept(struct rpmsg_endpoint *ept)
{
	struct glink_channel *channel = to_glink_channel(ept);
	struct qcom_glink *glink = channel->glink;
	unsigned long flags;

	spin_lock_irqsave(&channel->recv_lock, flags);
	channel->ept.cb = NULL;
	spin_unlock_irqrestore(&channel->recv_lock, flags);

	/* Decouple the potential rpdev from the channel */
	channel->rpdev = NULL;

	qcom_glink_send_close_req(glink, channel);
}

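/**
 * qcom_glink_request_intent() - ask the remote to queue an RX intent
 * @glink:	glink transport the channel lives on
 * @channel:	channel in need of an intent for an outgoing message
 * @size:	size of the payload about to be sent
 *
 * Sends an RPM_CMD_RX_INTENT_REQ and waits up to ten seconds for the
 * remote's verdict, serialized by @intent_req_lock.
 *
 * Return: 0 if the remote granted the request, -ETIMEDOUT if no answer
 * arrived in time or -ECANCELED if the request was denied.
 */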
static int qcom_glink_request_intent(struct qcom_glink *glink,
				     struct glink_channel *channel,
				     size_t size)
{
	struct {
		u16 id;
		u16 cid;
		u32 size;
	} __packed cmd;

	int ret;

	mutex_lock(&channel->intent_req_lock);

	reinit_completion(&channel->intent_req_comp);

	cmd.id = RPM_CMD_RX_INTENT_REQ;
	cmd.cid = channel->lcid;
	cmd.size = size;

	ret = qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true);
	if (ret)
		goto unlock;

	ret = wait_for_completion_timeout(&channel->intent_req_comp, 10 * HZ);
	if (!ret) {
		dev_err(glink->dev, "intent request timed out\n");
		ret = -ETIMEDOUT;
	} else {
		ret = channel->intent_req_result ? 0 : -ECANCELED;
	}

unlock:
	mutex_unlock(&channel->intent_req_lock);
	return ret;
}

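/**
 * __qcom_glink_send() - transmit a message over a channel
 * @channel:	channel to transmit on
 * @data:	payload to send
 * @len:	length of @data
 * @wait:	whether to block until a suitable RX intent is available
 *
 * Unless the transport is intentless, this picks the smallest free remote
 * intent that fits @len, asking the remote for a new one when none is
 * available and @wait permits. The payload is sent as a single
 * RPM_CMD_TX_DATA chunk with no remainder.
 *
 * Return: 0 on success, -EBUSY when no intent is free and @wait is false,
 * or another negative errno on failure.
 */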
static int __qcom_glink_send(struct glink_channel *channel,
			     void *data, int len, bool wait)
{
	struct qcom_glink *glink = channel->glink;
	struct glink_core_rx_intent *intent = NULL;
	struct glink_core_rx_intent *tmp;
	int iid = 0;
	struct {
		struct glink_msg msg;
		__le32 chunk_size;
		__le32 left_size;
	} __packed req;
	int ret;
	unsigned long flags;

	if (!glink->intentless) {
		while (!intent) {
			spin_lock_irqsave(&channel->intent_lock, flags);
			idr_for_each_entry(&channel->riids, tmp, iid) {
				if (tmp->size >= len && !tmp->in_use) {
					if (!intent)
						intent = tmp;
					else if (intent->size > tmp->size)
						intent = tmp;
					if (intent->size == len)
						break;
				}
			}
			if (intent)
				intent->in_use = true;
			spin_unlock_irqrestore(&channel->intent_lock, flags);

			/* We found an available intent */
			if (intent)
				break;

			if (!wait)
				return -EBUSY;

			ret = qcom_glink_request_intent(glink, channel, len);
			if (ret < 0)
				return ret;
		}

		iid = intent->id;
	}

	req.msg.cmd = cpu_to_le16(RPM_CMD_TX_DATA);
	req.msg.param1 = cpu_to_le16(channel->lcid);
	req.msg.param2 = cpu_to_le32(iid);
	req.chunk_size = cpu_to_le32(len);
	req.left_size = cpu_to_le32(0);

	ret = qcom_glink_tx(glink, &req, sizeof(req), data, len, wait);

	/* Mark intent available if we failed */
	if (ret && intent)
		intent->in_use = false;

	return ret;
}

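/* Blocking and non-blocking rpmsg send ops, differing only in @wait */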
static int qcom_glink_send(struct rpmsg_endpoint *ept, void *data, int len)
{
	struct glink_channel *channel = to_glink_channel(ept);

	return __qcom_glink_send(channel, data, len, true);
}

static int qcom_glink_trysend(struct rpmsg_endpoint *ept, void *data, int len)
{
	struct glink_channel *channel = to_glink_channel(ept);

	return __qcom_glink_send(channel, data, len, false);
}

/*
 * Finds the device_node for the glink child interested in this channel.
 */
static struct device_node *qcom_glink_match_channel(struct device_node *node,
						    const char *channel)
{
	struct device_node *child;
	const char *name;
	const char *key;
	int ret;

	for_each_available_child_of_node(node, child) {
		key = "qcom,glink-channels";
		ret = of_property_read_string(child, key, &name);
		if (ret)
			continue;

		if (strcmp(name, channel) == 0)
			return child;
	}

	return NULL;
}

static const struct rpmsg_device_ops glink_device_ops = {
	.create_ept = qcom_glink_create_ept,
	.announce_create = qcom_glink_announce_create,
};

static const struct rpmsg_endpoint_ops glink_endpoint_ops = {
	.destroy_ept = qcom_glink_destroy_ept,
	.send = qcom_glink_send,
	.trysend = qcom_glink_trysend,
};

static void qcom_glink_rpdev_release(struct device *dev)
{
	struct rpmsg_device *rpdev = to_rpmsg_device(dev);
	struct glink_channel *channel = to_glink_channel(rpdev->ept);

	channel->rpdev = NULL;
	kfree(rpdev);
}

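/**
 * qcom_glink_rx_open() - handle a remote open request
 * @glink:	glink transport
 * @rcid:	channel id, as assigned by the remote
 * @name:	name of the channel being opened
 *
 * Matches the request against a half-open local channel of the same name
 * or allocates a fresh channel on behalf of the remote. In the latter
 * case an rpmsg device is registered so a driver can bind, with its
 * of_node resolved from the matching "qcom,glink-channels" child node.
 *
 * Return: 0 on success, negative errno otherwise.
 */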
static int qcom_glink_rx_open(struct qcom_glink *glink, unsigned int rcid,
			      char *name)
{
	struct glink_channel *channel;
	struct rpmsg_device *rpdev;
	bool create_device = false;
	struct device_node *node;
	int lcid;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&glink->idr_lock, flags);
	idr_for_each_entry(&glink->lcids, channel, lcid) {
		if (!strcmp(channel->name, name))
			break;
	}
	spin_unlock_irqrestore(&glink->idr_lock, flags);

	if (!channel) {
		channel = qcom_glink_alloc_channel(glink, name);
		if (IS_ERR(channel))
			return PTR_ERR(channel);

		/* The opening dance was initiated by the remote */
		create_device = true;
	}

	spin_lock_irqsave(&glink->idr_lock, flags);
	ret = idr_alloc(&glink->rcids, channel, rcid, rcid + 1, GFP_ATOMIC);
	if (ret < 0) {
		dev_err(glink->dev, "Unable to insert channel into rcid list\n");
		spin_unlock_irqrestore(&glink->idr_lock, flags);
		goto free_channel;
	}
	channel->rcid = ret;
	spin_unlock_irqrestore(&glink->idr_lock, flags);

	complete_all(&channel->open_req);

	if (create_device) {
		rpdev = kzalloc(sizeof(*rpdev), GFP_KERNEL);
		if (!rpdev) {
			ret = -ENOMEM;
			goto rcid_remove;
		}

		rpdev->ept = &channel->ept;
		/* strscpy_pad() guarantees NUL termination, unlike strncpy() */
		strscpy_pad(rpdev->id.name, name, RPMSG_NAME_SIZE);
		rpdev->src = RPMSG_ADDR_ANY;
		rpdev->dst = RPMSG_ADDR_ANY;
		rpdev->ops = &glink_device_ops;

		node = qcom_glink_match_channel(glink->dev->of_node, name);
		rpdev->dev.of_node = node;
		rpdev->dev.parent = glink->dev;
		rpdev->dev.release = qcom_glink_rpdev_release;

		ret = rpmsg_register_device(rpdev);
		if (ret)
			goto rcid_remove;

		channel->rpdev = rpdev;
	}

	return 0;

rcid_remove:
	spin_lock_irqsave(&glink->idr_lock, flags);
	idr_remove(&glink->rcids, channel->rcid);
	channel->rcid = 0;
	spin_unlock_irqrestore(&glink->idr_lock, flags);
free_channel:
	/* Release the reference, iff we took it */
	if (create_device)
		kref_put(&channel->refcount, qcom_glink_channel_release);

	return ret;
}

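/**
 * qcom_glink_rx_close() - handle a remote close request
 * @glink:	glink transport
 * @rcid:	remote channel id of the channel to close
 *
 * Flushes pending rx_done work, unregisters any rpmsg device bound to the
 * channel, acks the close request and drops the rcid reference.
 */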
static void qcom_glink_rx_close(struct qcom_glink *glink, unsigned int rcid)
{
	struct rpmsg_channel_info chinfo;
	struct glink_channel *channel;
	unsigned long flags;

	spin_lock_irqsave(&glink->idr_lock, flags);
	channel = idr_find(&glink->rcids, rcid);
	spin_unlock_irqrestore(&glink->idr_lock, flags);
	if (WARN(!channel, "close request on unknown channel\n"))
		return;

	/* cancel pending rx_done work */
	cancel_work_sync(&channel->intent_work);

	if (channel->rpdev) {
		strscpy_pad(chinfo.name, channel->name, sizeof(chinfo.name));
		chinfo.src = RPMSG_ADDR_ANY;
		chinfo.dst = RPMSG_ADDR_ANY;

		rpmsg_unregister_device(glink->dev, &chinfo);
	}

	qcom_glink_send_close_ack(glink, channel->rcid);

	spin_lock_irqsave(&glink->idr_lock, flags);
	idr_remove(&glink->rcids, channel->rcid);
	channel->rcid = 0;
	spin_unlock_irqrestore(&glink->idr_lock, flags);

	kref_put(&channel->refcount, qcom_glink_channel_release);
}

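/**
 * qcom_glink_rx_close_ack() - handle the remote's ack of a local close
 * @glink:	glink transport
 * @lcid:	local channel id of the channel being torn down
 *
 * Removes the channel from the lcid map and drops the associated
 * reference.
 */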
static void qcom_glink_rx_close_ack(struct qcom_glink *glink, unsigned int lcid)
{
	struct glink_channel *channel;
	unsigned long flags;

	spin_lock_irqsave(&glink->idr_lock, flags);
	channel = idr_find(&glink->lcids, lcid);
	if (WARN(!channel, "close ack on unknown channel\n")) {
		spin_unlock_irqrestore(&glink->idr_lock, flags);
		return;
	}

	idr_remove(&glink->lcids, channel->lcid);
	channel->lcid = 0;
	spin_unlock_irqrestore(&glink->idr_lock, flags);

	kref_put(&channel->refcount, qcom_glink_channel_release);
}

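/**
 * qcom_glink_work() - process deferred control messages
 * @work:	rx_work of the qcom_glink instance
 *
 * Drains @rx_queue and dispatches each deferred command (version
 * negotiation, channel open/close handshakes and intent requests)
 * outside of interrupt context.
 */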
static void qcom_glink_work(struct work_struct *work)
{
	struct qcom_glink *glink = container_of(work, struct qcom_glink,
						rx_work);
	struct glink_defer_cmd *dcmd;
	struct glink_msg *msg;
	unsigned long flags;
	unsigned int param1;
	unsigned int param2;
	unsigned int cmd;

	for (;;) {
		spin_lock_irqsave(&glink->rx_lock, flags);
		if (list_empty(&glink->rx_queue)) {
			spin_unlock_irqrestore(&glink->rx_lock, flags);
			break;
		}
		dcmd = list_first_entry(&glink->rx_queue,
					struct glink_defer_cmd, node);
		list_del(&dcmd->node);
		spin_unlock_irqrestore(&glink->rx_lock, flags);

		msg = &dcmd->msg;
		cmd = le16_to_cpu(msg->cmd);
		param1 = le16_to_cpu(msg->param1);
		param2 = le32_to_cpu(msg->param2);

		switch (cmd) {
		case RPM_CMD_VERSION:
			qcom_glink_receive_version(glink, param1, param2);
			break;
		case RPM_CMD_VERSION_ACK:
			qcom_glink_receive_version_ack(glink, param1, param2);
			break;
		case RPM_CMD_OPEN:
			qcom_glink_rx_open(glink, param1, msg->data);
			break;
		case RPM_CMD_CLOSE:
			qcom_glink_rx_close(glink, param1);
			break;
		case RPM_CMD_CLOSE_ACK:
			qcom_glink_rx_close_ack(glink, param1);
			break;
		case RPM_CMD_RX_INTENT_REQ:
			qcom_glink_handle_intent_req(glink, param1, param2);
			break;
		default:
			WARN(1, "Unknown defer object %d\n", cmd);
			break;
		}

		kfree(dcmd);
	}
}

static void qcom_glink_cancel_rx_work(struct qcom_glink *glink)
{
	struct glink_defer_cmd *dcmd;
	struct glink_defer_cmd *tmp;

	/* cancel any pending deferred rx_work */
	cancel_work_sync(&glink->rx_work);

	list_for_each_entry_safe(dcmd, tmp, &glink->rx_queue, node)
		kfree(dcmd);
}

static ssize_t rpmsg_name_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	int ret = 0;
	const char *name;

	ret = of_property_read_string(dev->of_node, "label", &name);
	if (ret < 0)
		name = dev->of_node->name;

	return snprintf(buf, RPMSG_NAME_SIZE, "%s\n", name);
}
static DEVICE_ATTR_RO(rpmsg_name);

static struct attribute *qcom_glink_attrs[] = {
	&dev_attr_rpmsg_name.attr,
	NULL
};
ATTRIBUTE_GROUPS(qcom_glink);

static void qcom_glink_device_release(struct device *dev)
{
	struct rpmsg_device *rpdev = to_rpmsg_device(dev);
	struct glink_channel *channel = to_glink_channel(rpdev->ept);

	/* Release qcom_glink_alloc_channel() reference */
	kref_put(&channel->refcount, qcom_glink_channel_release);
	kfree(rpdev);
}

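/*
 * Register the "rpmsg_chrdev" channel, which exposes this edge to
 * userspace through the rpmsg character device.
 */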
static int qcom_glink_create_chrdev(struct qcom_glink *glink)
{
	struct rpmsg_device *rpdev;
	struct glink_channel *channel;

	rpdev = kzalloc(sizeof(*rpdev), GFP_KERNEL);
	if (!rpdev)
		return -ENOMEM;

	channel = qcom_glink_alloc_channel(glink, "rpmsg_chrdev");
	if (IS_ERR(channel)) {
		kfree(rpdev);
		return PTR_ERR(channel);
	}
	channel->rpdev = rpdev;

	rpdev->ept = &channel->ept;
	rpdev->ops = &glink_device_ops;
	rpdev->dev.parent = glink->dev;
	rpdev->dev.release = qcom_glink_device_release;

	return rpmsg_chrdev_register_device(rpdev);
}

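/**
 * qcom_glink_native_probe() - set up a glink edge
 * @dev:	device handling this edge
 * @features:	feature flags to negotiate with the remote
 * @rx:		pipe object for the receive FIFO
 * @tx:		pipe object for the transmit FIFO
 * @intentless:	true if the transport does not use RX intents
 *
 * Allocates the transport context, acquires the outgoing mailbox and the
 * incoming interrupt, kicks off version negotiation and registers the
 * chrdev channel.
 *
 * Return: a qcom_glink handle on success, ERR_PTR() otherwise.
 */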
struct qcom_glink *qcom_glink_native_probe(struct device *dev,
					   unsigned long features,
					   struct qcom_glink_pipe *rx,
					   struct qcom_glink_pipe *tx,
					   bool intentless)
{
	int irq;
	int ret;
	struct qcom_glink *glink;

	glink = devm_kzalloc(dev, sizeof(*glink), GFP_KERNEL);
	if (!glink)
		return ERR_PTR(-ENOMEM);

	glink->dev = dev;
	glink->tx_pipe = tx;
	glink->rx_pipe = rx;

	glink->features = features;
	glink->intentless = intentless;

	spin_lock_init(&glink->tx_lock);
	spin_lock_init(&glink->rx_lock);
	INIT_LIST_HEAD(&glink->rx_queue);
	INIT_WORK(&glink->rx_work, qcom_glink_work);

	spin_lock_init(&glink->idr_lock);
	idr_init(&glink->lcids);
	idr_init(&glink->rcids);

	glink->dev->groups = qcom_glink_groups;

	ret = device_add_groups(dev, qcom_glink_groups);
	if (ret)
		dev_err(dev, "failed to add groups\n");

	ret = of_property_read_string(dev->of_node, "label", &glink->name);
	if (ret < 0)
		glink->name = dev->of_node->name;

	glink->mbox_client.dev = dev;
	glink->mbox_client.knows_txdone = true;
	glink->mbox_chan = mbox_request_channel(&glink->mbox_client, 0);
	if (IS_ERR(glink->mbox_chan)) {
		if (PTR_ERR(glink->mbox_chan) != -EPROBE_DEFER)
			dev_err(dev, "failed to acquire IPC channel\n");
		return ERR_CAST(glink->mbox_chan);
	}

	irq = of_irq_get(dev->of_node, 0);
	ret = devm_request_irq(dev, irq,
			       qcom_glink_native_intr,
			       IRQF_NO_SUSPEND | IRQF_SHARED,
			       "glink-native", glink);
	if (ret) {
		dev_err(dev, "failed to request IRQ\n");
		return ERR_PTR(ret);
	}

	glink->irq = irq;

	ret = qcom_glink_send_version(glink);
	if (ret)
		return ERR_PTR(ret);

	ret = qcom_glink_create_chrdev(glink);
	if (ret)
		dev_err(glink->dev, "failed to register chrdev\n");

	return glink;
}
EXPORT_SYMBOL_GPL(qcom_glink_native_probe);

static int qcom_glink_remove_device(struct device *dev, void *data)
{
	device_unregister(dev);

	return 0;
}

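/**
 * qcom_glink_native_remove() - tear down a glink edge
 * @glink:	transport to remove
 *
 * Disables the interrupt, flushes deferred work, unregisters all child
 * rpmsg devices and releases the channels still waiting for their close
 * handshake to complete.
 */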
void qcom_glink_native_remove(struct qcom_glink *glink)
{
	struct glink_channel *channel;
	int cid;
	int ret;

	disable_irq(glink->irq);
	qcom_glink_cancel_rx_work(glink);

	ret = device_for_each_child(glink->dev, NULL, qcom_glink_remove_device);
	if (ret)
		dev_warn(glink->dev, "Can't remove GLINK devices: %d\n", ret);

	/* Release any defunct local channels, waiting for close-ack */
	idr_for_each_entry(&glink->lcids, channel, cid)
		kref_put(&channel->refcount, qcom_glink_channel_release);

	/* Release any defunct remote channels, waiting for close-req */
	idr_for_each_entry(&glink->rcids, channel, cid)
		kref_put(&channel->refcount, qcom_glink_channel_release);

	idr_destroy(&glink->lcids);
	idr_destroy(&glink->rcids);
	mbox_free_channel(glink->mbox_chan);
}
EXPORT_SYMBOL_GPL(qcom_glink_native_remove);

void qcom_glink_native_unregister(struct qcom_glink *glink)
{
	device_unregister(glink->dev);
}
EXPORT_SYMBOL_GPL(qcom_glink_native_unregister);

MODULE_DESCRIPTION("Qualcomm GLINK driver");
MODULE_LICENSE("GPL v2");