// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus operations
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/greybus.h>

#include "greybus_trace.h"

static struct kmem_cache *gb_operation_cache;
static struct kmem_cache *gb_message_cache;

/* Workqueue to handle Greybus operation completions. */
static struct workqueue_struct *gb_operation_completion_wq;

/* Wait queue for synchronous cancellations. */
static DECLARE_WAIT_QUEUE_HEAD(gb_operation_cancellation_queue);

/*
 * Protects updates to operation->errno.
 */
static DEFINE_SPINLOCK(gb_operations_lock);

static int gb_operation_response_send(struct gb_operation *operation,
				      int errno);

/*
 * Increment operation active count and add to connection list unless the
 * connection is going away.
 *
 * Caller holds operation reference.
 */
static int gb_operation_get_active(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	unsigned long flags;

	spin_lock_irqsave(&connection->lock, flags);
	switch (connection->state) {
	case GB_CONNECTION_STATE_ENABLED:
		break;
	case GB_CONNECTION_STATE_ENABLED_TX:
		if (gb_operation_is_incoming(operation))
			goto err_unlock;
		break;
	case GB_CONNECTION_STATE_DISCONNECTING:
		if (!gb_operation_is_core(operation))
			goto err_unlock;
		break;
	default:
		goto err_unlock;
	}

	if (operation->active++ == 0)
		list_add_tail(&operation->links, &connection->operations);

	trace_gb_operation_get_active(operation);

	spin_unlock_irqrestore(&connection->lock, flags);

	return 0;

err_unlock:
	spin_unlock_irqrestore(&connection->lock, flags);

	return -ENOTCONN;
}

/* Caller holds operation reference. */
static void gb_operation_put_active(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	unsigned long flags;

	spin_lock_irqsave(&connection->lock, flags);

	trace_gb_operation_put_active(operation);

	if (--operation->active == 0) {
		list_del(&operation->links);
		if (atomic_read(&operation->waiters))
			wake_up(&gb_operation_cancellation_queue);
	}
	spin_unlock_irqrestore(&connection->lock, flags);
}

static bool gb_operation_is_active(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&connection->lock, flags);
	ret = operation->active;
	spin_unlock_irqrestore(&connection->lock, flags);

	return ret;
}

/*
 * Set an operation's result.
 *
 * Initially an outgoing operation's errno value is -EBADR.
 * If no error occurs before sending the request message the only
 * valid value operation->errno can be set to is -EINPROGRESS,
 * indicating the request has been (or rather is about to be) sent.
 * At that point nobody should be looking at the result until the
 * response arrives.
 *
 * The first time the result gets set after the request has been
 * sent, that result "sticks." That is, if two concurrent threads
 * race to set the result, the first one wins. The return value
 * tells the caller whether its result was recorded; if not the
 * caller has nothing more to do.
 *
 * The result value -EILSEQ is reserved to signal an implementation
 * error; if it's ever observed, the code performing the request has
 * done something fundamentally wrong. It is an error to try to set
 * the result to -EBADR, and attempts to do so result in a warning,
 * and -EILSEQ is used instead. Similarly, the only valid result
 * value to set for an operation in initial state is -EINPROGRESS.
 * Attempts to do otherwise will also record a (successful) -EILSEQ
 * operation result.
 */
static bool gb_operation_result_set(struct gb_operation *operation, int result)
{
	unsigned long flags;
	int prev;

	if (result == -EINPROGRESS) {
		/*
		 * -EINPROGRESS is used to indicate the request is
		 * in flight. It should be the first result value
		 * set after the initial -EBADR. Issue a warning
		 * and record an implementation error if it's
		 * set at any other time.
		 */
		spin_lock_irqsave(&gb_operations_lock, flags);
		prev = operation->errno;
		if (prev == -EBADR)
			operation->errno = result;
		else
			operation->errno = -EILSEQ;
		spin_unlock_irqrestore(&gb_operations_lock, flags);
		WARN_ON(prev != -EBADR);

		return true;
	}

	/*
	 * The first result value set after a request has been sent
	 * will be the final result of the operation. Subsequent
	 * attempts to set the result are ignored.
	 *
	 * Note that -EBADR is a reserved "initial state" result
	 * value. Attempts to set this value result in a warning,
	 * and the result code is set to -EILSEQ instead.
	 */
	if (WARN_ON(result == -EBADR))
		result = -EILSEQ; /* Nobody should be setting -EBADR */

	spin_lock_irqsave(&gb_operations_lock, flags);
	prev = operation->errno;
	if (prev == -EINPROGRESS)
		operation->errno = result; /* First and final result */
	spin_unlock_irqrestore(&gb_operations_lock, flags);

	return prev == -EINPROGRESS;
}

int gb_operation_result(struct gb_operation *operation)
{
	int result = operation->errno;

	WARN_ON(result == -EBADR);
	WARN_ON(result == -EINPROGRESS);

	return result;
}
EXPORT_SYMBOL_GPL(gb_operation_result);
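
/*
 * Result lifecycle sketch for the two functions above (illustrative only;
 * "op" stands for a hypothetical outgoing operation):
 *
 *	op->errno == -EBADR                          after creation
 *	gb_operation_result_set(op, -EINPROGRESS)    returns true, request in flight
 *	gb_operation_result_set(op, 0)               returns true, final result sticks
 *	gb_operation_result_set(op, -ETIMEDOUT)      returns false, ignored
 *	gb_operation_result(op)                      returns 0
 */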

/*
 * Looks up an outgoing operation on a connection and returns a refcounted
 * pointer if found, or NULL otherwise.
 */
static struct gb_operation *
gb_operation_find_outgoing(struct gb_connection *connection, u16 operation_id)
{
	struct gb_operation *operation;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&connection->lock, flags);
	list_for_each_entry(operation, &connection->operations, links)
		if (operation->id == operation_id &&
		    !gb_operation_is_incoming(operation)) {
			gb_operation_get(operation);
			found = true;
			break;
		}
	spin_unlock_irqrestore(&connection->lock, flags);

	return found ? operation : NULL;
}

static int gb_message_send(struct gb_message *message, gfp_t gfp)
{
	struct gb_connection *connection = message->operation->connection;

	trace_gb_message_send(message);
	return connection->hd->driver->message_send(connection->hd,
						    connection->hd_cport_id,
						    message,
						    gfp);
}

/*
 * Cancel a message we have passed to the host device layer to be sent.
 */
static void gb_message_cancel(struct gb_message *message)
{
	struct gb_host_device *hd = message->operation->connection->hd;

	hd->driver->message_cancel(message);
}

static void gb_operation_request_handle(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	int status;
	int ret;

	if (connection->handler) {
		status = connection->handler(operation);
	} else {
		dev_err(&connection->hd->dev,
			"%s: unexpected incoming request of type 0x%02x\n",
			connection->name, operation->type);

		status = -EPROTONOSUPPORT;
	}

	ret = gb_operation_response_send(operation, status);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to send response %d for type 0x%02x: %d\n",
			connection->name, status, operation->type, ret);
		return;
	}
}

/*
 * Process operation work.
 *
 * For incoming requests, call the protocol request handler. The operation
 * result should be -EINPROGRESS at this point.
 *
 * For outgoing requests, the operation result value should have
 * been set before queueing this. The operation callback function
 * allows the original requester to know the request has completed
 * and its result is available.
 */
static void gb_operation_work(struct work_struct *work)
{
	struct gb_operation *operation;
	int ret;

	operation = container_of(work, struct gb_operation, work);

	if (gb_operation_is_incoming(operation)) {
		gb_operation_request_handle(operation);
	} else {
		ret = del_timer_sync(&operation->timer);
		if (!ret) {
			/* Cancel request message if scheduled by timeout. */
			if (gb_operation_result(operation) == -ETIMEDOUT)
				gb_message_cancel(operation->request);
		}

		operation->callback(operation);
	}

	gb_operation_put_active(operation);
	gb_operation_put(operation);
}

static void gb_operation_timeout(struct timer_list *t)
{
	struct gb_operation *operation = from_timer(operation, t, timer);

	if (gb_operation_result_set(operation, -ETIMEDOUT)) {
		/*
		 * A stuck request message will be cancelled from the
		 * workqueue.
		 */
		queue_work(gb_operation_completion_wq, &operation->work);
	}
}

static void gb_operation_message_init(struct gb_host_device *hd,
				      struct gb_message *message,
				      u16 operation_id,
				      size_t payload_size, u8 type)
{
	struct gb_operation_msg_hdr *header;

	header = message->buffer;

	message->header = header;
	message->payload = payload_size ? header + 1 : NULL;
	message->payload_size = payload_size;

	/*
	 * The type supplied for incoming message buffers will be
	 * GB_REQUEST_TYPE_INVALID. Such buffers will be overwritten by
	 * arriving data so there's no need to initialize the message header.
	 */
	if (type != GB_REQUEST_TYPE_INVALID) {
		u16 message_size = (u16)(sizeof(*header) + payload_size);

		/*
		 * For a request, the operation id gets filled in
		 * when the message is sent. For a response, it
		 * will be copied from the request by the caller.
		 *
		 * The result field in a request message must be
		 * zero. It will be set just prior to sending for
		 * a response.
		 */
		header->size = cpu_to_le16(message_size);
		header->operation_id = 0;
		header->type = type;
		header->result = 0;
	}
}

/*
 * Allocate a message to be used for an operation request or response.
 * Both types of message contain a common header. The request message
 * for an outgoing operation is outbound, as is the response message
 * for an incoming operation. The message header for an outbound
 * message is partially initialized here.
 *
 * The headers for inbound messages don't need to be initialized;
 * they'll be filled in by arriving data.
 *
 * Our message buffers have the following layout:
 *	message header  \_ these combined are
 *	message payload /  the message size
 */
static struct gb_message *
gb_operation_message_alloc(struct gb_host_device *hd, u8 type,
			   size_t payload_size, gfp_t gfp_flags)
{
	struct gb_message *message;
	struct gb_operation_msg_hdr *header;
	size_t message_size = payload_size + sizeof(*header);

	if (message_size > hd->buffer_size_max) {
		dev_warn(&hd->dev, "requested message size too big (%zu > %zu)\n",
			 message_size, hd->buffer_size_max);
		return NULL;
	}

	/* Allocate the message structure and buffer. */
	message = kmem_cache_zalloc(gb_message_cache, gfp_flags);
	if (!message)
		return NULL;

	message->buffer = kzalloc(message_size, gfp_flags);
	if (!message->buffer)
		goto err_free_message;

	/* Initialize the message. Operation id is filled in later. */
	gb_operation_message_init(hd, message, 0, payload_size, type);

	return message;

err_free_message:
	kmem_cache_free(gb_message_cache, message);

	return NULL;
}
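
/*
 * Buffer layout sketch for the function above (illustrative numbers only,
 * assuming the usual 8-byte struct gb_operation_msg_hdr): a message with a
 * 4-byte payload occupies message_size = 8 + 4 = 12 bytes. message->header
 * points at the start of the buffer and message->payload at header + 1,
 * i.e. immediately past the header; a zero-size payload leaves
 * message->payload NULL.
 */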

static void gb_operation_message_free(struct gb_message *message)
{
	kfree(message->buffer);
	kmem_cache_free(gb_message_cache, message);
}

/*
 * Map an enum gb_operation_status value (which is represented in a
 * message as a single byte) to an appropriate Linux negative errno.
 */
static int gb_operation_status_map(u8 status)
{
	switch (status) {
	case GB_OP_SUCCESS:
		return 0;
	case GB_OP_INTERRUPTED:
		return -EINTR;
	case GB_OP_TIMEOUT:
		return -ETIMEDOUT;
	case GB_OP_NO_MEMORY:
		return -ENOMEM;
	case GB_OP_PROTOCOL_BAD:
		return -EPROTONOSUPPORT;
	case GB_OP_OVERFLOW:
		return -EMSGSIZE;
	case GB_OP_INVALID:
		return -EINVAL;
	case GB_OP_RETRY:
		return -EAGAIN;
	case GB_OP_NONEXISTENT:
		return -ENODEV;
	case GB_OP_MALFUNCTION:
		return -EILSEQ;
	case GB_OP_UNKNOWN_ERROR:
	default:
		return -EIO;
	}
}

/*
 * Map a Linux errno value (from operation->errno) into the value
 * that should represent it in a response message status sent
 * over the wire. Returns an enum gb_operation_status value (which
 * is represented in a message as a single byte).
 */
static u8 gb_operation_errno_map(int errno)
{
	switch (errno) {
	case 0:
		return GB_OP_SUCCESS;
	case -EINTR:
		return GB_OP_INTERRUPTED;
	case -ETIMEDOUT:
		return GB_OP_TIMEOUT;
	case -ENOMEM:
		return GB_OP_NO_MEMORY;
	case -EPROTONOSUPPORT:
		return GB_OP_PROTOCOL_BAD;
	case -EMSGSIZE:
		return GB_OP_OVERFLOW;	/* Could be underflow too */
	case -EINVAL:
		return GB_OP_INVALID;
	case -EAGAIN:
		return GB_OP_RETRY;
	case -EILSEQ:
		return GB_OP_MALFUNCTION;
	case -ENODEV:
		return GB_OP_NONEXISTENT;
	case -EIO:
	default:
		return GB_OP_UNKNOWN_ERROR;
	}
}

bool gb_operation_response_alloc(struct gb_operation *operation,
				 size_t response_size, gfp_t gfp)
{
	struct gb_host_device *hd = operation->connection->hd;
	struct gb_operation_msg_hdr *request_header;
	struct gb_message *response;
	u8 type;

	type = operation->type | GB_MESSAGE_TYPE_RESPONSE;
	response = gb_operation_message_alloc(hd, type, response_size, gfp);
	if (!response)
		return false;
	response->operation = operation;

	/*
	 * Size and type get initialized when the message is
	 * allocated. The errno will be set before sending. All
	 * that's left is the operation id, which we copy from the
	 * request message header (as-is, in little-endian order).
	 */
	request_header = operation->request->header;
	response->header->operation_id = request_header->operation_id;
	operation->response = response;

	return true;
}
EXPORT_SYMBOL_GPL(gb_operation_response_alloc);
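
/*
 * Typical use from an incoming-request handler (minimal sketch; the handler
 * and response struct are hypothetical, not part of this file):
 *
 *	static int gb_example_request_handler(struct gb_operation *op)
 *	{
 *		struct gb_example_response *resp;
 *
 *		if (!gb_operation_response_alloc(op, sizeof(*resp), GFP_KERNEL))
 *			return -ENOMEM;
 *
 *		resp = op->response->payload;
 *		resp->value = cpu_to_le32(42);
 *
 *		return 0;	// gb_operation_request_handle() sends the response
 *	}
 */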

/*
 * Create a Greybus operation to be sent over the given connection.
 * The request buffer will be big enough for a payload of the given
 * size.
 *
 * For outgoing requests, the request message's header will be
 * initialized with the type of the request and the message size.
 * Outgoing operations must also specify the response buffer size,
 * which must be sufficient to hold all expected response data. The
 * response message header will eventually be overwritten, so there's
 * no need to initialize it here.
 *
 * Request messages for incoming operations can arrive in interrupt
 * context, so they must be allocated with GFP_ATOMIC. In this case
 * the request buffer will be immediately overwritten, so there is
 * no need to initialize the message header. Responsibility for
 * allocating a response buffer lies with the incoming request
 * handler for a protocol. So we don't allocate that here.
 *
 * Returns a pointer to the new operation or a null pointer if an
 * error occurs.
 */
static struct gb_operation *
gb_operation_create_common(struct gb_connection *connection, u8 type,
			   size_t request_size, size_t response_size,
			   unsigned long op_flags, gfp_t gfp_flags)
{
	struct gb_host_device *hd = connection->hd;
	struct gb_operation *operation;

	operation = kmem_cache_zalloc(gb_operation_cache, gfp_flags);
	if (!operation)
		return NULL;
	operation->connection = connection;

	operation->request = gb_operation_message_alloc(hd, type, request_size,
							gfp_flags);
	if (!operation->request)
		goto err_cache;
	operation->request->operation = operation;

	/* Allocate the response buffer for outgoing operations */
	if (!(op_flags & GB_OPERATION_FLAG_INCOMING)) {
		if (!gb_operation_response_alloc(operation, response_size,
						 gfp_flags)) {
			goto err_request;
		}

		timer_setup(&operation->timer, gb_operation_timeout, 0);
	}

	operation->flags = op_flags;
	operation->type = type;
	operation->errno = -EBADR;	/* Initial value--means "never set" */

	INIT_WORK(&operation->work, gb_operation_work);
	init_completion(&operation->completion);
	kref_init(&operation->kref);
	atomic_set(&operation->waiters, 0);

	return operation;

err_request:
	gb_operation_message_free(operation->request);
err_cache:
	kmem_cache_free(gb_operation_cache, operation);

	return NULL;
}

/*
 * Create a new operation associated with the given connection. The
 * request and response sizes provided are the number of bytes
 * required to hold the request/response payload only. Both of
 * these are allowed to be 0. Note that 0x00 is reserved as an
 * invalid operation type for all protocols, and this is enforced
 * here.
 */
struct gb_operation *
gb_operation_create_flags(struct gb_connection *connection,
			  u8 type, size_t request_size,
			  size_t response_size, unsigned long flags,
			  gfp_t gfp)
{
	struct gb_operation *operation;

	if (WARN_ON_ONCE(type == GB_REQUEST_TYPE_INVALID))
		return NULL;
	if (WARN_ON_ONCE(type & GB_MESSAGE_TYPE_RESPONSE))
		type &= ~GB_MESSAGE_TYPE_RESPONSE;

	if (WARN_ON_ONCE(flags & ~GB_OPERATION_FLAG_USER_MASK))
		flags &= GB_OPERATION_FLAG_USER_MASK;

	operation = gb_operation_create_common(connection, type,
					       request_size, response_size,
					       flags, gfp);
	if (operation)
		trace_gb_operation_create(operation);

	return operation;
}
EXPORT_SYMBOL_GPL(gb_operation_create_flags);

struct gb_operation *
gb_operation_create_core(struct gb_connection *connection,
			 u8 type, size_t request_size,
			 size_t response_size, unsigned long flags,
			 gfp_t gfp)
{
	struct gb_operation *operation;

	flags |= GB_OPERATION_FLAG_CORE;

	operation = gb_operation_create_common(connection, type,
					       request_size, response_size,
					       flags, gfp);
	if (operation)
		trace_gb_operation_create_core(operation);

	return operation;
}

/* Do not export this function. */

size_t gb_operation_get_payload_size_max(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;

	return hd->buffer_size_max - sizeof(struct gb_operation_msg_hdr);
}
EXPORT_SYMBOL_GPL(gb_operation_get_payload_size_max);

static struct gb_operation *
gb_operation_create_incoming(struct gb_connection *connection, u16 id,
			     u8 type, void *data, size_t size)
{
	struct gb_operation *operation;
	size_t request_size;
	unsigned long flags = GB_OPERATION_FLAG_INCOMING;

	/* Caller has made sure we at least have a message header. */
	request_size = size - sizeof(struct gb_operation_msg_hdr);

	if (!id)
		flags |= GB_OPERATION_FLAG_UNIDIRECTIONAL;

	operation = gb_operation_create_common(connection, type,
					       request_size,
					       GB_REQUEST_TYPE_INVALID,
					       flags, GFP_ATOMIC);
	if (!operation)
		return NULL;

	operation->id = id;
	memcpy(operation->request->header, data, size);
	trace_gb_operation_create_incoming(operation);

	return operation;
}

/*
 * Get an additional reference on an operation.
 */
void gb_operation_get(struct gb_operation *operation)
{
	kref_get(&operation->kref);
}
EXPORT_SYMBOL_GPL(gb_operation_get);

/*
 * Destroy a previously created operation.
 */
static void _gb_operation_destroy(struct kref *kref)
{
	struct gb_operation *operation;

	operation = container_of(kref, struct gb_operation, kref);

	trace_gb_operation_destroy(operation);

	if (operation->response)
		gb_operation_message_free(operation->response);
	gb_operation_message_free(operation->request);

	kmem_cache_free(gb_operation_cache, operation);
}

/*
 * Drop a reference on an operation, and destroy it when the last
 * one is gone.
 */
void gb_operation_put(struct gb_operation *operation)
{
	if (WARN_ON(!operation))
		return;

	kref_put(&operation->kref, _gb_operation_destroy);
}
EXPORT_SYMBOL_GPL(gb_operation_put);

/* Tell the requester we're done */
static void gb_operation_sync_callback(struct gb_operation *operation)
{
	complete(&operation->completion);
}

/**
 * gb_operation_request_send() - send an operation request message
 * @operation: the operation to initiate
 * @callback: the operation completion callback
 * @timeout: operation timeout in milliseconds, or zero for no timeout
 * @gfp: the memory flags to use for any allocations
 *
 * The caller has filled in any payload so the request message is ready to go.
 * The callback function supplied will be called when the response message has
 * arrived, a unidirectional request has been sent, or the operation is
 * cancelled, indicating that the operation is complete. The callback function
 * can fetch the result of the operation using gb_operation_result() if
 * desired.
 *
 * Return: 0 if the request was successfully queued in the host-driver queues,
 * or a negative errno.
 */
int gb_operation_request_send(struct gb_operation *operation,
			      gb_operation_callback callback,
			      unsigned int timeout,
			      gfp_t gfp)
{
	struct gb_connection *connection = operation->connection;
	struct gb_operation_msg_hdr *header;
	unsigned int cycle;
	int ret;

	if (gb_connection_is_offloaded(connection))
		return -EBUSY;

	if (!callback)
		return -EINVAL;

	/*
	 * Record the callback function, which is executed in
	 * non-atomic (workqueue) context when the final result
	 * of an operation has been set.
	 */
	operation->callback = callback;

	/*
	 * Assign the operation's id, and store it in the request header.
	 * Zero is a reserved operation id for unidirectional operations.
	 */
	if (gb_operation_is_unidirectional(operation)) {
		operation->id = 0;
	} else {
		cycle = (unsigned int)atomic_inc_return(&connection->op_cycle);
		operation->id = (u16)(cycle % U16_MAX + 1);
	}

	header = operation->request->header;
	header->operation_id = cpu_to_le16(operation->id);

	gb_operation_result_set(operation, -EINPROGRESS);

	/*
	 * Get an extra reference on the operation. It'll be dropped when the
	 * operation completes.
	 */
	gb_operation_get(operation);
	ret = gb_operation_get_active(operation);
	if (ret)
		goto err_put;

	ret = gb_message_send(operation->request, gfp);
	if (ret)
		goto err_put_active;

	if (timeout) {
		operation->timer.expires = jiffies + msecs_to_jiffies(timeout);
		add_timer(&operation->timer);
	}

	return 0;

err_put_active:
	gb_operation_put_active(operation);
err_put:
	gb_operation_put(operation);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_request_send);
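
/*
 * Asynchronous usage sketch (hypothetical caller code, not part of this
 * file; the operation type is made up):
 *
 *	static void gb_example_done(struct gb_operation *op)
 *	{
 *		int ret = gb_operation_result(op);
 *
 *		if (ret)
 *			dev_err(&op->connection->hd->dev, "request failed: %d\n", ret);
 *		gb_operation_put(op);	// drop the caller's reference
 *	}
 *
 *	op = gb_operation_create(connection, GB_EXAMPLE_TYPE_PING, 0, 0, GFP_KERNEL);
 *	if (op && gb_operation_request_send(op, gb_example_done, 1000, GFP_KERNEL))
 *		gb_operation_put(op);	// send failed; release our reference
 */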

/*
 * Send a synchronous operation. This function is expected to
 * block, returning only when the response has arrived or an
 * error is detected. The return value is the result of the
 * operation.
 */
int gb_operation_request_send_sync_timeout(struct gb_operation *operation,
					   unsigned int timeout)
{
	int ret;

	ret = gb_operation_request_send(operation, gb_operation_sync_callback,
					timeout, GFP_KERNEL);
	if (ret)
		return ret;

	ret = wait_for_completion_interruptible(&operation->completion);
	if (ret < 0) {
		/* Cancel the operation if interrupted */
		gb_operation_cancel(operation, -ECANCELED);
	}

	return gb_operation_result(operation);
}
EXPORT_SYMBOL_GPL(gb_operation_request_send_sync_timeout);

/*
 * Send a response for an incoming operation request. A non-zero
 * errno indicates a failed operation.
 *
 * If there is any response payload, the incoming request handler is
 * responsible for allocating the response message. Otherwise it
 * can simply supply the result errno; this function will
 * allocate the response message if necessary.
 */
static int gb_operation_response_send(struct gb_operation *operation,
				      int errno)
{
	struct gb_connection *connection = operation->connection;
	int ret;

	if (!operation->response &&
	    !gb_operation_is_unidirectional(operation)) {
		if (!gb_operation_response_alloc(operation, 0, GFP_KERNEL))
			return -ENOMEM;
	}

	/* Record the result */
	if (!gb_operation_result_set(operation, errno)) {
		dev_err(&connection->hd->dev, "request result already set\n");
		return -EIO;	/* Shouldn't happen */
	}

	/* Sender of request does not care about response. */
	if (gb_operation_is_unidirectional(operation))
		return 0;

	/* Reference will be dropped when message has been sent. */
	gb_operation_get(operation);
	ret = gb_operation_get_active(operation);
	if (ret)
		goto err_put;

	/* Fill in the response header and send it */
	operation->response->header->result = gb_operation_errno_map(errno);

	ret = gb_message_send(operation->response, GFP_KERNEL);
	if (ret)
		goto err_put_active;

	return 0;

err_put_active:
	gb_operation_put_active(operation);
err_put:
	gb_operation_put(operation);

	return ret;
}

/*
 * This function is called when a message send request has completed.
 */
void greybus_message_sent(struct gb_host_device *hd,
			  struct gb_message *message, int status)
{
	struct gb_operation *operation = message->operation;
	struct gb_connection *connection = operation->connection;

	/*
	 * If the message was a response, we just need to drop our
	 * reference to the operation. If an error occurred, report
	 * it.
	 *
	 * For requests, if there's no error and the operation is not
	 * unidirectional, there's nothing more to do until the response
	 * arrives. If an error occurred attempting to send it, or if the
	 * operation is unidirectional, record the result of the operation and
	 * schedule its completion.
	 */
	if (message == operation->response) {
		if (status) {
			dev_err(&connection->hd->dev,
				"%s: error sending response 0x%02x: %d\n",
				connection->name, operation->type, status);
		}

		gb_operation_put_active(operation);
		gb_operation_put(operation);
	} else if (status || gb_operation_is_unidirectional(operation)) {
		if (gb_operation_result_set(operation, status)) {
			queue_work(gb_operation_completion_wq,
				   &operation->work);
		}
	}
}
EXPORT_SYMBOL_GPL(greybus_message_sent);

/*
 * We've received data on a connection, and it doesn't look like a
 * response, so we assume it's a request.
 *
 * This is called in interrupt context, so just copy the incoming
 * data into the request buffer and handle the rest via workqueue.
 */
static void gb_connection_recv_request(struct gb_connection *connection,
				       const struct gb_operation_msg_hdr *header,
				       void *data, size_t size)
{
	struct gb_operation *operation;
	u16 operation_id;
	u8 type;
	int ret;

	operation_id = le16_to_cpu(header->operation_id);
	type = header->type;

	operation = gb_operation_create_incoming(connection, operation_id,
						 type, data, size);
	if (!operation) {
		dev_err(&connection->hd->dev,
			"%s: can't create incoming operation\n",
			connection->name);
		return;
	}

	ret = gb_operation_get_active(operation);
	if (ret) {
		gb_operation_put(operation);
		return;
	}
	trace_gb_message_recv_request(operation->request);

	/*
	 * The initial reference to the operation will be dropped when the
	 * request handler returns.
	 */
	if (gb_operation_result_set(operation, -EINPROGRESS))
		queue_work(connection->wq, &operation->work);
}

/*
 * We've received data that appears to be an operation response
 * message. Look up the operation, and record that we've received
 * its response.
 *
 * This is called in interrupt context, so just copy the incoming
 * data into the response buffer and handle the rest via workqueue.
 */
static void gb_connection_recv_response(struct gb_connection *connection,
					const struct gb_operation_msg_hdr *header,
					void *data, size_t size)
{
	struct gb_operation *operation;
	struct gb_message *message;
	size_t message_size;
	u16 operation_id;
	int errno;

	operation_id = le16_to_cpu(header->operation_id);

	if (!operation_id) {
		dev_err_ratelimited(&connection->hd->dev,
				    "%s: invalid response id 0 received\n",
				    connection->name);
		return;
	}

	operation = gb_operation_find_outgoing(connection, operation_id);
	if (!operation) {
		dev_err_ratelimited(&connection->hd->dev,
				    "%s: unexpected response id 0x%04x received\n",
				    connection->name, operation_id);
		return;
	}

	errno = gb_operation_status_map(header->result);
	message = operation->response;
	message_size = sizeof(*header) + message->payload_size;
	if (!errno && size > message_size) {
		dev_err_ratelimited(&connection->hd->dev,
				    "%s: malformed response 0x%02x received (%zu > %zu)\n",
				    connection->name, header->type,
				    size, message_size);
		errno = -EMSGSIZE;
	} else if (!errno && size < message_size) {
		if (gb_operation_short_response_allowed(operation)) {
			message->payload_size = size - sizeof(*header);
		} else {
			dev_err_ratelimited(&connection->hd->dev,
					    "%s: short response 0x%02x received (%zu < %zu)\n",
					    connection->name, header->type,
					    size, message_size);
			errno = -EMSGSIZE;
		}
	}

	/* We must ignore the payload if a bad status is returned */
	if (errno)
		size = sizeof(*header);

	/* The rest will be handled in work queue context */
	if (gb_operation_result_set(operation, errno)) {
		memcpy(message->buffer, data, size);

		trace_gb_message_recv_response(message);

		queue_work(gb_operation_completion_wq, &operation->work);
	}

	gb_operation_put(operation);
}

/*
 * Handle data arriving on a connection. As soon as we return, the
 * supplied data buffer will be reused (so unless we do something
 * with it, it's effectively dropped).
 */
gb_connection_recv(struct gb_connection * connection,void * data,size_t size)1026*4882a593Smuzhiyun void gb_connection_recv(struct gb_connection *connection,
1027*4882a593Smuzhiyun void *data, size_t size)
1028*4882a593Smuzhiyun {
1029*4882a593Smuzhiyun struct gb_operation_msg_hdr header;
1030*4882a593Smuzhiyun struct device *dev = &connection->hd->dev;
1031*4882a593Smuzhiyun size_t msg_size;
1032*4882a593Smuzhiyun
1033*4882a593Smuzhiyun if (connection->state == GB_CONNECTION_STATE_DISABLED ||
1034*4882a593Smuzhiyun gb_connection_is_offloaded(connection)) {
1035*4882a593Smuzhiyun dev_warn_ratelimited(dev, "%s: dropping %zu received bytes\n",
1036*4882a593Smuzhiyun connection->name, size);
1037*4882a593Smuzhiyun return;
1038*4882a593Smuzhiyun }
1039*4882a593Smuzhiyun
1040*4882a593Smuzhiyun if (size < sizeof(header)) {
1041*4882a593Smuzhiyun dev_err_ratelimited(dev, "%s: short message received\n",
1042*4882a593Smuzhiyun connection->name);
1043*4882a593Smuzhiyun return;
1044*4882a593Smuzhiyun }
1045*4882a593Smuzhiyun
1046*4882a593Smuzhiyun /* Use memcpy as data may be unaligned */
1047*4882a593Smuzhiyun memcpy(&header, data, sizeof(header));
1048*4882a593Smuzhiyun msg_size = le16_to_cpu(header.size);
1049*4882a593Smuzhiyun if (size < msg_size) {
1050*4882a593Smuzhiyun dev_err_ratelimited(dev,
1051*4882a593Smuzhiyun "%s: incomplete message 0x%04x of type 0x%02x received (%zu < %zu)\n",
1052*4882a593Smuzhiyun connection->name,
1053*4882a593Smuzhiyun le16_to_cpu(header.operation_id),
1054*4882a593Smuzhiyun header.type, size, msg_size);
1055*4882a593Smuzhiyun return; /* XXX Should still complete operation */
1056*4882a593Smuzhiyun }
1057*4882a593Smuzhiyun
1058*4882a593Smuzhiyun if (header.type & GB_MESSAGE_TYPE_RESPONSE) {
1059*4882a593Smuzhiyun gb_connection_recv_response(connection, &header, data,
1060*4882a593Smuzhiyun msg_size);
1061*4882a593Smuzhiyun } else {
1062*4882a593Smuzhiyun gb_connection_recv_request(connection, &header, data,
1063*4882a593Smuzhiyun msg_size);
1064*4882a593Smuzhiyun }
1065*4882a593Smuzhiyun }
1066*4882a593Smuzhiyun
1067*4882a593Smuzhiyun /*
1068*4882a593Smuzhiyun * Cancel an outgoing operation synchronously, and record the given error to
1069*4882a593Smuzhiyun * indicate why.
1070*4882a593Smuzhiyun */
gb_operation_cancel(struct gb_operation * operation,int errno)1071*4882a593Smuzhiyun void gb_operation_cancel(struct gb_operation *operation, int errno)
1072*4882a593Smuzhiyun {
1073*4882a593Smuzhiyun if (WARN_ON(gb_operation_is_incoming(operation)))
1074*4882a593Smuzhiyun return;
1075*4882a593Smuzhiyun
1076*4882a593Smuzhiyun if (gb_operation_result_set(operation, errno)) {
1077*4882a593Smuzhiyun gb_message_cancel(operation->request);
1078*4882a593Smuzhiyun queue_work(gb_operation_completion_wq, &operation->work);
1079*4882a593Smuzhiyun }
1080*4882a593Smuzhiyun trace_gb_message_cancel_outgoing(operation->request);
1081*4882a593Smuzhiyun
1082*4882a593Smuzhiyun atomic_inc(&operation->waiters);
1083*4882a593Smuzhiyun wait_event(gb_operation_cancellation_queue,
1084*4882a593Smuzhiyun !gb_operation_is_active(operation));
1085*4882a593Smuzhiyun atomic_dec(&operation->waiters);
1086*4882a593Smuzhiyun }
1087*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(gb_operation_cancel);
1088*4882a593Smuzhiyun
1089*4882a593Smuzhiyun /*
1090*4882a593Smuzhiyun * Cancel an incoming operation synchronously. Called during connection tear
1091*4882a593Smuzhiyun * down.
1092*4882a593Smuzhiyun */
gb_operation_cancel_incoming(struct gb_operation * operation,int errno)1093*4882a593Smuzhiyun void gb_operation_cancel_incoming(struct gb_operation *operation, int errno)
1094*4882a593Smuzhiyun {
1095*4882a593Smuzhiyun if (WARN_ON(!gb_operation_is_incoming(operation)))
1096*4882a593Smuzhiyun return;
1097*4882a593Smuzhiyun
1098*4882a593Smuzhiyun if (!gb_operation_is_unidirectional(operation)) {
1099*4882a593Smuzhiyun /*
1100*4882a593Smuzhiyun * Make sure the request handler has submitted the response
1101*4882a593Smuzhiyun * before cancelling it.
1102*4882a593Smuzhiyun */
1103*4882a593Smuzhiyun flush_work(&operation->work);
1104*4882a593Smuzhiyun if (!gb_operation_result_set(operation, errno))
1105*4882a593Smuzhiyun gb_message_cancel(operation->response);
1106*4882a593Smuzhiyun }
1107*4882a593Smuzhiyun trace_gb_message_cancel_incoming(operation->response);
1108*4882a593Smuzhiyun
1109*4882a593Smuzhiyun atomic_inc(&operation->waiters);
1110*4882a593Smuzhiyun wait_event(gb_operation_cancellation_queue,
1111*4882a593Smuzhiyun !gb_operation_is_active(operation));
1112*4882a593Smuzhiyun atomic_dec(&operation->waiters);
1113*4882a593Smuzhiyun }
1114*4882a593Smuzhiyun
1115*4882a593Smuzhiyun /**
1116*4882a593Smuzhiyun * gb_operation_sync_timeout() - implement a "simple" synchronous operation
1117*4882a593Smuzhiyun * @connection: the Greybus connection to send this to
1118*4882a593Smuzhiyun * @type: the type of operation to send
1119*4882a593Smuzhiyun * @request: pointer to a memory buffer to copy the request from
1120*4882a593Smuzhiyun * @request_size: size of @request
1121*4882a593Smuzhiyun * @response: pointer to a memory buffer to copy the response to
1122*4882a593Smuzhiyun * @response_size: size of @response
1123*4882a593Smuzhiyun * @timeout: operation timeout in milliseconds
1124*4882a593Smuzhiyun *
1125*4882a593Smuzhiyun * This function implements a simple synchronous Greybus operation. It sends
1126*4882a593Smuzhiyun * the provided operation request and waits (sleeps) until the corresponding
1127*4882a593Smuzhiyun * operation response message has been successfully received, or an error
1128*4882a593Smuzhiyun * occurs. @request and @response are buffers to hold the request and response
1129*4882a593Smuzhiyun * data respectively, and if they are not NULL, their size must be specified in
1130*4882a593Smuzhiyun * @request_size and @response_size.
1131*4882a593Smuzhiyun *
1132*4882a593Smuzhiyun * If a response payload is to come back, and @response is not NULL,
1133*4882a593Smuzhiyun * @response_size number of bytes will be copied into @response if the operation
1134*4882a593Smuzhiyun * is successful.
1135*4882a593Smuzhiyun *
1136*4882a593Smuzhiyun * If there is an error, the response buffer is left alone.
1137*4882a593Smuzhiyun */
gb_operation_sync_timeout(struct gb_connection * connection,int type,void * request,int request_size,void * response,int response_size,unsigned int timeout)1138*4882a593Smuzhiyun int gb_operation_sync_timeout(struct gb_connection *connection, int type,
1139*4882a593Smuzhiyun void *request, int request_size,
1140*4882a593Smuzhiyun void *response, int response_size,
1141*4882a593Smuzhiyun unsigned int timeout)
1142*4882a593Smuzhiyun {
1143*4882a593Smuzhiyun struct gb_operation *operation;
1144*4882a593Smuzhiyun int ret;
1145*4882a593Smuzhiyun
1146*4882a593Smuzhiyun if ((response_size && !response) ||
1147*4882a593Smuzhiyun (request_size && !request))
1148*4882a593Smuzhiyun return -EINVAL;
1149*4882a593Smuzhiyun
1150*4882a593Smuzhiyun operation = gb_operation_create(connection, type,
1151*4882a593Smuzhiyun request_size, response_size,
1152*4882a593Smuzhiyun GFP_KERNEL);
1153*4882a593Smuzhiyun if (!operation)
1154*4882a593Smuzhiyun return -ENOMEM;
1155*4882a593Smuzhiyun
1156*4882a593Smuzhiyun if (request_size)
1157*4882a593Smuzhiyun memcpy(operation->request->payload, request, request_size);
1158*4882a593Smuzhiyun
1159*4882a593Smuzhiyun ret = gb_operation_request_send_sync_timeout(operation, timeout);
1160*4882a593Smuzhiyun if (ret) {
1161*4882a593Smuzhiyun dev_err(&connection->hd->dev,
1162*4882a593Smuzhiyun "%s: synchronous operation id 0x%04x of type 0x%02x failed: %d\n",
1163*4882a593Smuzhiyun connection->name, operation->id, type, ret);
1164*4882a593Smuzhiyun } else {
1165*4882a593Smuzhiyun if (response_size) {
1166*4882a593Smuzhiyun memcpy(response, operation->response->payload,
1167*4882a593Smuzhiyun response_size);
1168*4882a593Smuzhiyun }
1169*4882a593Smuzhiyun }
1170*4882a593Smuzhiyun
1171*4882a593Smuzhiyun gb_operation_put(operation);
1172*4882a593Smuzhiyun
1173*4882a593Smuzhiyun return ret;
1174*4882a593Smuzhiyun }
1175*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(gb_operation_sync_timeout);
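
/*
 * Usage sketch (illustrative only, not part of this driver): a protocol
 * driver typically wraps gb_operation_sync_timeout() in a helper that
 * marshals its own wire structures.  The gb_example_* structures, the
 * EXAMPLE_TYPE_GET constant and the 1000 ms timeout below are hypothetical
 * placeholders, not definitions provided by Greybus core.
 *
 *	struct gb_example_get_request {
 *		__le32 which;
 *	} __packed;
 *
 *	struct gb_example_get_response {
 *		__le32 value;
 *	} __packed;
 *
 *	static int gb_example_get(struct gb_connection *connection,
 *				  u32 which, u32 *value)
 *	{
 *		struct gb_example_get_request request;
 *		struct gb_example_get_response response;
 *		int ret;
 *
 *		request.which = cpu_to_le32(which);
 *		ret = gb_operation_sync_timeout(connection, EXAMPLE_TYPE_GET,
 *						&request, sizeof(request),
 *						&response, sizeof(response),
 *						1000);
 *		if (ret)
 *			return ret;
 *
 *		*value = le32_to_cpu(response.value);
 *		return 0;
 *	}
 */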
1176*4882a593Smuzhiyun
1177*4882a593Smuzhiyun /**
1178*4882a593Smuzhiyun * gb_operation_unidirectional_timeout() - initiate a unidirectional operation
1179*4882a593Smuzhiyun * @connection: connection to use
1180*4882a593Smuzhiyun * @type: type of operation to send
1181*4882a593Smuzhiyun * @request: memory buffer to copy the request from
1182*4882a593Smuzhiyun * @request_size: size of @request
1183*4882a593Smuzhiyun * @timeout: send timeout in milliseconds
1184*4882a593Smuzhiyun *
1185*4882a593Smuzhiyun * Initiate a unidirectional operation by sending a request message and
1186*4882a593Smuzhiyun * waiting for it to be acknowledged as sent by the host device.
1187*4882a593Smuzhiyun *
1188*4882a593Smuzhiyun * Note that successful send of a unidirectional operation does not imply that
1189*4882a593Smuzhiyun * the request has actually reached the remote end of the connection.
1190*4882a593Smuzhiyun */
gb_operation_unidirectional_timeout(struct gb_connection * connection,int type,void * request,int request_size,unsigned int timeout)1191*4882a593Smuzhiyun int gb_operation_unidirectional_timeout(struct gb_connection *connection,
1192*4882a593Smuzhiyun int type, void *request,
1193*4882a593Smuzhiyun int request_size,
1194*4882a593Smuzhiyun unsigned int timeout)
1195*4882a593Smuzhiyun {
1196*4882a593Smuzhiyun struct gb_operation *operation;
1197*4882a593Smuzhiyun int ret;
1198*4882a593Smuzhiyun
1199*4882a593Smuzhiyun if (request_size && !request)
1200*4882a593Smuzhiyun return -EINVAL;
1201*4882a593Smuzhiyun
1202*4882a593Smuzhiyun operation = gb_operation_create_flags(connection, type,
1203*4882a593Smuzhiyun request_size, 0,
1204*4882a593Smuzhiyun GB_OPERATION_FLAG_UNIDIRECTIONAL,
1205*4882a593Smuzhiyun GFP_KERNEL);
1206*4882a593Smuzhiyun if (!operation)
1207*4882a593Smuzhiyun return -ENOMEM;
1208*4882a593Smuzhiyun
1209*4882a593Smuzhiyun if (request_size)
1210*4882a593Smuzhiyun memcpy(operation->request->payload, request, request_size);
1211*4882a593Smuzhiyun
1212*4882a593Smuzhiyun ret = gb_operation_request_send_sync_timeout(operation, timeout);
1213*4882a593Smuzhiyun if (ret) {
1214*4882a593Smuzhiyun dev_err(&connection->hd->dev,
1215*4882a593Smuzhiyun "%s: unidirectional operation of type 0x%02x failed: %d\n",
1216*4882a593Smuzhiyun connection->name, type, ret);
1217*4882a593Smuzhiyun }
1218*4882a593Smuzhiyun
1219*4882a593Smuzhiyun gb_operation_put(operation);
1220*4882a593Smuzhiyun
1221*4882a593Smuzhiyun return ret;
1222*4882a593Smuzhiyun }
1223*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(gb_operation_unidirectional_timeout);
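
/*
 * Usage sketch (illustrative only): unidirectional operations suit
 * fire-and-forget notifications that expect no response.  The
 * gb_example_event_request structure, the EXAMPLE_TYPE_EVENT constant and
 * the 1000 ms timeout are hypothetical placeholders.
 *
 *	struct gb_example_event_request {
 *		__le16 event;
 *	} __packed;
 *
 *	static int gb_example_send_event(struct gb_connection *connection,
 *					 u16 event)
 *	{
 *		struct gb_example_event_request request;
 *
 *		request.event = cpu_to_le16(event);
 *
 *		return gb_operation_unidirectional_timeout(connection,
 *						EXAMPLE_TYPE_EVENT, &request,
 *						sizeof(request), 1000);
 *	}
 */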
1224*4882a593Smuzhiyun
gb_operation_init(void)1225*4882a593Smuzhiyun int __init gb_operation_init(void)
1226*4882a593Smuzhiyun {
1227*4882a593Smuzhiyun gb_message_cache = kmem_cache_create("gb_message_cache",
1228*4882a593Smuzhiyun sizeof(struct gb_message), 0, 0,
1229*4882a593Smuzhiyun NULL);
1230*4882a593Smuzhiyun if (!gb_message_cache)
1231*4882a593Smuzhiyun return -ENOMEM;
1232*4882a593Smuzhiyun
1233*4882a593Smuzhiyun gb_operation_cache = kmem_cache_create("gb_operation_cache",
1234*4882a593Smuzhiyun sizeof(struct gb_operation), 0,
1235*4882a593Smuzhiyun 0, NULL);
1236*4882a593Smuzhiyun if (!gb_operation_cache)
1237*4882a593Smuzhiyun goto err_destroy_message_cache;
1238*4882a593Smuzhiyun
1239*4882a593Smuzhiyun gb_operation_completion_wq = alloc_workqueue("greybus_completion",
1240*4882a593Smuzhiyun 0, 0);
1241*4882a593Smuzhiyun if (!gb_operation_completion_wq)
1242*4882a593Smuzhiyun goto err_destroy_operation_cache;
1243*4882a593Smuzhiyun
1244*4882a593Smuzhiyun return 0;
1245*4882a593Smuzhiyun
1246*4882a593Smuzhiyun err_destroy_operation_cache:
1247*4882a593Smuzhiyun kmem_cache_destroy(gb_operation_cache);
1248*4882a593Smuzhiyun gb_operation_cache = NULL;
1249*4882a593Smuzhiyun err_destroy_message_cache:
1250*4882a593Smuzhiyun kmem_cache_destroy(gb_message_cache);
1251*4882a593Smuzhiyun gb_message_cache = NULL;
1252*4882a593Smuzhiyun
1253*4882a593Smuzhiyun return -ENOMEM;
1254*4882a593Smuzhiyun }
1255*4882a593Smuzhiyun
gb_operation_exit(void)1256*4882a593Smuzhiyun void gb_operation_exit(void)
1257*4882a593Smuzhiyun {
1258*4882a593Smuzhiyun destroy_workqueue(gb_operation_completion_wq);
1259*4882a593Smuzhiyun gb_operation_completion_wq = NULL;
1260*4882a593Smuzhiyun kmem_cache_destroy(gb_operation_cache);
1261*4882a593Smuzhiyun gb_operation_cache = NULL;
1262*4882a593Smuzhiyun kmem_cache_destroy(gb_message_cache);
1263*4882a593Smuzhiyun gb_message_cache = NULL;
1264*4882a593Smuzhiyun }