/*
 * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>
#include <common/debug.h>
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

#include "ivc.h"

/*
 * IVC channel reset protocol.
 *
 * Each end uses its tx_channel.state to indicate its synchronization state.
 */
enum {
	/*
	 * This value is zero for backwards compatibility with services that
	 * assume channels to be initially zeroed. Such channels are in an
	 * initially valid state, but cannot be asynchronously reset, and must
	 * maintain a valid state at all times.
	 *
	 * The transmitting end can enter the established state from the sync
	 * or ack state when it observes the receiving endpoint in the ack or
	 * established state, indicating that it has cleared the counters in
	 * our rx_channel.
	 */
	ivc_state_established = U(0),

	/*
	 * If an endpoint is observed in the sync state, the remote endpoint is
	 * allowed to clear the counters it owns asynchronously with respect to
	 * the current endpoint. Therefore, the current endpoint is no longer
	 * allowed to communicate.
	 */
	ivc_state_sync = U(1),

	/*
	 * When the transmitting end observes the receiving end in the sync
	 * state, it can clear the w_count and r_count and transition to the ack
	 * state. If the remote endpoint observes us in the ack state, it can
	 * return to the established state once it has cleared its counters.
	 */
	ivc_state_ack = U(2)
};
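
/*
 * A sketch of the resulting three-way handshake, assuming both ends start
 * from a reset request (time flows downward; see the state transition
 * table above tegra_ivc_channel_notified() for the full matrix):
 *
 *   end A (tx state)                end B (tx state)
 *   ----------------                ----------------
 *   SYNC                      --->  sees SYNC: clears counters, -> ACK
 *   sees ACK: clears counters <---  ACK
 *   -> EST                    --->  sees EST: -> EST
 *
 * Only once an end has reached ivc_state_established may it read or
 * write frames.
 */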

/*
 * This structure is divided into two cache-aligned parts: the first is only
 * written through the tx_channel pointer, while the second is only written
 * through the rx_channel pointer. This delineates ownership of the cache
 * lines, which is critical to performance and necessary in non-cache-coherent
 * implementations.
 */
struct ivc_channel_header {
	struct {
		/* fields owned by the transmitting end */
		uint32_t w_count;
		uint32_t state;
		uint32_t w_rsvd[IVC_CHHDR_TX_FIELDS - 2];
	};
	struct {
		/* fields owned by the receiving end */
		uint32_t r_count;
		uint32_t r_rsvd[IVC_CHHDR_RX_FIELDS - 1];
	};
};
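
/*
 * A compile-time sketch of the alignment invariants that
 * check_ivc_params() asserts at runtime, assuming a C11 toolchain
 * (_Static_assert) and IVC_ALIGN from ivc.h:
 *
 *   _Static_assert((offsetof(struct ivc_channel_header, w_count) &
 *                   (IVC_ALIGN - 1U)) == 0U, "tx fields misaligned");
 *   _Static_assert((offsetof(struct ivc_channel_header, r_count) &
 *                   (IVC_ALIGN - 1U)) == 0U, "rx fields misaligned");
 *   _Static_assert((sizeof(struct ivc_channel_header) &
 *                   (IVC_ALIGN - 1U)) == 0U, "header size misaligned");
 */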

static inline bool ivc_channel_empty(const struct ivc *ivc,
		volatile const struct ivc_channel_header *ch)
{
	/*
	 * This function performs multiple checks on the same values with
	 * security implications, so sample the counters' current values in
	 * shared memory to ensure that these checks use the same values.
	 */
	uint32_t wr_count = ch->w_count;
	uint32_t rd_count = ch->r_count;
	bool ret = false;

	(void)ivc;

	/*
	 * Perform an over-full check to prevent denial of service attacks
	 * where a server could be easily fooled into believing that there's
	 * an extremely large number of frames ready, since receivers are not
	 * expected to check for full or over-full conditions.
	 *
	 * Although the channel isn't empty, this is an invalid case caused by
	 * a potentially malicious peer, so returning empty is safer, because
	 * it gives the impression that the channel has gone silent.
	 */
	if (((wr_count - rd_count) > ivc->nframes) || (wr_count == rd_count)) {
		ret = true;
	}

	return ret;
}
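
/*
 * w_count and r_count are free-running uint32_t counters, so the unsigned
 * subtraction above stays correct across wraparound. A worked example,
 * assuming nframes == 4:
 *
 *   w_count = 0x00000002, r_count = 0xFFFFFFFF
 *   w_count - r_count    = 0x00000003          (three frames pending)
 *
 * whereas a peer advertising w_count = 0x00001000 against r_count = 0
 * yields 0x1000 > nframes, which the over-full check reports as empty.
 */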

static inline bool ivc_channel_full(const struct ivc *ivc,
		volatile const struct ivc_channel_header *ch)
{
	uint32_t wr_count = ch->w_count;
	uint32_t rd_count = ch->r_count;

	(void)ivc;

	/*
	 * Invalid cases where the counters indicate that the queue is over
	 * capacity also appear full.
	 */
	return ((wr_count - rd_count) >= ivc->nframes);
}

static inline uint32_t ivc_channel_avail_count(const struct ivc *ivc,
		volatile const struct ivc_channel_header *ch)
{
	uint32_t wr_count = ch->w_count;
	uint32_t rd_count = ch->r_count;

	(void)ivc;

	/*
	 * This function isn't expected to be used in scenarios where an
	 * over-full situation can lead to denial of service attacks. See the
	 * comment in ivc_channel_empty() for an explanation about special
	 * over-full considerations.
	 */
	return (wr_count - rd_count);
}

static inline void ivc_advance_tx(struct ivc *ivc)
{
	ivc->tx_channel->w_count++;

	if (ivc->w_pos == (ivc->nframes - (uint32_t)1U)) {
		ivc->w_pos = 0U;
	} else {
		ivc->w_pos++;
	}
}

static inline void ivc_advance_rx(struct ivc *ivc)
{
	ivc->rx_channel->r_count++;

	if (ivc->r_pos == (ivc->nframes - (uint32_t)1U)) {
		ivc->r_pos = 0U;
	} else {
		ivc->r_pos++;
	}
}

static inline int32_t ivc_check_read(const struct ivc *ivc)
{
	/*
	 * tx_channel->state is set locally, so it is not synchronized with
	 * state from the remote peer. The remote peer cannot reset its
	 * transmit counters until we've acknowledged its synchronization
	 * request, so no additional synchronization is required because an
	 * asynchronous transition of rx_channel->state to ivc_state_ack is
	 * not allowed.
	 */
	if (ivc->tx_channel->state != ivc_state_established) {
		return -ECONNRESET;
	}

	/*
	 * Avoid unnecessary invalidations when performing repeated accesses
	 * to an IVC channel by checking the old queue pointers first.
	 * Synchronization is only necessary when these pointers indicate
	 * empty or full.
	 */
	if (!ivc_channel_empty(ivc, ivc->rx_channel)) {
		return 0;
	}

	return ivc_channel_empty(ivc, ivc->rx_channel) ? -ENOMEM : 0;
}

static inline int32_t ivc_check_write(const struct ivc *ivc)
{
	if (ivc->tx_channel->state != ivc_state_established) {
		return -ECONNRESET;
	}

	if (!ivc_channel_full(ivc, ivc->tx_channel)) {
		return 0;
	}

	return ivc_channel_full(ivc, ivc->tx_channel) ? -ENOMEM : 0;
}

bool tegra_ivc_can_read(const struct ivc *ivc)
{
	return ivc_check_read(ivc) == 0;
}

bool tegra_ivc_can_write(const struct ivc *ivc)
{
	return ivc_check_write(ivc) == 0;
}

bool tegra_ivc_tx_empty(const struct ivc *ivc)
{
	return ivc_channel_empty(ivc, ivc->tx_channel);
}
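
/*
 * A typical drain-before-teardown pattern built on tegra_ivc_tx_empty();
 * a sketch only, where poll_delay() stands in for a hypothetical platform
 * delay or WFE primitive:
 *
 *   while (!tegra_ivc_tx_empty(ivc)) {
 *       poll_delay();
 *   }
 */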

static inline uintptr_t calc_frame_offset(uint32_t frame_index,
		uint32_t frame_size, uint32_t frame_offset)
{
	return ((uintptr_t)frame_index * (uintptr_t)frame_size) +
	       (uintptr_t)frame_offset;
}

static void *ivc_frame_pointer(const struct ivc *ivc,
		volatile const struct ivc_channel_header *ch,
		uint32_t frame)
{
	assert(frame < ivc->nframes);
	return (void *)((uintptr_t)(&ch[1]) +
			calc_frame_offset(frame, ivc->frame_size, 0));
}
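
/*
 * Per-direction shared memory layout implied by the &ch[1] arithmetic
 * above (offsets illustrative):
 *
 *   base                                  struct ivc_channel_header
 *   base + sizeof(header)                 frame 0
 *   base + sizeof(header) + frame_size    frame 1
 *   ...
 *   base + sizeof(header) +
 *          (nframes - 1U) * frame_size    frame nframes - 1
 */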

int32_t tegra_ivc_read(struct ivc *ivc, void *buf, size_t max_read)
{
	const void *src;
	int32_t result;

	if (buf == NULL) {
		return -EINVAL;
	}

	if (max_read > ivc->frame_size) {
		return -E2BIG;
	}

	result = ivc_check_read(ivc);
	if (result != 0) {
		return result;
	}

	/*
	 * Order observation of w_pos potentially indicating new data before
	 * data read.
	 */
	dmbish();

	src = ivc_frame_pointer(ivc, ivc->rx_channel, ivc->r_pos);

	(void)memcpy(buf, src, max_read);

	ivc_advance_rx(ivc);

	/*
	 * Ensure our write to r_pos occurs before our read from w_pos.
	 */
	dmbish();

	/*
	 * Notify only upon transition from full to non-full.
	 * The available count can only asynchronously increase, so the
	 * worst possible side-effect will be a spurious notification.
	 */
	if (ivc_channel_avail_count(ivc, ivc->rx_channel) ==
			(ivc->nframes - (uint32_t)1U)) {
		ivc->notify(ivc);
	}

	return (int32_t)max_read;
}

/* directly peek at the next frame rx'ed */
void *tegra_ivc_read_get_next_frame(const struct ivc *ivc)
{
	if (ivc_check_read(ivc) != 0) {
		return NULL;
	}

	/*
	 * Order observation of w_pos potentially indicating new data before
	 * data read.
	 */
	dmbld();

	return ivc_frame_pointer(ivc, ivc->rx_channel, ivc->r_pos);
}

int32_t tegra_ivc_read_advance(struct ivc *ivc)
{
	/*
	 * No read barriers or synchronization here: the caller is expected to
	 * have already observed the channel non-empty. This check is just to
	 * catch programming errors.
	 */
	int32_t result = ivc_check_read(ivc);

	if (result != 0) {
		return result;
	}

	ivc_advance_rx(ivc);

	/*
	 * Ensure our write to r_pos occurs before our read from w_pos.
	 */
	dmbish();

	/*
	 * Notify only upon transition from full to non-full.
	 * The available count can only asynchronously increase, so the
	 * worst possible side-effect will be a spurious notification.
	 */
	if (ivc_channel_avail_count(ivc, ivc->rx_channel) ==
			(ivc->nframes - (uint32_t)1U)) {
		ivc->notify(ivc);
	}

	return 0;
}
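
/*
 * A zero-copy receive sketch built from the two calls above;
 * process_frame() is a hypothetical consumer and must be done with the
 * frame before tegra_ivc_read_advance() releases it to the transmitter:
 *
 *   void *frame = tegra_ivc_read_get_next_frame(ivc);
 *
 *   if (frame != NULL) {
 *       process_frame(frame, ivc->frame_size);
 *       (void)tegra_ivc_read_advance(ivc);
 *   }
 */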

int32_t tegra_ivc_write(struct ivc *ivc, const void *buf, size_t size)
{
	void *p;
	int32_t result;

	if ((buf == NULL) || (ivc == NULL)) {
		return -EINVAL;
	}

	if (size > ivc->frame_size) {
		return -E2BIG;
	}

	result = ivc_check_write(ivc);
	if (result != 0) {
		return result;
	}

	p = ivc_frame_pointer(ivc, ivc->tx_channel, ivc->w_pos);

	(void)memset(p, 0, ivc->frame_size);
	(void)memcpy(p, buf, size);

	/*
	 * Ensure that updated data is visible before the w_pos counter
	 * indicates that it is ready.
	 */
	dmbst();

	ivc_advance_tx(ivc);

	/*
	 * Ensure our write to w_pos occurs before our read from r_pos.
	 */
	dmbish();

	/*
	 * Notify only upon transition from empty to non-empty.
	 * The available count can only asynchronously decrease, so the
	 * worst possible side-effect will be a spurious notification.
	 */
	if (ivc_channel_avail_count(ivc, ivc->tx_channel) == 1U) {
		ivc->notify(ivc);
	}

	return (int32_t)size;
}

/* directly poke at the next frame to be tx'ed */
void *tegra_ivc_write_get_next_frame(const struct ivc *ivc)
{
	if (ivc_check_write(ivc) != 0) {
		return NULL;
	}

	return ivc_frame_pointer(ivc, ivc->tx_channel, ivc->w_pos);
}

/* advance the tx buffer */
int32_t tegra_ivc_write_advance(struct ivc *ivc)
{
	int32_t result = ivc_check_write(ivc);

	if (result != 0) {
		return result;
	}

	/*
	 * Order any possible stores to the frame before update of w_pos.
	 */
	dmbst();

	ivc_advance_tx(ivc);

	/*
	 * Ensure our write to w_pos occurs before our read from r_pos.
	 */
	dmbish();

	/*
	 * Notify only upon transition from empty to non-empty.
	 * The available count can only asynchronously decrease, so the
	 * worst possible side-effect will be a spurious notification.
	 */
	if (ivc_channel_avail_count(ivc, ivc->tx_channel) == (uint32_t)1U) {
		ivc->notify(ivc);
	}

	return 0;
}
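
/*
 * The zero-copy transmit counterpart; fill_frame() is a hypothetical
 * producer. Unlike tegra_ivc_write(), the frame is not zeroed first, so
 * the producer should write the whole payload it intends to send:
 *
 *   void *frame = tegra_ivc_write_get_next_frame(ivc);
 *
 *   if (frame != NULL) {
 *       fill_frame(frame, ivc->frame_size);
 *       (void)tegra_ivc_write_advance(ivc);
 *   }
 */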

void tegra_ivc_channel_reset(const struct ivc *ivc)
{
	ivc->tx_channel->state = ivc_state_sync;
	ivc->notify(ivc);
}

/*
 * ===============================================================
 * IVC State Transition Table - see tegra_ivc_channel_notified()
 * ===============================================================
 *
 *   local   remote  action
 *   -----   ------  -----------------------------------
 *   SYNC    EST     <none>
 *   SYNC    ACK     reset counters; move to EST; notify
 *   SYNC    SYNC    reset counters; move to ACK; notify
 *   ACK     EST     move to EST; notify
 *   ACK     ACK     move to EST; notify
 *   ACK     SYNC    reset counters; move to ACK; notify
 *   EST     EST     <none>
 *   EST     ACK     <none>
 *   EST     SYNC    reset counters; move to ACK; notify
 *
 * ===============================================================
 */
int32_t tegra_ivc_channel_notified(struct ivc *ivc)
{
	uint32_t peer_state;

	/* Copy the receiver's state out of shared memory. */
	peer_state = ivc->rx_channel->state;

	if (peer_state == (uint32_t)ivc_state_sync) {
		/*
		 * Order observation of ivc_state_sync before stores clearing
		 * tx_channel.
		 */
		dmbld();

		/*
		 * Reset the counters that this end owns (tx_channel->w_count
		 * and rx_channel->r_count). The remote end is in the SYNC
		 * state and won't make progress until we change our state,
		 * so the counters are not in use at this time.
		 */
		ivc->tx_channel->w_count = 0U;
		ivc->rx_channel->r_count = 0U;

		ivc->w_pos = 0U;
		ivc->r_pos = 0U;

		/*
		 * Ensure that counters appear cleared before new state can be
		 * observed.
		 */
		dmbst();

		/*
		 * Move to ACK state. We have just cleared our counters, so it
		 * is now safe for the remote end to start using these values.
		 */
		ivc->tx_channel->state = ivc_state_ack;

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc);

	} else if ((ivc->tx_channel->state == (uint32_t)ivc_state_sync) &&
		   (peer_state == (uint32_t)ivc_state_ack)) {
		/*
		 * Order observation of ivc_state_ack before stores clearing
		 * tx_channel.
		 */
		dmbld();

		/*
		 * Reset the counters that this end owns. The remote end is in
		 * the ACK state and won't make progress until we change our
		 * state, so the counters are not in use at this time.
		 */
		ivc->tx_channel->w_count = 0U;
		ivc->rx_channel->r_count = 0U;

		ivc->w_pos = 0U;
		ivc->r_pos = 0U;

		/*
		 * Ensure that counters appear cleared before new state can be
		 * observed.
		 */
		dmbst();

		/*
		 * Move to ESTABLISHED state. We know that the remote end has
		 * already cleared its counters, so it is safe to start
		 * writing/reading on this channel.
		 */
		ivc->tx_channel->state = ivc_state_established;

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc);

	} else if (ivc->tx_channel->state == (uint32_t)ivc_state_ack) {
		/*
		 * At this point, we have observed the peer to be in either
		 * the ACK or ESTABLISHED state. Next, order observation of
		 * peer state before storing to tx_channel.
		 */
		dmbld();

		/*
		 * Move to ESTABLISHED state. We know that we have previously
		 * cleared our counters, and we know that the remote end has
		 * cleared its counters, so it is safe to start writing/reading
		 * on this channel.
		 */
		ivc->tx_channel->state = ivc_state_established;

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc);

	} else {
		/*
		 * There is no need to handle any further action. Either the
		 * channel is already fully established, or we are waiting for
		 * the remote end to catch up with our current state. Refer
		 * to the diagram in "IVC State Transition Table" above.
		 */
	}

	return ((ivc->tx_channel->state == (uint32_t)ivc_state_established) ?
			0 : -EAGAIN);
}
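
/*
 * A sketch of how a client drives the handshake: request a reset, then
 * re-run tegra_ivc_channel_notified() on every notification (or by
 * polling) until it reports 0. wait_for_notification() stands in for a
 * hypothetical platform wait primitive:
 *
 *   tegra_ivc_channel_reset(ivc);
 *
 *   while (tegra_ivc_channel_notified(ivc) != 0) {
 *       wait_for_notification();
 *   }
 *
 * At that point the local end is established and may use the read/write
 * APIs above.
 */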

size_t tegra_ivc_align(size_t size)
{
	return (size + (IVC_ALIGN - 1U)) & ~(IVC_ALIGN - 1U);
}

size_t tegra_ivc_total_queue_size(size_t queue_size)
{
	if ((queue_size & (IVC_ALIGN - 1U)) != 0U) {
		ERROR("queue_size (%d) must be %d-byte aligned\n",
			(int32_t)queue_size, IVC_ALIGN);
		return 0;
	}
	return queue_size + sizeof(struct ivc_channel_header);
}
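
/*
 * Example sizing, assuming IVC_ALIGN == 64 and an illustrative channel of
 * 16 frames of 128 bytes per direction:
 *
 *   size_t queue_size = tegra_ivc_align(16U * 128U);
 *   size_t total = tegra_ivc_total_queue_size(queue_size);
 *
 * which yields queue_size == 2048 and total == 2048 +
 * sizeof(struct ivc_channel_header). Each direction needs one such
 * region, and the two regions must not overlap; check_ivc_params()
 * below enforces this.
 */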

static int32_t check_ivc_params(uintptr_t queue_base1, uintptr_t queue_base2,
		uint32_t nframes, uint32_t frame_size)
{
	assert((offsetof(struct ivc_channel_header, w_count)
		& (IVC_ALIGN - 1U)) == 0U);
	assert((offsetof(struct ivc_channel_header, r_count)
		& (IVC_ALIGN - 1U)) == 0U);
	assert((sizeof(struct ivc_channel_header) & (IVC_ALIGN - 1U)) == 0U);

	if (((uint64_t)nframes * (uint64_t)frame_size) >= 0x100000000ULL) {
		ERROR("nframes * frame_size overflows\n");
		return -EINVAL;
	}

	/*
	 * The headers must at least be aligned enough for counters
	 * to be accessed atomically.
	 */
	if ((queue_base1 & (IVC_ALIGN - 1U)) != 0U) {
		ERROR("ivc channel start not aligned: %lx\n", queue_base1);
		return -EINVAL;
	}
	if ((queue_base2 & (IVC_ALIGN - 1U)) != 0U) {
		ERROR("ivc channel start not aligned: %lx\n", queue_base2);
		return -EINVAL;
	}

	if ((frame_size & (IVC_ALIGN - 1U)) != 0U) {
		ERROR("frame size not adequately aligned: %u\n",
			frame_size);
		return -EINVAL;
	}

	if (queue_base1 < queue_base2) {
		if ((queue_base1 + ((uint64_t)frame_size * nframes)) > queue_base2) {
			ERROR("queue regions overlap: %lx + %x, %x\n",
				queue_base1, frame_size,
				frame_size * nframes);
			return -EINVAL;
		}
	} else {
		if ((queue_base2 + ((uint64_t)frame_size * nframes)) > queue_base1) {
			ERROR("queue regions overlap: %lx + %x, %x\n",
				queue_base2, frame_size,
				frame_size * nframes);
			return -EINVAL;
		}
	}

	return 0;
}

int32_t tegra_ivc_init(struct ivc *ivc, uintptr_t rx_base, uintptr_t tx_base,
		uint32_t nframes, uint32_t frame_size,
		ivc_notify_function notify)
{
	int32_t result;

	/* sanity check input params */
	if ((ivc == NULL) || (notify == NULL)) {
		return -EINVAL;
	}

	result = check_ivc_params(rx_base, tx_base, nframes, frame_size);
	if (result != 0) {
		return result;
	}

	/*
	 * All sizes that can be returned by communication functions should
	 * fit in a 32-bit integer.
	 */
	if (frame_size > (1u << 31)) {
		return -E2BIG;
	}

	ivc->rx_channel = (struct ivc_channel_header *)rx_base;
	ivc->tx_channel = (struct ivc_channel_header *)tx_base;
	ivc->notify = notify;
	ivc->frame_size = frame_size;
	ivc->nframes = nframes;
	ivc->w_pos = 0U;
	ivc->r_pos = 0U;

	INFO("%s: done\n", __func__);

	return 0;
}
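
/*
 * An end-to-end bring-up sketch under illustrative assumptions: RX_BASE
 * and TX_BASE are non-overlapping, IVC_ALIGN-aligned shared memory
 * regions sized per tegra_ivc_total_queue_size(), and ring_doorbell()
 * is a hypothetical notify callback that raises the peer's doorbell
 * interrupt:
 *
 *   static struct ivc ivc_ch;
 *
 *   static void ring_doorbell(const struct ivc *unused)
 *   {
 *       (void)unused;
 *       (platform-specific: trigger the remote end's notification IRQ)
 *   }
 *
 *   if (tegra_ivc_init(&ivc_ch, RX_BASE, TX_BASE, 16U, 128U,
 *                      ring_doorbell) == 0) {
 *       tegra_ivc_channel_reset(&ivc_ch);
 *       while (tegra_ivc_channel_notified(&ivc_ch) != 0) {
 *           (wait for the peer's next notification, then retry)
 *       }
 *       (void)tegra_ivc_write(&ivc_ch, "ping", 5U);
 *   }
 */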