/*
 * Copyright (c) 2016, NVIDIA CORPORATION.
 *
 * SPDX-License-Identifier: GPL-2.0
 */

#include <common.h>
#include <asm/io.h>
#include <asm/arch-tegra/ivc.h>

#define TEGRA_IVC_ALIGN 64

/*
 * IVC channel reset protocol.
 *
 * Each end uses its tx_channel.state to indicate its synchronization state.
 */
enum ivc_state {
	/*
	 * This value is zero for backwards compatibility with services that
	 * assume channels to be initially zeroed. Such channels are in an
	 * initially valid state, but cannot be asynchronously reset, and must
	 * maintain a valid state at all times.
	 *
	 * The transmitting end can enter the established state from the sync
	 * or ack state when it observes the receiving endpoint in the ack or
	 * established state, indicating that it has cleared the counters in
	 * our rx_channel.
	 */
	ivc_state_established = 0,

	/*
	 * If an endpoint is observed in the sync state, the remote endpoint
	 * is allowed to clear the counters it owns asynchronously with
	 * respect to the current endpoint. Therefore, the current endpoint is
	 * no longer allowed to communicate.
	 */
	ivc_state_sync,

	/*
	 * When the transmitting end observes the receiving end in the sync
	 * state, it can clear the w_count and r_count and transition to the
	 * ack state. If the remote endpoint observes us in the ack state, it
	 * can return to the established state once it has cleared its
	 * counters.
	 */
	ivc_state_ack
};
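
/*
 * Illustrative interleaving of the reset handshake described above (one of
 * several valid orderings; both ends begin with tegra_ivc_channel_reset()):
 *
 *	A: state = SYNC, notify B
 *	B: state = SYNC, notify A
 *	A: sees B in SYNC -> clears counters, state = ACK, notifies B
 *	B: sees A in ACK while in SYNC -> clears counters, state = EST,
 *	   notifies A
 *	A: sees B in EST while in ACK -> state = EST, notifies B
 *
 * At this point both ends are established and may exchange frames.
 */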

/*
 * This structure is divided into two cache-aligned parts, the first of which
 * is only written through the tx_channel pointer, while the second is only
 * written through the rx_channel pointer. This delineates ownership of the
 * cache lines, which is critical to performance and necessary in non-cache
 * coherent implementations.
 */
struct tegra_ivc_channel_header {
	union {
		/* fields owned by the transmitting end */
		struct {
			uint32_t w_count;
			uint32_t state;
		};
		uint8_t w_align[TEGRA_IVC_ALIGN];
	};
	union {
		/* fields owned by the receiving end */
		uint32_t r_count;
		uint8_t r_align[TEGRA_IVC_ALIGN];
	};
};
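
/*
 * Layout of each queue region in shared memory, as computed by
 * tegra_ivc_frame_addr() below:
 *
 *	offset 0:                                the channel header above
 *	offset sizeof(header) + n * frame_size:  frame n, for 0 <= n < nframes
 */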

static inline void tegra_ivc_invalidate_counter(struct tegra_ivc *ivc,
					struct tegra_ivc_channel_header *h,
					ulong offset)
{
	ulong base = ((ulong)h) + offset;
	invalidate_dcache_range(base, base + TEGRA_IVC_ALIGN);
}

static inline void tegra_ivc_flush_counter(struct tegra_ivc *ivc,
					   struct tegra_ivc_channel_header *h,
					   ulong offset)
{
	ulong base = ((ulong)h) + offset;
	flush_dcache_range(base, base + TEGRA_IVC_ALIGN);
}

static inline ulong tegra_ivc_frame_addr(struct tegra_ivc *ivc,
					 struct tegra_ivc_channel_header *h,
					 uint32_t frame)
{
	BUG_ON(frame >= ivc->nframes);

	return ((ulong)h) + sizeof(struct tegra_ivc_channel_header) +
	       (ivc->frame_size * frame);
}

static inline void *tegra_ivc_frame_pointer(struct tegra_ivc *ivc,
					struct tegra_ivc_channel_header *ch,
					uint32_t frame)
{
	return (void *)tegra_ivc_frame_addr(ivc, ch, frame);
}

static inline void tegra_ivc_invalidate_frame(struct tegra_ivc *ivc,
					struct tegra_ivc_channel_header *h,
					unsigned frame)
{
	ulong base = tegra_ivc_frame_addr(ivc, h, frame);
	invalidate_dcache_range(base, base + ivc->frame_size);
}

static inline void tegra_ivc_flush_frame(struct tegra_ivc *ivc,
					 struct tegra_ivc_channel_header *h,
					 unsigned frame)
{
	ulong base = tegra_ivc_frame_addr(ivc, h, frame);
	flush_dcache_range(base, base + ivc->frame_size);
}

static inline int tegra_ivc_channel_empty(struct tegra_ivc *ivc,
					  struct tegra_ivc_channel_header *ch)
{
	/*
	 * This function performs multiple checks on the same values with
	 * security implications, so create snapshots with ACCESS_ONCE() to
	 * ensure that these checks use the same values.
	 */
	uint32_t w_count = ACCESS_ONCE(ch->w_count);
	uint32_t r_count = ACCESS_ONCE(ch->r_count);

	/*
	 * Perform an over-full check to prevent denial of service attacks
	 * where a server could be easily fooled into believing that there's
	 * an extremely large number of frames ready, since receivers are not
	 * expected to check for full or over-full conditions.
	 *
	 * Although the channel isn't empty, this is an invalid case caused by
	 * a potentially malicious peer, so returning empty is safer, because
	 * it gives the impression that the channel has gone silent.
	 */
	if (w_count - r_count > ivc->nframes)
		return 1;

	return w_count == r_count;
}

static inline int tegra_ivc_channel_full(struct tegra_ivc *ivc,
					 struct tegra_ivc_channel_header *ch)
{
	/*
	 * Invalid cases where the counters indicate that the queue is over
	 * capacity also appear full.
	 */
	return (ACCESS_ONCE(ch->w_count) - ACCESS_ONCE(ch->r_count)) >=
	       ivc->nframes;
}
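
/*
 * Worked example (illustrative only) of the modulo-2^32 arithmetic used by
 * the empty/full checks above: the counters increase monotonically and are
 * never decremented, so the difference stays correct even after w_count
 * wraps past zero. With nframes == 4:
 *
 *	w_count = 0x00000002, r_count = 0xfffffffe
 *	w_count - r_count == 4 -> channel full
 *	w_count - r_count == 5 -> over-full; reported as empty and full above
 */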

static inline void tegra_ivc_advance_rx(struct tegra_ivc *ivc)
{
	ACCESS_ONCE(ivc->rx_channel->r_count) =
		ACCESS_ONCE(ivc->rx_channel->r_count) + 1;

	if (ivc->r_pos == ivc->nframes - 1)
		ivc->r_pos = 0;
	else
		ivc->r_pos++;
}

static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc)
{
	ACCESS_ONCE(ivc->tx_channel->w_count) =
		ACCESS_ONCE(ivc->tx_channel->w_count) + 1;

	if (ivc->w_pos == ivc->nframes - 1)
		ivc->w_pos = 0;
	else
		ivc->w_pos++;
}

static inline int tegra_ivc_check_read(struct tegra_ivc *ivc)
{
	ulong offset;

	/*
	 * tx_channel->state is set locally, so it is not synchronized with
	 * state from the remote peer. The remote peer cannot reset its
	 * transmit counters until we've acknowledged its synchronization
	 * request, so no additional synchronization is required because an
	 * asynchronous transition of rx_channel->state to ivc_state_ack is
	 * not allowed.
	 */
	if (ivc->tx_channel->state != ivc_state_established)
		return -ECONNRESET;

	/*
	 * Avoid unnecessary invalidations when performing repeated accesses
	 * to an IVC channel by checking the old queue pointers first.
	 * Synchronization is only necessary when these pointers indicate
	 * empty or full.
	 */
	if (!tegra_ivc_channel_empty(ivc, ivc->rx_channel))
		return 0;

	offset = offsetof(struct tegra_ivc_channel_header, w_count);
	tegra_ivc_invalidate_counter(ivc, ivc->rx_channel, offset);
	return tegra_ivc_channel_empty(ivc, ivc->rx_channel) ? -ENOMEM : 0;
}

static inline int tegra_ivc_check_write(struct tegra_ivc *ivc)
{
	ulong offset;

	if (ivc->tx_channel->state != ivc_state_established)
		return -ECONNRESET;

	if (!tegra_ivc_channel_full(ivc, ivc->tx_channel))
		return 0;

	offset = offsetof(struct tegra_ivc_channel_header, r_count);
	tegra_ivc_invalidate_counter(ivc, ivc->tx_channel, offset);
	return tegra_ivc_channel_full(ivc, ivc->tx_channel) ? -ENOMEM : 0;
}

static inline uint32_t tegra_ivc_channel_avail_count(struct tegra_ivc *ivc,
					struct tegra_ivc_channel_header *ch)
{
	/*
	 * This function isn't expected to be used in scenarios where an
	 * over-full situation can lead to denial of service attacks. See the
	 * comment in tegra_ivc_channel_empty() for an explanation about
	 * special over-full considerations.
	 */
	return ACCESS_ONCE(ch->w_count) - ACCESS_ONCE(ch->r_count);
}

int tegra_ivc_read_get_next_frame(struct tegra_ivc *ivc, void **frame)
{
	int result = tegra_ivc_check_read(ivc);
	if (result < 0)
		return result;

	/*
	 * Order observation of w_pos potentially indicating new data before
	 * data read.
	 */
	mb();

	tegra_ivc_invalidate_frame(ivc, ivc->rx_channel, ivc->r_pos);
	*frame = tegra_ivc_frame_pointer(ivc, ivc->rx_channel, ivc->r_pos);

	return 0;
}

int tegra_ivc_read_advance(struct tegra_ivc *ivc)
{
	ulong offset;
	int result;

	/*
	 * No read barriers or synchronization here: the caller is expected
	 * to have already observed the channel non-empty. This check is just
	 * to catch programming errors.
	 */
	result = tegra_ivc_check_read(ivc);
	if (result)
		return result;

	tegra_ivc_advance_rx(ivc);
	offset = offsetof(struct tegra_ivc_channel_header, r_count);
	tegra_ivc_flush_counter(ivc, ivc->rx_channel, offset);

	/*
	 * Ensure our write to r_pos occurs before our read from w_pos.
	 */
	mb();

	offset = offsetof(struct tegra_ivc_channel_header, w_count);
	tegra_ivc_invalidate_counter(ivc, ivc->rx_channel, offset);
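
	/*
	 * If exactly nframes - 1 frames are now pending, this advance has
	 * just taken the queue from full to not-full, so the remote writer
	 * may be waiting for space and needs a notification.
	 */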
	if (tegra_ivc_channel_avail_count(ivc, ivc->rx_channel) ==
	    ivc->nframes - 1)
		ivc->notify(ivc);

	return 0;
}

int tegra_ivc_write_get_next_frame(struct tegra_ivc *ivc, void **frame)
{
	int result = tegra_ivc_check_write(ivc);
	if (result)
		return result;

	*frame = tegra_ivc_frame_pointer(ivc, ivc->tx_channel, ivc->w_pos);

	return 0;
}

int tegra_ivc_write_advance(struct tegra_ivc *ivc)
{
	ulong offset;
	int result;

	result = tegra_ivc_check_write(ivc);
	if (result)
		return result;

	tegra_ivc_flush_frame(ivc, ivc->tx_channel, ivc->w_pos);

	/*
	 * Order any possible stores to the frame before update of w_pos.
	 */
	mb();

	tegra_ivc_advance_tx(ivc);
	offset = offsetof(struct tegra_ivc_channel_header, w_count);
	tegra_ivc_flush_counter(ivc, ivc->tx_channel, offset);

	/*
	 * Ensure our write to w_pos occurs before our read from r_pos.
	 */
	mb();

	offset = offsetof(struct tegra_ivc_channel_header, r_count);
	tegra_ivc_invalidate_counter(ivc, ivc->tx_channel, offset);
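
	/*
	 * If exactly one frame is now pending, this advance has just taken
	 * the queue from empty to non-empty, so the remote reader may be
	 * waiting for data and needs a notification.
	 */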
	if (tegra_ivc_channel_avail_count(ivc, ivc->tx_channel) == 1)
		ivc->notify(ivc);

	return 0;
}

/*
 * ===============================================================
 * IVC State Transition Table - see tegra_ivc_channel_notified()
 * ===============================================================
 *
 *	local	remote	action
 *	-----	------	-----------------------------------
 *	SYNC	EST	<none>
 *	SYNC	ACK	reset counters; move to EST; notify
 *	SYNC	SYNC	reset counters; move to ACK; notify
 *	ACK	EST	move to EST; notify
 *	ACK	ACK	move to EST; notify
 *	ACK	SYNC	reset counters; move to ACK; notify
 *	EST	EST	<none>
 *	EST	ACK	<none>
 *	EST	SYNC	reset counters; move to ACK; notify
 *
 * ===============================================================
 */
int tegra_ivc_channel_notified(struct tegra_ivc *ivc)
{
	ulong offset;
	enum ivc_state peer_state;

	/* Copy the receiver's state out of shared memory. */
	offset = offsetof(struct tegra_ivc_channel_header, w_count);
	tegra_ivc_invalidate_counter(ivc, ivc->rx_channel, offset);
	peer_state = ACCESS_ONCE(ivc->rx_channel->state);

	if (peer_state == ivc_state_sync) {
		/*
		 * Order observation of ivc_state_sync before stores clearing
		 * tx_channel.
		 */
		mb();

		/*
		 * Reset tx_channel counters. The remote end is in the SYNC
		 * state and won't make progress until we change our state,
		 * so the counters are not in use at this time.
		 */
		ivc->tx_channel->w_count = 0;
		ivc->rx_channel->r_count = 0;

		ivc->w_pos = 0;
		ivc->r_pos = 0;

		/*
		 * Ensure that counters appear cleared before new state can be
		 * observed.
		 */
		mb();

		/*
		 * Move to ACK state. We have just cleared our counters, so it
		 * is now safe for the remote end to start using these values.
		 */
		ivc->tx_channel->state = ivc_state_ack;
		offset = offsetof(struct tegra_ivc_channel_header, w_count);
		tegra_ivc_flush_counter(ivc, ivc->tx_channel, offset);

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc);
	} else if (ivc->tx_channel->state == ivc_state_sync &&
		   peer_state == ivc_state_ack) {
		/*
		 * Order observation of ivc_state_sync before stores clearing
		 * tx_channel.
		 */
		mb();

		/*
		 * Reset tx_channel counters. The remote end is in the ACK
		 * state and won't make progress until we change our state,
		 * so the counters are not in use at this time.
		 */
		ivc->tx_channel->w_count = 0;
		ivc->rx_channel->r_count = 0;

		ivc->w_pos = 0;
		ivc->r_pos = 0;

		/*
		 * Ensure that counters appear cleared before new state can be
		 * observed.
		 */
		mb();

		/*
		 * Move to ESTABLISHED state. We know that the remote end has
		 * already cleared its counters, so it is safe to start
		 * writing/reading on this channel.
		 */
		ivc->tx_channel->state = ivc_state_established;
		offset = offsetof(struct tegra_ivc_channel_header, w_count);
		tegra_ivc_flush_counter(ivc, ivc->tx_channel, offset);

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc);
	} else if (ivc->tx_channel->state == ivc_state_ack) {
		/*
		 * At this point, we have observed the peer to be in either
		 * the ACK or ESTABLISHED state. Next, order observation of
		 * peer state before storing to tx_channel.
		 */
		mb();

		/*
		 * Move to ESTABLISHED state. We know that we have previously
		 * cleared our counters, and we know that the remote end has
		 * cleared its counters, so it is safe to start
		 * writing/reading on this channel.
		 */
		ivc->tx_channel->state = ivc_state_established;
		offset = offsetof(struct tegra_ivc_channel_header, w_count);
		tegra_ivc_flush_counter(ivc, ivc->tx_channel, offset);

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc);
	} else {
		/*
		 * There is no need to handle any further action. Either the
		 * channel is already fully established, or we are waiting
		 * for the remote end to catch up with our current state.
		 * Refer to the diagram in "IVC State Transition Table"
		 * above.
		 */
	}

	if (ivc->tx_channel->state != ivc_state_established)
		return -EAGAIN;

	return 0;
}

void tegra_ivc_channel_reset(struct tegra_ivc *ivc)
{
	ulong offset;

	ivc->tx_channel->state = ivc_state_sync;
	offset = offsetof(struct tegra_ivc_channel_header, w_count);
	tegra_ivc_flush_counter(ivc, ivc->tx_channel, offset);
	ivc->notify(ivc);
}
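
/*
 * Sketch of the caller-side handshake (assumes a hypothetical
 * wait_for_ivc_notification() that blocks until the remote end signals the
 * channel):
 *
 *	tegra_ivc_channel_reset(&ivc);
 *	while (tegra_ivc_channel_notified(&ivc) == -EAGAIN)
 *		wait_for_ivc_notification();
 *
 * Once tegra_ivc_channel_notified() returns 0, the local end is established
 * and the channel is usable.
 */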

static int check_ivc_params(ulong qbase1, ulong qbase2, uint32_t nframes,
			    uint32_t frame_size)
{
	int ret = 0;

	BUG_ON(offsetof(struct tegra_ivc_channel_header, w_count) &
	       (TEGRA_IVC_ALIGN - 1));
	BUG_ON(offsetof(struct tegra_ivc_channel_header, r_count) &
	       (TEGRA_IVC_ALIGN - 1));
	BUG_ON(sizeof(struct tegra_ivc_channel_header) &
	       (TEGRA_IVC_ALIGN - 1));

	if ((uint64_t)nframes * (uint64_t)frame_size >= 0x100000000) {
		pr_err("tegra_ivc: nframes * frame_size overflows\n");
		return -EINVAL;
	}

	/*
	 * The headers must at least be aligned enough for counters
	 * to be accessed atomically.
	 */
	if ((qbase1 & (TEGRA_IVC_ALIGN - 1)) ||
	    (qbase2 & (TEGRA_IVC_ALIGN - 1))) {
		pr_err("tegra_ivc: channel start not aligned\n");
		return -EINVAL;
	}

	if (frame_size & (TEGRA_IVC_ALIGN - 1)) {
		pr_err("tegra_ivc: frame size not adequately aligned\n");
		return -EINVAL;
	}

	if (qbase1 < qbase2) {
		if (qbase1 + frame_size * nframes > qbase2)
			ret = -EINVAL;
	} else {
		if (qbase2 + frame_size * nframes > qbase1)
			ret = -EINVAL;
	}

	if (ret) {
		pr_err("tegra_ivc: queue regions overlap\n");
		return ret;
	}

	return 0;
}

int tegra_ivc_init(struct tegra_ivc *ivc, ulong rx_base, ulong tx_base,
		   uint32_t nframes, uint32_t frame_size,
		   void (*notify)(struct tegra_ivc *))
{
	int ret;

	if (!ivc)
		return -EINVAL;

	ret = check_ivc_params(rx_base, tx_base, nframes, frame_size);
	if (ret)
		return ret;

	ivc->rx_channel = (struct tegra_ivc_channel_header *)rx_base;
	ivc->tx_channel = (struct tegra_ivc_channel_header *)tx_base;
	ivc->w_pos = 0;
	ivc->r_pos = 0;
	ivc->nframes = nframes;
	ivc->frame_size = frame_size;
	ivc->notify = notify;

	return 0;
}
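
/*
 * Minimal usage sketch. Everything here is hypothetical glue: doorbell() is
 * a platform-specific notification hook, and rx_base/tx_base point at two
 * suitably aligned, non-overlapping queue regions set aside by the platform.
 *
 *	static void doorbell(struct tegra_ivc *ivc)
 *	{
 *		... ring the remote end's interrupt/mailbox ...
 *	}
 *
 *	struct tegra_ivc ivc;
 *	void *frame;
 *
 *	tegra_ivc_init(&ivc, rx_base, tx_base, 16, 128, doorbell);
 *	tegra_ivc_channel_reset(&ivc);
 *	while (tegra_ivc_channel_notified(&ivc) == -EAGAIN)
 *		;
 *
 * To transmit one frame:
 *
 *	if (!tegra_ivc_write_get_next_frame(&ivc, &frame)) {
 *		memcpy(frame, msg, msg_len);
 *		tegra_ivc_write_advance(&ivc);
 *	}
 *
 * To receive one frame:
 *
 *	if (!tegra_ivc_read_get_next_frame(&ivc, &frame)) {
 *		process(frame, ivc.frame_size);
 *		tegra_ivc_read_advance(&ivc);
 *	}
 *
 * msg, msg_len and process() are placeholders for the caller's payload
 * handling.
 */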