xref: /arm-trusted-firmware/plat/nvidia/tegra/drivers/bpmp_ipc/ivc.c (revision 91f16700b400a8c0651d24a598fc48ee2997a0d7)
1*91f16700Schasinglulu /*
2*91f16700Schasinglulu  * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
3*91f16700Schasinglulu  *
4*91f16700Schasinglulu  * SPDX-License-Identifier: BSD-3-Clause
5*91f16700Schasinglulu  */
6*91f16700Schasinglulu 
7*91f16700Schasinglulu #include <arch_helpers.h>
8*91f16700Schasinglulu #include <assert.h>
9*91f16700Schasinglulu #include <common/debug.h>
10*91f16700Schasinglulu #include <errno.h>
11*91f16700Schasinglulu #include <stdbool.h>
12*91f16700Schasinglulu #include <stddef.h>
13*91f16700Schasinglulu #include <string.h>
14*91f16700Schasinglulu 
15*91f16700Schasinglulu #include "ivc.h"
16*91f16700Schasinglulu 
/*
 * IVC channel reset protocol.
 *
 * Each end uses its tx_channel.state to indicate its synchronization state;
 * the peer's state is observed through rx_channel.state.
 */
enum {
	/*
	 * This value is zero for backwards compatibility with services that
	 * assume channels to be initially zeroed. Such channels are in an
	 * initially valid state, but cannot be asynchronously reset, and must
	 * maintain a valid state at all times.
	 *
	 * The transmitting end can enter the established state from the sync or
	 * ack state when it observes the receiving endpoint in the ack or
	 * established state, indicating that it has cleared the counters in our
	 * rx_channel.
	 */
	ivc_state_established = U(0),

	/*
	 * If an endpoint is observed in the sync state, the remote endpoint is
	 * allowed to clear the counters it owns asynchronously with respect to
	 * the current endpoint. Therefore, the current endpoint is no longer
	 * allowed to communicate.
	 */
	ivc_state_sync = U(1),

	/*
	 * When the transmitting end observes the receiving end in the sync
	 * state, it can clear the w_count and r_count and transition to the ack
	 * state. If the remote endpoint observes us in the ack state, it can
	 * return to the established state once it has cleared its counters.
	 */
	ivc_state_ack = U(2)
};
52*91f16700Schasinglulu 
/*
 * Shared-memory channel header that precedes the frame buffers.
 *
 * This structure is divided into two cache-aligned parts: the first is only
 * written through the tx_channel pointer, while the second is only written
 * through the rx_channel pointer. This delineates ownership of the cache
 * lines, which is critical to performance and necessary in non-cache-coherent
 * implementations.
 */
struct ivc_channel_header {
	struct {
		/* fields owned by the transmitting end */
		uint32_t w_count;	/* running count of frames written */
		uint32_t state;		/* reset-protocol state (ivc_state_*) */
		/* pad the TX half out to IVC_CHHDR_TX_FIELDS words */
		uint32_t w_rsvd[IVC_CHHDR_TX_FIELDS - 2];
	};
	struct {
		/* fields owned by the receiving end */
		uint32_t r_count;	/* running count of frames consumed */
		/* pad the RX half out to IVC_CHHDR_RX_FIELDS words */
		uint32_t r_rsvd[IVC_CHHDR_RX_FIELDS - 1];
	};
};
73*91f16700Schasinglulu 
74*91f16700Schasinglulu static inline bool ivc_channel_empty(const struct ivc *ivc,
75*91f16700Schasinglulu 		volatile const struct ivc_channel_header *ch)
76*91f16700Schasinglulu {
77*91f16700Schasinglulu 	/*
78*91f16700Schasinglulu 	 * This function performs multiple checks on the same values with
79*91f16700Schasinglulu 	 * security implications, so sample the counters' current values in
80*91f16700Schasinglulu 	 * shared memory to ensure that these checks use the same values.
81*91f16700Schasinglulu 	 */
82*91f16700Schasinglulu 	uint32_t wr_count = ch->w_count;
83*91f16700Schasinglulu 	uint32_t rd_count = ch->r_count;
84*91f16700Schasinglulu 	bool ret = false;
85*91f16700Schasinglulu 
86*91f16700Schasinglulu 	(void)ivc;
87*91f16700Schasinglulu 
88*91f16700Schasinglulu 	/*
89*91f16700Schasinglulu 	 * Perform an over-full check to prevent denial of service attacks where
90*91f16700Schasinglulu 	 * a server could be easily fooled into believing that there's an
91*91f16700Schasinglulu 	 * extremely large number of frames ready, since receivers are not
92*91f16700Schasinglulu 	 * expected to check for full or over-full conditions.
93*91f16700Schasinglulu 	 *
94*91f16700Schasinglulu 	 * Although the channel isn't empty, this is an invalid case caused by
95*91f16700Schasinglulu 	 * a potentially malicious peer, so returning empty is safer, because it
96*91f16700Schasinglulu 	 * gives the impression that the channel has gone silent.
97*91f16700Schasinglulu 	 */
98*91f16700Schasinglulu 	if (((wr_count - rd_count) > ivc->nframes) || (wr_count == rd_count)) {
99*91f16700Schasinglulu 		ret = true;
100*91f16700Schasinglulu 	}
101*91f16700Schasinglulu 
102*91f16700Schasinglulu 	return ret;
103*91f16700Schasinglulu }
104*91f16700Schasinglulu 
105*91f16700Schasinglulu static inline bool ivc_channel_full(const struct ivc *ivc,
106*91f16700Schasinglulu 		volatile const struct ivc_channel_header *ch)
107*91f16700Schasinglulu {
108*91f16700Schasinglulu 	uint32_t wr_count = ch->w_count;
109*91f16700Schasinglulu 	uint32_t rd_count = ch->r_count;
110*91f16700Schasinglulu 
111*91f16700Schasinglulu 	(void)ivc;
112*91f16700Schasinglulu 
113*91f16700Schasinglulu 	/*
114*91f16700Schasinglulu 	 * Invalid cases where the counters indicate that the queue is over
115*91f16700Schasinglulu 	 * capacity also appear full.
116*91f16700Schasinglulu 	 */
117*91f16700Schasinglulu 	return ((wr_count - rd_count) >= ivc->nframes);
118*91f16700Schasinglulu }
119*91f16700Schasinglulu 
120*91f16700Schasinglulu static inline uint32_t ivc_channel_avail_count(const struct ivc *ivc,
121*91f16700Schasinglulu 		volatile const struct ivc_channel_header *ch)
122*91f16700Schasinglulu {
123*91f16700Schasinglulu 	uint32_t wr_count = ch->w_count;
124*91f16700Schasinglulu 	uint32_t rd_count = ch->r_count;
125*91f16700Schasinglulu 
126*91f16700Schasinglulu 	(void)ivc;
127*91f16700Schasinglulu 
128*91f16700Schasinglulu 	/*
129*91f16700Schasinglulu 	 * This function isn't expected to be used in scenarios where an
130*91f16700Schasinglulu 	 * over-full situation can lead to denial of service attacks. See the
131*91f16700Schasinglulu 	 * comment in ivc_channel_empty() for an explanation about special
132*91f16700Schasinglulu 	 * over-full considerations.
133*91f16700Schasinglulu 	 */
134*91f16700Schasinglulu 	return (wr_count - rd_count);
135*91f16700Schasinglulu }
136*91f16700Schasinglulu 
137*91f16700Schasinglulu static inline void ivc_advance_tx(struct ivc *ivc)
138*91f16700Schasinglulu {
139*91f16700Schasinglulu 	ivc->tx_channel->w_count++;
140*91f16700Schasinglulu 
141*91f16700Schasinglulu 	if (ivc->w_pos == (ivc->nframes - (uint32_t)1U)) {
142*91f16700Schasinglulu 		ivc->w_pos = 0U;
143*91f16700Schasinglulu 	} else {
144*91f16700Schasinglulu 		ivc->w_pos++;
145*91f16700Schasinglulu 	}
146*91f16700Schasinglulu }
147*91f16700Schasinglulu 
148*91f16700Schasinglulu static inline void ivc_advance_rx(struct ivc *ivc)
149*91f16700Schasinglulu {
150*91f16700Schasinglulu 	ivc->rx_channel->r_count++;
151*91f16700Schasinglulu 
152*91f16700Schasinglulu 	if (ivc->r_pos == (ivc->nframes - (uint32_t)1U)) {
153*91f16700Schasinglulu 		ivc->r_pos = 0U;
154*91f16700Schasinglulu 	} else {
155*91f16700Schasinglulu 		ivc->r_pos++;
156*91f16700Schasinglulu 	}
157*91f16700Schasinglulu }
158*91f16700Schasinglulu 
/*
 * Check whether the RX channel has a frame ready to read.
 *
 * Returns 0 when a frame is available, -ECONNRESET when the channel is not
 * in the established state, and -ENOMEM when the channel is empty.
 */
static inline int32_t ivc_check_read(const struct ivc *ivc)
{
	/*
	 * tx_channel->state is set locally, so it is not synchronized with
	 * state from the remote peer. The remote peer cannot reset its
	 * transmit counters until we've acknowledged its synchronization
	 * request, so no additional synchronization is required because an
	 * asynchronous transition of rx_channel->state to ivc_state_ack is not
	 * allowed.
	 */
	if (ivc->tx_channel->state != ivc_state_established) {
		return -ECONNRESET;
	}

	/*
	 * Avoid unnecessary invalidations when performing repeated accesses to
	 * an IVC channel by checking the old queue pointers first.
	 * Synchronization is only necessary when these pointers indicate empty
	 * or full.
	 */
	if (!ivc_channel_empty(ivc, ivc->rx_channel)) {
		return 0;
	}

	/*
	 * Deliberate second call: it re-reads the volatile shared counters,
	 * catching a frame the peer may have produced since the check above.
	 */
	return ivc_channel_empty(ivc, ivc->rx_channel) ? -ENOMEM : 0;
}
185*91f16700Schasinglulu 
/*
 * Check whether the TX channel can accept another frame.
 *
 * Returns 0 when writable, -ECONNRESET when the channel is not in the
 * established state, and -ENOMEM when the channel is full.
 */
static inline int32_t ivc_check_write(const struct ivc *ivc)
{
	if (ivc->tx_channel->state != ivc_state_established) {
		return -ECONNRESET;
	}

	/* Fast path: the cached counters already show available space. */
	if (!ivc_channel_full(ivc, ivc->tx_channel)) {
		return 0;
	}

	/*
	 * Deliberate second call: it re-reads the volatile shared counters,
	 * catching space the peer may have freed since the check above.
	 */
	return ivc_channel_full(ivc, ivc->tx_channel) ? -ENOMEM : 0;
}
198*91f16700Schasinglulu 
bool tegra_ivc_can_read(const struct ivc *ivc)
{
	/* Readable iff the read-side checks report no error. */
	int32_t err = ivc_check_read(ivc);

	return (err == 0);
}
203*91f16700Schasinglulu 
bool tegra_ivc_can_write(const struct ivc *ivc)
{
	/* Writable iff the write-side checks report no error. */
	int32_t err = ivc_check_write(ivc);

	return (err == 0);
}
208*91f16700Schasinglulu 
209*91f16700Schasinglulu bool tegra_ivc_tx_empty(const struct ivc *ivc)
210*91f16700Schasinglulu {
211*91f16700Schasinglulu 	return ivc_channel_empty(ivc, ivc->tx_channel);
212*91f16700Schasinglulu }
213*91f16700Schasinglulu 
static inline uintptr_t calc_frame_offset(uint32_t frame_index,
	uint32_t frame_size, uint32_t frame_offset)
{
	/*
	 * Byte offset of a position inside a frame array: whole frames first,
	 * then the intra-frame offset. Widen before multiplying so the
	 * product cannot wrap in 32 bits.
	 */
	uintptr_t off = (uintptr_t)frame_index * (uintptr_t)frame_size;

	off += (uintptr_t)frame_offset;

	return off;
}
220*91f16700Schasinglulu 
221*91f16700Schasinglulu static void *ivc_frame_pointer(const struct ivc *ivc,
222*91f16700Schasinglulu 				volatile const struct ivc_channel_header *ch,
223*91f16700Schasinglulu 				uint32_t frame)
224*91f16700Schasinglulu {
225*91f16700Schasinglulu 	assert(frame < ivc->nframes);
226*91f16700Schasinglulu 	return (void *)((uintptr_t)(&ch[1]) +
227*91f16700Schasinglulu 		calc_frame_offset(frame, ivc->frame_size, 0));
228*91f16700Schasinglulu }
229*91f16700Schasinglulu 
230*91f16700Schasinglulu int32_t tegra_ivc_read(struct ivc *ivc, void *buf, size_t max_read)
231*91f16700Schasinglulu {
232*91f16700Schasinglulu 	const void *src;
233*91f16700Schasinglulu 	int32_t result;
234*91f16700Schasinglulu 
235*91f16700Schasinglulu 	if (buf == NULL) {
236*91f16700Schasinglulu 		return -EINVAL;
237*91f16700Schasinglulu 	}
238*91f16700Schasinglulu 
239*91f16700Schasinglulu 	if (max_read > ivc->frame_size) {
240*91f16700Schasinglulu 		return -E2BIG;
241*91f16700Schasinglulu 	}
242*91f16700Schasinglulu 
243*91f16700Schasinglulu 	result = ivc_check_read(ivc);
244*91f16700Schasinglulu 	if (result != 0) {
245*91f16700Schasinglulu 		return result;
246*91f16700Schasinglulu 	}
247*91f16700Schasinglulu 
248*91f16700Schasinglulu 	/*
249*91f16700Schasinglulu 	 * Order observation of w_pos potentially indicating new data before
250*91f16700Schasinglulu 	 * data read.
251*91f16700Schasinglulu 	 */
252*91f16700Schasinglulu 	dmbish();
253*91f16700Schasinglulu 
254*91f16700Schasinglulu 	src = ivc_frame_pointer(ivc, ivc->rx_channel, ivc->r_pos);
255*91f16700Schasinglulu 
256*91f16700Schasinglulu 	(void)memcpy(buf, src, max_read);
257*91f16700Schasinglulu 
258*91f16700Schasinglulu 	ivc_advance_rx(ivc);
259*91f16700Schasinglulu 
260*91f16700Schasinglulu 	/*
261*91f16700Schasinglulu 	 * Ensure our write to r_pos occurs before our read from w_pos.
262*91f16700Schasinglulu 	 */
263*91f16700Schasinglulu 	dmbish();
264*91f16700Schasinglulu 
265*91f16700Schasinglulu 	/*
266*91f16700Schasinglulu 	 * Notify only upon transition from full to non-full.
267*91f16700Schasinglulu 	 * The available count can only asynchronously increase, so the
268*91f16700Schasinglulu 	 * worst possible side-effect will be a spurious notification.
269*91f16700Schasinglulu 	 */
270*91f16700Schasinglulu 	if (ivc_channel_avail_count(ivc, ivc->rx_channel) == (ivc->nframes - (uint32_t)1U)) {
271*91f16700Schasinglulu 		ivc->notify(ivc);
272*91f16700Schasinglulu 	}
273*91f16700Schasinglulu 
274*91f16700Schasinglulu 	return (int32_t)max_read;
275*91f16700Schasinglulu }
276*91f16700Schasinglulu 
277*91f16700Schasinglulu /* directly peek at the next frame rx'ed */
278*91f16700Schasinglulu void *tegra_ivc_read_get_next_frame(const struct ivc *ivc)
279*91f16700Schasinglulu {
280*91f16700Schasinglulu 	if (ivc_check_read(ivc) != 0) {
281*91f16700Schasinglulu 		return NULL;
282*91f16700Schasinglulu 	}
283*91f16700Schasinglulu 
284*91f16700Schasinglulu 	/*
285*91f16700Schasinglulu 	 * Order observation of w_pos potentially indicating new data before
286*91f16700Schasinglulu 	 * data read.
287*91f16700Schasinglulu 	 */
288*91f16700Schasinglulu 	dmbld();
289*91f16700Schasinglulu 
290*91f16700Schasinglulu 	return ivc_frame_pointer(ivc, ivc->rx_channel, ivc->r_pos);
291*91f16700Schasinglulu }
292*91f16700Schasinglulu 
/*
 * Consume the current RX frame without copying it, typically after the
 * caller has inspected it via tegra_ivc_read_get_next_frame().
 * Returns 0 on success or a negative errno from ivc_check_read().
 */
int32_t tegra_ivc_read_advance(struct ivc *ivc)
{
	/*
	 * No read barriers or synchronization here: the caller is expected to
	 * have already observed the channel non-empty. This check is just to
	 * catch programming errors.
	 */
	int32_t result = ivc_check_read(ivc);
	if (result != 0) {
		return result;
	}

	ivc_advance_rx(ivc);

	/*
	 * Ensure our write to r_pos occurs before our read from w_pos.
	 */
	dmbish();

	/*
	 * Notify only upon transition from full to non-full.
	 * The available count can only asynchronously increase, so the
	 * worst possible side-effect will be a spurious notification.
	 */
	if (ivc_channel_avail_count(ivc, ivc->rx_channel) == (ivc->nframes - (uint32_t)1U)) {
		ivc->notify(ivc);
	}

	return 0;
}
323*91f16700Schasinglulu 
/*
 * Copy size bytes from buf into the next free TX frame (zero-padding the
 * remainder of the frame) and publish it to the peer. Returns the number of
 * bytes written, or a negative errno: -EINVAL on bad arguments, -E2BIG when
 * size exceeds the frame size, -ECONNRESET/-ENOMEM from ivc_check_write().
 */
int32_t tegra_ivc_write(struct ivc *ivc, const void *buf, size_t size)
{
	void *p;
	int32_t result;

	if ((buf == NULL) || (ivc == NULL)) {
		return -EINVAL;
	}

	if (size > ivc->frame_size) {
		return -E2BIG;
	}

	result = ivc_check_write(ivc);
	if (result != 0) {
		return result;
	}

	p = ivc_frame_pointer(ivc, ivc->tx_channel, ivc->w_pos);

	/* Clear the whole frame first so stale data never leaks to the peer. */
	(void)memset(p, 0, ivc->frame_size);
	(void)memcpy(p, buf, size);

	/*
	 * Ensure that updated data is visible before the w_pos counter
	 * indicates that it is ready.
	 */
	dmbst();

	ivc_advance_tx(ivc);

	/*
	 * Ensure our write to w_pos occurs before our read from r_pos.
	 */
	dmbish();

	/*
	 * Notify only upon transition from empty to non-empty.
	 * The available count can only asynchronously decrease, so the
	 * worst possible side-effect will be a spurious notification.
	 */
	if (ivc_channel_avail_count(ivc, ivc->tx_channel) == 1U) {
		ivc->notify(ivc);
	}

	return (int32_t)size;
}
371*91f16700Schasinglulu 
372*91f16700Schasinglulu /* directly poke at the next frame to be tx'ed */
373*91f16700Schasinglulu void *tegra_ivc_write_get_next_frame(const struct ivc *ivc)
374*91f16700Schasinglulu {
375*91f16700Schasinglulu 	if (ivc_check_write(ivc) != 0) {
376*91f16700Schasinglulu 		return NULL;
377*91f16700Schasinglulu 	}
378*91f16700Schasinglulu 
379*91f16700Schasinglulu 	return ivc_frame_pointer(ivc, ivc->tx_channel, ivc->w_pos);
380*91f16700Schasinglulu }
381*91f16700Schasinglulu 
/*
 * Commit (advance) the tx buffer, typically after the caller has filled the
 * frame obtained from tegra_ivc_write_get_next_frame().
 * Returns 0 on success or a negative errno from ivc_check_write().
 */
int32_t tegra_ivc_write_advance(struct ivc *ivc)
{
	int32_t result = ivc_check_write(ivc);

	if (result != 0) {
		return result;
	}

	/*
	 * Order any possible stores to the frame before update of w_pos.
	 */
	dmbst();

	ivc_advance_tx(ivc);

	/*
	 * Ensure our write to w_pos occurs before our read from r_pos.
	 */
	dmbish();

	/*
	 * Notify only upon transition from empty to non-empty.
	 * The available count can only asynchronously decrease, so the
	 * worst possible side-effect will be a spurious notification.
	 */
	if (ivc_channel_avail_count(ivc, ivc->tx_channel) == (uint32_t)1U) {
		ivc->notify(ivc);
	}

	return 0;
}
414*91f16700Schasinglulu 
/*
 * Start a channel reset: advertise the SYNC state in our TX header and kick
 * the peer so it runs its side of the reset handshake (handled in
 * tegra_ivc_channel_notified()).
 */
void tegra_ivc_channel_reset(const struct ivc *ivc)
{
	ivc->tx_channel->state = ivc_state_sync;
	ivc->notify(ivc);
}
420*91f16700Schasinglulu 
421*91f16700Schasinglulu /*
422*91f16700Schasinglulu  * ===============================================================
423*91f16700Schasinglulu  *  IVC State Transition Table - see tegra_ivc_channel_notified()
424*91f16700Schasinglulu  * ===============================================================
425*91f16700Schasinglulu  *
426*91f16700Schasinglulu  *	local	remote	action
427*91f16700Schasinglulu  *	-----	------	-----------------------------------
428*91f16700Schasinglulu  *	SYNC	EST	<none>
429*91f16700Schasinglulu  *	SYNC	ACK	reset counters; move to EST; notify
430*91f16700Schasinglulu  *	SYNC	SYNC	reset counters; move to ACK; notify
431*91f16700Schasinglulu  *	ACK	EST	move to EST; notify
432*91f16700Schasinglulu  *	ACK	ACK	move to EST; notify
433*91f16700Schasinglulu  *	ACK	SYNC	reset counters; move to ACK; notify
434*91f16700Schasinglulu  *	EST	EST	<none>
435*91f16700Schasinglulu  *	EST	ACK	<none>
436*91f16700Schasinglulu  *	EST	SYNC	reset counters; move to ACK; notify
437*91f16700Schasinglulu  *
438*91f16700Schasinglulu  * ===============================================================
439*91f16700Schasinglulu  */
/*
 * Run the local side of the reset state machine after a notification from
 * the peer; see the "IVC State Transition Table" above for the full matrix.
 * Returns 0 once the channel is fully established, -EAGAIN while the
 * handshake is still in progress.
 */
int32_t tegra_ivc_channel_notified(struct ivc *ivc)
{
	uint32_t peer_state;

	/* Copy the receiver's state out of shared memory. */
	peer_state = ivc->rx_channel->state;

	if (peer_state == (uint32_t)ivc_state_sync) {
		/*
		 * Order observation of ivc_state_sync before stores clearing
		 * tx_channel.
		 */
		dmbld();

		/*
		 * Reset tx_channel counters. The remote end is in the SYNC
		 * state and won't make progress until we change our state,
		 * so the counters are not in use at this time.
		 */
		ivc->tx_channel->w_count = 0U;
		ivc->rx_channel->r_count = 0U;

		ivc->w_pos = 0U;
		ivc->r_pos = 0U;

		/*
		 * Ensure that counters appear cleared before new state can be
		 * observed.
		 */
		dmbst();

		/*
		 * Move to ACK state. We have just cleared our counters, so it
		 * is now safe for the remote end to start using these values.
		 */
		ivc->tx_channel->state = ivc_state_ack;

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc);

	} else if ((ivc->tx_channel->state == (uint32_t)ivc_state_sync) &&
			(peer_state == (uint32_t)ivc_state_ack)) {
		/*
		 * Order observation of ivc_state_sync before stores clearing
		 * tx_channel.
		 */
		dmbld();

		/*
		 * Reset tx_channel counters. The remote end is in the ACK
		 * state and won't make progress until we change our state,
		 * so the counters are not in use at this time.
		 */
		ivc->tx_channel->w_count = 0U;
		ivc->rx_channel->r_count = 0U;

		ivc->w_pos = 0U;
		ivc->r_pos = 0U;

		/*
		 * Ensure that counters appear cleared before new state can be
		 * observed.
		 */
		dmbst();

		/*
		 * Move to ESTABLISHED state. We know that the remote end has
		 * already cleared its counters, so it is safe to start
		 * writing/reading on this channel.
		 */
		ivc->tx_channel->state = ivc_state_established;

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc);

	} else if (ivc->tx_channel->state == (uint32_t)ivc_state_ack) {
		/*
		 * At this point, we have observed the peer to be in either
		 * the ACK or ESTABLISHED state. Next, order observation of
		 * peer state before storing to tx_channel.
		 */
		dmbld();

		/*
		 * Move to ESTABLISHED state. We know that we have previously
		 * cleared our counters, and we know that the remote end has
		 * cleared its counters, so it is safe to start writing/reading
		 * on this channel.
		 */
		ivc->tx_channel->state = ivc_state_established;

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc);

	} else {
		/*
		 * There is no need to handle any further action. Either the
		 * channel is already fully established, or we are waiting for
		 * the remote end to catch up with our current state. Refer
		 * to the diagram in "IVC State Transition Table" above.
		 */
	}

	return ((ivc->tx_channel->state == (uint32_t)ivc_state_established) ? 0 : -EAGAIN);
}
551*91f16700Schasinglulu 
552*91f16700Schasinglulu size_t tegra_ivc_align(size_t size)
553*91f16700Schasinglulu {
554*91f16700Schasinglulu 	return (size + (IVC_ALIGN - 1U)) & ~(IVC_ALIGN - 1U);
555*91f16700Schasinglulu }
556*91f16700Schasinglulu 
557*91f16700Schasinglulu size_t tegra_ivc_total_queue_size(size_t queue_size)
558*91f16700Schasinglulu {
559*91f16700Schasinglulu 	if ((queue_size & (IVC_ALIGN - 1U)) != 0U) {
560*91f16700Schasinglulu 		ERROR("queue_size (%d) must be %d-byte aligned\n",
561*91f16700Schasinglulu 				(int32_t)queue_size, IVC_ALIGN);
562*91f16700Schasinglulu 		return 0;
563*91f16700Schasinglulu 	}
564*91f16700Schasinglulu 	return queue_size + sizeof(struct ivc_channel_header);
565*91f16700Schasinglulu }
566*91f16700Schasinglulu 
567*91f16700Schasinglulu static int32_t check_ivc_params(uintptr_t queue_base1, uintptr_t queue_base2,
568*91f16700Schasinglulu 		uint32_t nframes, uint32_t frame_size)
569*91f16700Schasinglulu {
570*91f16700Schasinglulu 	assert((offsetof(struct ivc_channel_header, w_count)
571*91f16700Schasinglulu 				& (IVC_ALIGN - 1U)) == 0U);
572*91f16700Schasinglulu 	assert((offsetof(struct ivc_channel_header, r_count)
573*91f16700Schasinglulu 				& (IVC_ALIGN - 1U)) == 0U);
574*91f16700Schasinglulu 	assert((sizeof(struct ivc_channel_header) & (IVC_ALIGN - 1U)) == 0U);
575*91f16700Schasinglulu 
576*91f16700Schasinglulu 	if (((uint64_t)nframes * (uint64_t)frame_size) >= 0x100000000ULL) {
577*91f16700Schasinglulu 		ERROR("nframes * frame_size overflows\n");
578*91f16700Schasinglulu 		return -EINVAL;
579*91f16700Schasinglulu 	}
580*91f16700Schasinglulu 
581*91f16700Schasinglulu 	/*
582*91f16700Schasinglulu 	 * The headers must at least be aligned enough for counters
583*91f16700Schasinglulu 	 * to be accessed atomically.
584*91f16700Schasinglulu 	 */
585*91f16700Schasinglulu 	if ((queue_base1 & (IVC_ALIGN - 1U)) != 0U) {
586*91f16700Schasinglulu 		ERROR("ivc channel start not aligned: %lx\n", queue_base1);
587*91f16700Schasinglulu 		return -EINVAL;
588*91f16700Schasinglulu 	}
589*91f16700Schasinglulu 	if ((queue_base2 & (IVC_ALIGN - 1U)) != 0U) {
590*91f16700Schasinglulu 		ERROR("ivc channel start not aligned: %lx\n", queue_base2);
591*91f16700Schasinglulu 		return -EINVAL;
592*91f16700Schasinglulu 	}
593*91f16700Schasinglulu 
594*91f16700Schasinglulu 	if ((frame_size & (IVC_ALIGN - 1U)) != 0U) {
595*91f16700Schasinglulu 		ERROR("frame size not adequately aligned: %u\n",
596*91f16700Schasinglulu 				frame_size);
597*91f16700Schasinglulu 		return -EINVAL;
598*91f16700Schasinglulu 	}
599*91f16700Schasinglulu 
600*91f16700Schasinglulu 	if (queue_base1 < queue_base2) {
601*91f16700Schasinglulu 		if ((queue_base1 + ((uint64_t)frame_size * nframes)) > queue_base2) {
602*91f16700Schasinglulu 			ERROR("queue regions overlap: %lx + %x, %x\n",
603*91f16700Schasinglulu 					queue_base1, frame_size,
604*91f16700Schasinglulu 					frame_size * nframes);
605*91f16700Schasinglulu 			return -EINVAL;
606*91f16700Schasinglulu 		}
607*91f16700Schasinglulu 	} else {
608*91f16700Schasinglulu 		if ((queue_base2 + ((uint64_t)frame_size * nframes)) > queue_base1) {
609*91f16700Schasinglulu 			ERROR("queue regions overlap: %lx + %x, %x\n",
610*91f16700Schasinglulu 					queue_base2, frame_size,
611*91f16700Schasinglulu 					frame_size * nframes);
612*91f16700Schasinglulu 			return -EINVAL;
613*91f16700Schasinglulu 		}
614*91f16700Schasinglulu 	}
615*91f16700Schasinglulu 
616*91f16700Schasinglulu 	return 0;
617*91f16700Schasinglulu }
618*91f16700Schasinglulu 
619*91f16700Schasinglulu int32_t tegra_ivc_init(struct ivc *ivc, uintptr_t rx_base, uintptr_t tx_base,
620*91f16700Schasinglulu 		uint32_t nframes, uint32_t frame_size,
621*91f16700Schasinglulu 		ivc_notify_function notify)
622*91f16700Schasinglulu {
623*91f16700Schasinglulu 	int32_t result;
624*91f16700Schasinglulu 
625*91f16700Schasinglulu 	/* sanity check input params */
626*91f16700Schasinglulu 	if ((ivc == NULL) || (notify == NULL)) {
627*91f16700Schasinglulu 		return -EINVAL;
628*91f16700Schasinglulu 	}
629*91f16700Schasinglulu 
630*91f16700Schasinglulu 	result = check_ivc_params(rx_base, tx_base, nframes, frame_size);
631*91f16700Schasinglulu 	if (result != 0) {
632*91f16700Schasinglulu 		return result;
633*91f16700Schasinglulu 	}
634*91f16700Schasinglulu 
635*91f16700Schasinglulu 	/*
636*91f16700Schasinglulu 	 * All sizes that can be returned by communication functions should
637*91f16700Schasinglulu 	 * fit in a 32-bit integer.
638*91f16700Schasinglulu 	 */
639*91f16700Schasinglulu 	if (frame_size > (1u << 31)) {
640*91f16700Schasinglulu 		return -E2BIG;
641*91f16700Schasinglulu 	}
642*91f16700Schasinglulu 
643*91f16700Schasinglulu 	ivc->rx_channel = (struct ivc_channel_header *)rx_base;
644*91f16700Schasinglulu 	ivc->tx_channel = (struct ivc_channel_header *)tx_base;
645*91f16700Schasinglulu 	ivc->notify = notify;
646*91f16700Schasinglulu 	ivc->frame_size = frame_size;
647*91f16700Schasinglulu 	ivc->nframes = nframes;
648*91f16700Schasinglulu 	ivc->w_pos = 0U;
649*91f16700Schasinglulu 	ivc->r_pos = 0U;
650*91f16700Schasinglulu 
651*91f16700Schasinglulu 	INFO("%s: done\n", __func__);
652*91f16700Schasinglulu 
653*91f16700Schasinglulu 	return 0;
654*91f16700Schasinglulu }
655