1/*
2 * Copyright (c) 2016, NVIDIA CORPORATION.
 * Copyright 2019, Data61
4 * Commonwealth Scientific and Industrial Research Organisation (CSIRO)
5 * ABN 41 687 119 230.
6 *
7 * This software may be distributed and modified according to the terms of
8 * the GNU General Public License version 2. Note that NO WARRANTY is provided.
9 * See "LICENSE_GPLv2.txt" for details.
10 *
11 * @TAG(DATA61_GPL)
12 */
13
14/*
 * This is a port of the Tegra IVC sources from U-Boot with some additional
16 * modifications. Unfortunately there's no documentation in manuals whatsoever
17 * about this protocol.
18 *
 * One of the biggest changes is the removal of data cache invalidation and
20 * flushing functions. Memory backing the channels (which is usually device
21 * memory) should be mapped uncached.
22 */
23
#include <errno.h>
#include <inttypes.h>
#include <utils/util.h>

#include <tx2bpmp/ivc.h>
28
/*
 * Alignment of the channel header halves and of each frame. One cache line
 * is owned by exactly one end of the connection (see the comment on
 * struct tegra_ivc_channel_header below).
 */
#define TEGRA_IVC_ALIGN 64

/*
 * Evaluates to a volatile-qualified pointer to x. The unused dummy variable
 * exists only so that x is type-checked at compile time.
 *
 * NOTE(review): the names __ACCESS_ONCE and __var are in the implementation's
 * reserved namespace (leading double underscore) — consider renaming if this
 * macro is ever touched again.
 */
#define __ACCESS_ONCE(x) ({ \
      UNUSED typeof(x) __var = (typeof(x)) 0; \
     (volatile typeof(x) *)&(x); })
/*
 * Force exactly one load/store of x (no register caching, no merging of
 * accesses by the compiler). Used for all counters shared with the peer.
 */
#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))

/* Full system memory barrier (AArch64 "dsb sy"): orders all memory accesses
 * issued before the barrier against all accesses issued after it. */
#define mb() asm volatile ("dsb sy" : : : "memory")
37
38/*
39 * IVC channel reset protocol.
40 *
41 * Each end uses its tx_channel.state to indicate its synchronization state.
42 */
/*
 * Synchronization states published in tx_channel.state. The numeric values
 * are part of the shared-memory protocol with the remote end and must not
 * be changed.
 */
enum ivc_state {
	/*
	 * This value is zero for backwards compatibility with services that
	 * assume channels to be initially zeroed. Such channels are in an
	 * initially valid state, but cannot be asynchronously reset, and must
	 * maintain a valid state at all times.
	 *
	 * The transmitting end can enter the established state from the sync or
	 * ack state when it observes the receiving endpoint in the ack or
	 * established state, indicating that has cleared the counters in our
	 * rx_channel.
	 */
	ivc_state_established = 0,

	/*
	 * If an endpoint is observed in the sync state, the remote endpoint is
	 * allowed to clear the counters it owns asynchronously with respect to
	 * the current endpoint. Therefore, the current endpoint is no longer
	 * allowed to communicate.
	 */
	ivc_state_sync,

	/*
	 * When the transmitting end observes the receiving end in the sync
	 * state, it can clear the w_count and r_count and transition to the ack
	 * state. If the remote endpoint observes us in the ack state, it can
	 * return to the established state once it has cleared its counters.
	 */
	ivc_state_ack
};
73
74/*
75 * This structure is divided into two-cache aligned parts, the first is only
76 * written through the tx_channel pointer, while the second is only written
77 * through the rx_channel pointer. This delineates ownership of the cache lines,
78 * which is critical to performance and necessary in non-cache coherent
79 * implementations.
80 */
struct tegra_ivc_channel_header {
	union {
		/* fields owned by the transmitting end */
		struct {
			/* free-running count of frames written; never wraps
			 * back to a smaller value (uint32_t modular arithmetic) */
			uint32_t w_count;
			/* enum ivc_state value published to the peer */
			uint32_t state;
		};
		/* pad the writer-owned fields to a full cache line */
		uint8_t w_align[TEGRA_IVC_ALIGN];
	};
	union {
		/* fields owned by the receiving end */
		/* free-running count of frames consumed */
		uint32_t r_count;
		/* pad the reader-owned field to a full cache line */
		uint8_t r_align[TEGRA_IVC_ALIGN];
	};
};
96
97static inline unsigned long tegra_ivc_frame_addr(struct tegra_ivc *ivc,
98					 struct tegra_ivc_channel_header *h,
99					 uint32_t frame)
100{
101	ZF_LOGF_IF(frame >= ivc->nframes,
102               "Accesing non-existent frame number %d of %d frames", frame, ivc->nframes);
103
104	return ((unsigned long)h) + sizeof(struct tegra_ivc_channel_header) +
105	       (ivc->frame_size * frame);
106}
107
/*
 * Same as tegra_ivc_frame_addr(), but returns the frame address as a
 * usable pointer.
 */
static inline void *tegra_ivc_frame_pointer(struct tegra_ivc *ivc,
					    struct tegra_ivc_channel_header *ch,
					    uint32_t frame)
{
	unsigned long addr = tegra_ivc_frame_addr(ivc, ch, frame);

	return (void *)addr;
}
114
115static inline int tegra_ivc_channel_empty(struct tegra_ivc *ivc,
116					  struct tegra_ivc_channel_header *ch)
117{
118	/*
119	 * This function performs multiple checks on the same values with
120	 * security implications, so create snapshots with ACCESS_ONCE() to
121	 * ensure that these checks use the same values.
122	 */
123	uint32_t w_count = ACCESS_ONCE(ch->w_count);
124	uint32_t r_count = ACCESS_ONCE(ch->r_count);
125
126	/*
127	 * Perform an over-full check to prevent denial of service attacks where
128	 * a server could be easily fooled into believing that there's an
129	 * extremely large number of frames ready, since receivers are not
130	 * expected to check for full or over-full conditions.
131	 *
132	 * Although the channel isn't empty, this is an invalid case caused by
133	 * a potentially malicious peer, so returning empty is safer, because it
134	 * gives the impression that the channel has gone silent.
135	 */
136	if (w_count - r_count > ivc->nframes)
137		return 1;
138
139	return w_count == r_count;
140}
141
142static inline int tegra_ivc_channel_full(struct tegra_ivc *ivc,
143					 struct tegra_ivc_channel_header *ch)
144{
145	/*
146	 * Invalid cases where the counters indicate that the queue is over
147	 * capacity also appear full.
148	 */
149	return (ACCESS_ONCE(ch->w_count) - ACCESS_ONCE(ch->r_count)) >=
150	       ivc->nframes;
151}
152
153static inline void tegra_ivc_advance_rx(struct tegra_ivc *ivc)
154{
155	ACCESS_ONCE(ivc->rx_channel->r_count) =
156			ACCESS_ONCE(ivc->rx_channel->r_count) + 1;
157
158	if (ivc->r_pos == ivc->nframes - 1)
159		ivc->r_pos = 0;
160	else
161		ivc->r_pos++;
162}
163
164static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc)
165{
166	ACCESS_ONCE(ivc->tx_channel->w_count) =
167			ACCESS_ONCE(ivc->tx_channel->w_count) + 1;
168
169	if (ivc->w_pos == ivc->nframes - 1)
170		ivc->w_pos = 0;
171	else
172		ivc->w_pos++;
173}
174
175static inline int tegra_ivc_check_read(struct tegra_ivc *ivc)
176{
177	/*
178	 * tx_channel->state is set locally, so it is not synchronized with
179	 * state from the remote peer. The remote peer cannot reset its
180	 * transmit counters until we've acknowledged its synchronization
181	 * request, so no additional synchronization is required because an
182	 * asynchronous transition of rx_channel->state to ivc_state_ack is not
183	 * allowed.
184	 */
185	if (ivc->tx_channel->state != ivc_state_established)
186		return -ECONNRESET;
187
188	/*
189	 * Avoid unnecessary invalidations when performing repeated accesses to
190	 * an IVC channel by checking the old queue pointers first.
191	 * Synchronization is only necessary when these pointers indicate empty
192	 * or full.
193	 */
194	if (!tegra_ivc_channel_empty(ivc, ivc->rx_channel))
195		return 0;
196
197	return tegra_ivc_channel_empty(ivc, ivc->rx_channel) ? -ENOMEM : 0;
198}
199
200static inline int tegra_ivc_check_write(struct tegra_ivc *ivc)
201{
202	if (ivc->tx_channel->state != ivc_state_established)
203		return -ECONNRESET;
204
205	if (!tegra_ivc_channel_full(ivc, ivc->tx_channel))
206		return 0;
207
208	return tegra_ivc_channel_full(ivc, ivc->tx_channel) ? -ENOMEM : 0;
209}
210
211static inline uint32_t tegra_ivc_channel_avail_count(struct tegra_ivc *ivc,
212	struct tegra_ivc_channel_header *ch)
213{
214	/*
215	 * This function isn't expected to be used in scenarios where an
216	 * over-full situation can lead to denial of service attacks. See the
217	 * comment in tegra_ivc_channel_empty() for an explanation about
218	 * special over-full considerations.
219	 */
220	return ACCESS_ONCE(ch->w_count) - ACCESS_ONCE(ch->r_count);
221}
222
223int tegra_ivc_read_get_next_frame(struct tegra_ivc *ivc, void **frame)
224{
225	int result = tegra_ivc_check_read(ivc);
226	if (result < 0)
227		return result;
228
229	/*
230	 * Order observation of w_pos potentially indicating new data before
231	 * data read.
232	 */
233	mb();
234
235	*frame = tegra_ivc_frame_pointer(ivc, ivc->rx_channel, ivc->r_pos);
236
237	return 0;
238}
239
240int tegra_ivc_read_advance(struct tegra_ivc *ivc)
241{
242	int result;
243
244	/*
245	 * No read barriers or synchronization here: the caller is expected to
246	 * have already observed the channel non-empty. This check is just to
247	 * catch programming errors.
248	 */
249	result = tegra_ivc_check_read(ivc);
250	if (result)
251		return result;
252
253	tegra_ivc_advance_rx(ivc);
254
255	/*
256	 * Ensure our write to r_pos occurs before our read from w_pos.
257	 */
258	mb();
259
260	if (tegra_ivc_channel_avail_count(ivc, ivc->rx_channel) ==
261	    ivc->nframes - 1)
262		ivc->notify(ivc, ivc->notify_token);
263
264	return 0;
265}
266
267int tegra_ivc_write_get_next_frame(struct tegra_ivc *ivc, void **frame)
268{
269	int result = tegra_ivc_check_write(ivc);
270	if (result)
271		return result;
272
273	*frame = tegra_ivc_frame_pointer(ivc, ivc->tx_channel, ivc->w_pos);
274
275	return 0;
276}
277
278int tegra_ivc_write_advance(struct tegra_ivc *ivc)
279{
280	int result;
281
282	result = tegra_ivc_check_write(ivc);
283	if (result)
284		return result;
285
286	/*
287	 * Order any possible stores to the frame before update of w_pos.
288	 */
289	mb();
290
291	tegra_ivc_advance_tx(ivc);
292
293	/*
294	 * Ensure our write to w_pos occurs before our read from r_pos.
295	 */
296	mb();
297
298	if (tegra_ivc_channel_avail_count(ivc, ivc->tx_channel) == 1)
299		ivc->notify(ivc, ivc->notify_token);
300
301	return 0;
302}
303
304/*
305 * ===============================================================
306 *  IVC State Transition Table - see tegra_ivc_channel_notified()
307 * ===============================================================
308 *
309 *	local	remote	action
310 *	-----	------	-----------------------------------
311 *	SYNC	EST	<none>
312 *	SYNC	ACK	reset counters; move to EST; notify
313 *	SYNC	SYNC	reset counters; move to ACK; notify
314 *	ACK	EST	move to EST; notify
315 *	ACK	ACK	move to EST; notify
316 *	ACK	SYNC	reset counters; move to ACK; notify
317 *	EST	EST	<none>
318 *	EST	ACK	<none>
319 *	EST	SYNC	reset counters; move to ACK; notify
320 *
321 * ===============================================================
322 */
323int tegra_ivc_channel_notified(struct tegra_ivc *ivc)
324{
325	enum ivc_state peer_state;
326
327	/* Copy the receiver's state out of shared memory. */
328	peer_state = ACCESS_ONCE(ivc->rx_channel->state);
329
330	if (peer_state == ivc_state_sync) {
331		/*
332		 * Order observation of ivc_state_sync before stores clearing
333		 * tx_channel.
334		 */
335		mb();
336
337		/*
338		 * Reset tx_channel counters. The remote end is in the SYNC
339		 * state and won't make progress until we change our state,
340		 * so the counters are not in use at this time.
341		 */
342		ivc->tx_channel->w_count = 0;
343		ivc->rx_channel->r_count = 0;
344
345		ivc->w_pos = 0;
346		ivc->r_pos = 0;
347
348		/*
349		 * Ensure that counters appear cleared before new state can be
350		 * observed.
351		 */
352		mb();
353
354		/*
355		 * Move to ACK state. We have just cleared our counters, so it
356		 * is now safe for the remote end to start using these values.
357		 */
358		ivc->tx_channel->state = ivc_state_ack;
359
360		/*
361		 * Notify remote end to observe state transition.
362		 */
363		ivc->notify(ivc, ivc->notify_token);
364	} else if (ivc->tx_channel->state == ivc_state_sync &&
365			peer_state == ivc_state_ack) {
366		/*
367		 * Order observation of ivc_state_sync before stores clearing
368		 * tx_channel.
369		 */
370		mb();
371
372		/*
373		 * Reset tx_channel counters. The remote end is in the ACK
374		 * state and won't make progress until we change our state,
375		 * so the counters are not in use at this time.
376		 */
377		ivc->tx_channel->w_count = 0;
378		ivc->rx_channel->r_count = 0;
379
380		ivc->w_pos = 0;
381		ivc->r_pos = 0;
382
383		/*
384		 * Ensure that counters appear cleared before new state can be
385		 * observed.
386		 */
387		mb();
388
389		/*
390		 * Move to ESTABLISHED state. We know that the remote end has
391		 * already cleared its counters, so it is safe to start
392		 * writing/reading on this channel.
393		 */
394		ivc->tx_channel->state = ivc_state_established;
395
396		/*
397		 * Notify remote end to observe state transition.
398		 */
399		ivc->notify(ivc, ivc->notify_token);
400	} else if (ivc->tx_channel->state == ivc_state_ack) {
401		/*
402		 * At this point, we have observed the peer to be in either
403		 * the ACK or ESTABLISHED state. Next, order observation of
404		 * peer state before storing to tx_channel.
405		 */
406		mb();
407
408		/*
409		 * Move to ESTABLISHED state. We know that we have previously
410		 * cleared our counters, and we know that the remote end has
411		 * cleared its counters, so it is safe to start writing/reading
412		 * on this channel.
413		 */
414		ivc->tx_channel->state = ivc_state_established;
415
416		/*
417		 * Notify remote end to observe state transition.
418		 */
419		ivc->notify(ivc, ivc->notify_token);
420	} else {
421		/*
422		 * There is no need to handle any further action. Either the
423		 * channel is already fully established, or we are waiting for
424		 * the remote end to catch up with our current state. Refer
425		 * to the diagram in "IVC State Transition Table" above.
426		 */
427	}
428
429	if (ivc->tx_channel->state != ivc_state_established)
430		return -EAGAIN;
431
432	return 0;
433}
434
/*
 * Initiate the reset handshake: publish SYNC in our transmit state and kick
 * the remote end so it runs its tegra_ivc_channel_notified() equivalent.
 * After calling this, poll tegra_ivc_channel_notified() until it returns 0.
 */
void tegra_ivc_channel_reset(struct tegra_ivc *ivc)
{
	ivc->tx_channel->state = ivc_state_sync;
	ivc->notify(ivc, ivc->notify_token);
}
440
441static int check_ivc_params(unsigned long qbase1, unsigned long qbase2, uint32_t nframes,
442			    uint32_t frame_size)
443{
444	int ret = 0;
445
446	ZF_LOGF_IF(OFFSETOF(struct tegra_ivc_channel_header, w_count) & (TEGRA_IVC_ALIGN - 1),
447               "w_count is not properly aligned to %d", TEGRA_IVC_ALIGN);
448	ZF_LOGF_IF(OFFSETOF(struct tegra_ivc_channel_header, r_count) & (TEGRA_IVC_ALIGN - 1),
449               "r_count is not properly aligned to %d", TEGRA_IVC_ALIGN);
450	ZF_LOGF_IF(sizeof(struct tegra_ivc_channel_header) & (TEGRA_IVC_ALIGN - 1),
451               "sizeof(struct tegre_ivc_channel_header) = %zd is not algined to %d",
452               sizeof(struct tegra_ivc_channel_header), TEGRA_IVC_ALIGN);
453
454	if ((uint64_t)nframes * (uint64_t)frame_size >= 0x100000000) {
455		ZF_LOGE("tegra_ivc: nframes * frame_size overflows\n");
456		return -EINVAL;
457	}
458
459	/*
460	 * The headers must at least be aligned enough for counters
461	 * to be accessed atomically.
462	 */
463	if ((qbase1 & (TEGRA_IVC_ALIGN - 1)) ||
464	    (qbase2 & (TEGRA_IVC_ALIGN - 1))) {
465		ZF_LOGE("tegra_ivc: channel start not aligned\n");
466		return -EINVAL;
467	}
468
469	if (frame_size & (TEGRA_IVC_ALIGN - 1)) {
470		ZF_LOGE("tegra_ivc: frame size not adequately aligned\n");
471		return -EINVAL;
472	}
473
474	if (qbase1 < qbase2) {
475		if (qbase1 + frame_size * nframes > qbase2)
476			ret = -EINVAL;
477	} else {
478		if (qbase2 + frame_size * nframes > qbase1)
479			ret = -EINVAL;
480	}
481
482	if (ret) {
483		ZF_LOGE("tegra_ivc: queue regions overlap\n");
484		return ret;
485	}
486
487	return 0;
488}
489
490int tegra_ivc_init(struct tegra_ivc *ivc, unsigned long rx_base, unsigned long tx_base,
491		   uint32_t nframes, uint32_t frame_size,
492		   void (*notify)(struct tegra_ivc *, void *), void *notify_token)
493{
494	int ret;
495
496	if (!ivc)
497		return -EINVAL;
498
499	ret = check_ivc_params(rx_base, tx_base, nframes, frame_size);
500	if (ret)
501		return ret;
502
503	ivc->rx_channel = (struct tegra_ivc_channel_header *)rx_base;
504	ivc->tx_channel = (struct tegra_ivc_channel_header *)tx_base;
505	ivc->w_pos = 0;
506	ivc->r_pos = 0;
507	ivc->nframes = nframes;
508	ivc->frame_size = frame_size;
509	ivc->notify = notify;
510    ivc->notify_token = notify_token;
511
512	return 0;
513}
514