// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */

#define _GNU_SOURCE
#include <linux/compiler.h>
#include <linux/ring_buffer.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/sysinfo.h>
#include <test_progs.h>
#include <uapi/linux/bpf.h>
#include <unistd.h>

#include "user_ringbuf_fail.skel.h"
#include "user_ringbuf_success.skel.h"

#include "../progs/test_user_ringbuf.h"

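/* Each sample posted to a BPF ring buffer is preceded by an 8-byte header
 * (BPF_RINGBUF_HDR_SZ), so c_sample_size is the per-entry footprint, and
 * c_max_entries is how many samples fit in the single-page buffer used
 * throughout these tests.
 */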
static const long c_sample_size = sizeof(struct sample) + BPF_RINGBUF_HDR_SZ;
static const long c_ringbuf_size = 1 << 12; /* 1 small page */
static const long c_max_entries = c_ringbuf_size / c_sample_size;

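/* The BPF programs in user_ringbuf_success hook the getpgid() syscall, so
 * invoking it is a cheap way to kick the kernel into draining whatever is
 * currently in the user ring buffer via bpf_user_ringbuf_drain().
 */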
static void drain_current_samples(void)
{
	syscall(__NR_getpgid);
}

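/* Reserve, fill, and submit num_samples samples, then kick the kernel to
 * drain them. Returns 0 on success, or a negative error if a reservation
 * or snprintf() fails; partially written batches are still drained.
 */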
static int write_samples(struct user_ring_buffer *ringbuf, uint32_t num_samples)
{
	int i, err = 0;

	/* Write some number of samples to the ring buffer. */
	for (i = 0; i < num_samples; i++) {
		struct sample *entry;
		int read;

		entry = user_ring_buffer__reserve(ringbuf, sizeof(*entry));
		if (!entry) {
			err = -errno;
			goto done;
		}

		entry->pid = getpid();
		entry->seq = i;
		entry->value = i * i;

		read = snprintf(entry->comm, sizeof(entry->comm), "%u", i);
		if (read <= 0) {
			/* Assert on the error path to avoid spamming logs with
			 * mostly success messages.
			 */
			ASSERT_GT(read, 0, "snprintf_comm");
			err = read;
			user_ring_buffer__discard(ringbuf, entry);
			goto done;
		}

		user_ring_buffer__submit(ringbuf, entry);
	}

done:
	drain_current_samples();

	return err;
}

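/* Open the success skeleton and shrink both ring buffers to a single page
 * before loading, so that buffer-full conditions are easy to hit.
 */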
static struct user_ringbuf_success *open_load_ringbuf_skel(void)
{
	struct user_ringbuf_success *skel;
	int err;

	skel = user_ringbuf_success__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return NULL;

	err = bpf_map__set_max_entries(skel->maps.user_ringbuf, c_ringbuf_size);
	if (!ASSERT_OK(err, "set_max_entries_user"))
		goto cleanup;

	err = bpf_map__set_max_entries(skel->maps.kernel_ringbuf, c_ringbuf_size);
	if (!ASSERT_OK(err, "set_max_entries_kern"))
		goto cleanup;

	err = user_ringbuf_success__load(skel);
	if (!ASSERT_OK(err, "skel_load"))
		goto cleanup;

	return skel;

cleanup:
	user_ringbuf_success__destroy(skel);
	return NULL;
}

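/* Verify the permissions of a user ring buffer's mmap'able regions. The
 * layout, by page offset into the map fd, is:
 *   page 0:       consumer position (user space: read-only)
 *   page 1:       producer position (user space: read-write)
 *   pages 2..N+1: sample data       (user space: read-write)
 * None of the pages may be mapped or later mprotect()'d executable, and
 * the mappings may not be grown with mremap().
 */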
static void test_user_ringbuf_mappings(void)
{
	int err, rb_fd;
	int page_size = getpagesize();
	void *mmap_ptr;
	struct user_ringbuf_success *skel;

	skel = open_load_ringbuf_skel();
	if (!skel)
		return;

	rb_fd = bpf_map__fd(skel->maps.user_ringbuf);
	/* cons_pos can be mapped R/O, can't add +W or +X with mprotect. */
	mmap_ptr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, rb_fd, 0);
	ASSERT_OK_PTR(mmap_ptr, "ro_cons_pos");
	ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_WRITE), "write_cons_pos_protect");
	ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_cons_pos_protect");
	ASSERT_ERR_PTR(mremap(mmap_ptr, 0, 4 * page_size, MREMAP_MAYMOVE), "remap_cons_pos");
	err = -errno;
	ASSERT_ERR(err, "remap_cons_pos_err");
	ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_ro_cons");

	/* prod_pos can be mapped RW, can't add +X with mprotect. */
	mmap_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
			rb_fd, page_size);
	ASSERT_OK_PTR(mmap_ptr, "rw_prod_pos");
	ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_prod_pos_protect");
	err = -errno;
	ASSERT_ERR(err, "exec_prod_pos_err");
	ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw_prod");

	/* data pages can be mapped RW, can't add +X with mprotect. */
	mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd,
			2 * page_size);
	ASSERT_OK_PTR(mmap_ptr, "rw_data");
	ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_data_protect");
	err = -errno;
	ASSERT_ERR(err, "exec_data_err");
	ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw_data");

	user_ringbuf_success__destroy(skel);
}

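/* Load the skeleton and optionally create the kernel ring buffer (with
 * 'callback' as its sample handler) and/or the user ring buffer, then
 * attach. On failure, anything partially created is torn down and the
 * out-pointers are NULLed.
 */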
static int load_skel_create_ringbufs(struct user_ringbuf_success **skel_out,
				     struct ring_buffer **kern_ringbuf_out,
				     ring_buffer_sample_fn callback,
				     struct user_ring_buffer **user_ringbuf_out)
{
	struct user_ringbuf_success *skel;
	struct ring_buffer *kern_ringbuf = NULL;
	struct user_ring_buffer *user_ringbuf = NULL;
	int err = -ENOMEM, rb_fd;

	skel = open_load_ringbuf_skel();
	if (!skel)
		return err;

	/* only trigger BPF program for current process */
	skel->bss->pid = getpid();

	if (kern_ringbuf_out) {
		rb_fd = bpf_map__fd(skel->maps.kernel_ringbuf);
		kern_ringbuf = ring_buffer__new(rb_fd, callback, skel, NULL);
		if (!ASSERT_OK_PTR(kern_ringbuf, "kern_ringbuf_create"))
			goto cleanup;

		*kern_ringbuf_out = kern_ringbuf;
	}

	if (user_ringbuf_out) {
		rb_fd = bpf_map__fd(skel->maps.user_ringbuf);
		user_ringbuf = user_ring_buffer__new(rb_fd, NULL);
		if (!ASSERT_OK_PTR(user_ringbuf, "user_ringbuf_create"))
			goto cleanup;

		*user_ringbuf_out = user_ringbuf;
		ASSERT_EQ(skel->bss->read, 0, "no_reads_after_load");
	}

	err = user_ringbuf_success__attach(skel);
	if (!ASSERT_OK(err, "skel_attach"))
		goto cleanup;

	*skel_out = skel;
	return 0;

cleanup:
	if (kern_ringbuf_out)
		*kern_ringbuf_out = NULL;
	if (user_ringbuf_out)
		*user_ringbuf_out = NULL;
	ring_buffer__free(kern_ringbuf);
	user_ring_buffer__free(user_ringbuf);
	user_ringbuf_success__destroy(skel);
	return err;
}

static int load_skel_create_user_ringbuf(struct user_ringbuf_success **skel_out,
					 struct user_ring_buffer **ringbuf_out)
{
	return load_skel_create_ringbufs(skel_out, NULL, NULL, ringbuf_out);
}

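/* Bypass libbpf and hand-craft an invalid sample: write a raw 8-byte header
 * whose length field is 'size' at the start of the data area, then publish
 * it by bumping the producer position to 'producer_pos' plus the header
 * size. The kernel is expected to reject the sample, leave the read count
 * at zero, and report 'err' through the BPF program.
 */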
static void manually_write_test_invalid_sample(struct user_ringbuf_success *skel,
					       __u32 size, __u64 producer_pos, int err)
{
	void *data_ptr;
	__u64 *producer_pos_ptr;
	int rb_fd, page_size = getpagesize();

	rb_fd = bpf_map__fd(skel->maps.user_ringbuf);

	ASSERT_EQ(skel->bss->read, 0, "num_samples_before_bad_sample");

	/* Map the producer_pos as RW. */
	producer_pos_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
				MAP_SHARED, rb_fd, page_size);
	ASSERT_OK_PTR(producer_pos_ptr, "producer_pos_ptr");

	/* Map the data pages as RW. */
	data_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size);
	ASSERT_OK_PTR(data_ptr, "rw_data");

	memset(data_ptr, 0, BPF_RINGBUF_HDR_SZ);
	*(__u32 *)data_ptr = size;

	/* Synchronizes with smp_load_acquire() in __bpf_user_ringbuf_peek() in the kernel. */
	smp_store_release(producer_pos_ptr, producer_pos + BPF_RINGBUF_HDR_SZ);

	drain_current_samples();
	ASSERT_EQ(skel->bss->read, 0, "num_samples_after_bad_sample");
	ASSERT_EQ(skel->bss->err, err, "err_after_bad_sample");

	ASSERT_OK(munmap(producer_pos_ptr, page_size), "unmap_producer_pos");
	ASSERT_OK(munmap(data_ptr, page_size), "unmap_data_ptr");
}

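/* Samples must be 8-byte aligned; a sample (and producer position) of
 * (1 << 5) + 7 bytes should be rejected with -EINVAL.
 */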
static void test_user_ringbuf_post_misaligned(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	int err;
	__u32 size = (1 << 5) + 7;

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (!ASSERT_OK(err, "misaligned_skel"))
		return;

	manually_write_test_invalid_sample(skel, size, size, -EINVAL);
	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

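/* Publish a producer position that falls 8 bytes short of covering the
 * sample it claims to publish; the kernel should fail the drain with
 * -EINVAL.
 */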
static void test_user_ringbuf_post_producer_wrong_offset(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	int err;
	__u32 size = (1 << 5);

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (!ASSERT_OK(err, "wrong_offset_skel"))
		return;

	manually_write_test_invalid_sample(skel, size, size - 8, -EINVAL);
	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

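/* A sample header claiming to be as large as the entire ring buffer can
 * never fit once the header itself is accounted for; the kernel should
 * reject it with -E2BIG.
 */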
static void test_user_ringbuf_post_larger_than_ringbuf_sz(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	int err;
	__u32 size = c_ringbuf_size;

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (!ASSERT_OK(err, "huge_sample_skel"))
		return;

	manually_write_test_invalid_sample(skel, size, size, -E2BIG);
	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

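/* Sanity check: write two samples through the libbpf API and verify that
 * the BPF program consumes both.
 */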
static void test_user_ringbuf_basic(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	int err;

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (!ASSERT_OK(err, "ringbuf_basic_skel"))
		return;

	ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before");

	err = write_samples(ringbuf, 2);
	if (!ASSERT_OK(err, "write_samples"))
		goto cleanup;

	ASSERT_EQ(skel->bss->read, 2, "num_samples_read_after");

cleanup:
	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

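/* Reserve a single sample that occupies all usable space in the buffer
 * (the buffer size minus the 8-byte header) and verify it is read once
 * the kernel is kicked.
 */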
static void test_user_ringbuf_sample_full_ring_buffer(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	int err;
	void *sample;

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (!ASSERT_OK(err, "ringbuf_full_sample_skel"))
		return;

	sample = user_ring_buffer__reserve(ringbuf, c_ringbuf_size - BPF_RINGBUF_HDR_SZ);
	if (!ASSERT_OK_PTR(sample, "full_sample"))
		goto cleanup;

	user_ring_buffer__submit(ringbuf, sample);
	ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before");
	drain_current_samples();
	ASSERT_EQ(skel->bss->read, 1, "num_samples_read_after");

cleanup:
	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

static void test_user_ringbuf_post_alignment_autoadjust(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	struct sample *sample;
	int err;

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (!ASSERT_OK(err, "ringbuf_align_autoadjust_skel"))
		return;

	/* libbpf should automatically round any sample up to an 8-byte alignment. */
	sample = user_ring_buffer__reserve(ringbuf, sizeof(*sample) + 1);
	ASSERT_OK_PTR(sample, "reserve_autoaligned");
	user_ring_buffer__submit(ringbuf, sample);

	ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before");
	drain_current_samples();
	ASSERT_EQ(skel->bss->read, 1, "num_samples_read_after");

	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

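/* Try to write 5x more samples than the buffer can hold. Reservations must
 * start failing once the buffer is full, so write_samples() should return
 * a negative error and the kernel should have read exactly c_max_entries
 * samples.
 */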
static void test_user_ringbuf_overfill(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	int err;

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (err)
		return;

	err = write_samples(ringbuf, c_max_entries * 5);
	ASSERT_ERR(err, "write_samples");
	ASSERT_EQ(skel->bss->read, c_max_entries, "max_entries");

	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

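/* Fill the buffer with discarded samples and verify that the kernel reads
 * none of them, but that their space is reclaimed by the drain so that a
 * subsequent reservation succeeds.
 */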
static void test_user_ringbuf_discards_properly_ignored(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	int err, num_discarded = 0;
	__u64 *token;

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (err)
		return;

	ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before");

	while (1) {
		/* Write samples until the buffer is full. */
		token = user_ring_buffer__reserve(ringbuf, sizeof(*token));
		if (!token)
			break;

		user_ring_buffer__discard(ringbuf, token);
		num_discarded++;
	}

	if (!ASSERT_GT(num_discarded, 0, "num_discarded"))
		goto cleanup;

	/* Should not read any samples, as they are all discarded. */
	ASSERT_EQ(skel->bss->read, 0, "num_pre_kick");
	drain_current_samples();
	ASSERT_EQ(skel->bss->read, 0, "num_post_kick");

	/* Now that the ring buffer has been drained, we should be able to
	 * reserve another token.
	 */
	token = user_ring_buffer__reserve(ringbuf, sizeof(*token));

	if (!ASSERT_OK_PTR(token, "new_token"))
		goto cleanup;

	user_ring_buffer__discard(ringbuf, token);
cleanup:
	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

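/* Stream far more samples than the buffer can hold by writing them in
 * batches of at most c_max_entries, letting the kernel drain the buffer
 * between batches.
 */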
static void test_user_ringbuf_loop(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	uint32_t total_samples = 8192;
	uint32_t remaining_samples = total_samples;
	int err;

	BUILD_BUG_ON(total_samples <= c_max_entries);
	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (err)
		return;

	do {
		uint32_t curr_samples;

		curr_samples = remaining_samples > c_max_entries
			? c_max_entries : remaining_samples;
		err = write_samples(ringbuf, curr_samples);
		if (err != 0) {
			/* Assert inside of if statement to avoid flooding logs
			 * on the success path.
			 */
			ASSERT_OK(err, "write_samples");
			goto cleanup;
		}

		remaining_samples -= curr_samples;
		ASSERT_EQ(skel->bss->read, total_samples - remaining_samples,
			  "current_batched_entries");
	} while (remaining_samples > 0);
	ASSERT_EQ(skel->bss->read, total_samples, "total_batched_entries");

cleanup:
	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

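/* Post a single test_msg to the user ring buffer, filling in whichever
 * operand the opcode consumes. Returns 0 on success, -ENOMEM if the
 * reservation fails, or -EINVAL for an unknown op.
 */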
static int send_test_message(struct user_ring_buffer *ringbuf,
			     enum test_msg_op op, s64 operand_64,
			     s32 operand_32)
{
	struct test_msg *msg;

	msg = user_ring_buffer__reserve(ringbuf, sizeof(*msg));
	if (!msg) {
		/* Assert on the error path to avoid spamming logs with mostly
		 * success messages.
		 */
		ASSERT_OK_PTR(msg, "reserve_msg");
		return -ENOMEM;
	}

	msg->msg_op = op;

	switch (op) {
	case TEST_MSG_OP_INC64:
	case TEST_MSG_OP_MUL64:
		msg->operand_64 = operand_64;
		break;
	case TEST_MSG_OP_INC32:
	case TEST_MSG_OP_MUL32:
		msg->operand_32 = operand_32;
		break;
	default:
		PRINT_FAIL("Invalid op %d\n", op);
		user_ring_buffer__discard(ringbuf, msg);
		return -EINVAL;
	}

	user_ring_buffer__submit(ringbuf, msg);

	return 0;
}

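/* prctl() plays the same kick role for the message-protocol program that
 * getpgid() plays for the drain program: a benign syscall used only to
 * trigger the BPF side into reading pending messages.
 */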
static void kick_kernel_read_messages(void)
{
	syscall(__NR_prctl);
}

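/* Callback for samples posted by the kernel to the kernel ring buffer:
 * apply each op to the user-space counter, skel->bss->user_mutated.
 */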
static int handle_kernel_msg(void *ctx, void *data, size_t len)
{
	struct user_ringbuf_success *skel = ctx;
	struct test_msg *msg = data;

	switch (msg->msg_op) {
	case TEST_MSG_OP_INC64:
		skel->bss->user_mutated += msg->operand_64;
		return 0;
	case TEST_MSG_OP_INC32:
		skel->bss->user_mutated += msg->operand_32;
		return 0;
	case TEST_MSG_OP_MUL64:
		skel->bss->user_mutated *= msg->operand_64;
		return 0;
	case TEST_MSG_OP_MUL32:
		skel->bss->user_mutated *= msg->operand_32;
		return 0;
	default:
		fprintf(stderr, "Invalid op %d\n", msg->msg_op);
		return -EINVAL;
	}
}

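/* Consume everything the kernel has posted to its ring buffer. The test
 * expects each kick to have produced exactly 8 samples.
 */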
static void drain_kernel_messages_buffer(struct ring_buffer *kern_ringbuf,
					 struct user_ringbuf_success *skel)
{
	int cnt;

	cnt = ring_buffer__consume(kern_ringbuf);
	ASSERT_EQ(cnt, 8, "consume_kern_ringbuf");
	ASSERT_OK(skel->bss->err, "consume_kern_ringbuf_err");
}

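/* End-to-end message protocol: user space posts arithmetic ops through the
 * user ring buffer and mirrors each one into expected_kern, while the BPF
 * program applies them to kern_mutated and posts messages of its own
 * through the kernel ring buffer, which handle_kernel_msg() folds into
 * user_mutated. Every 8 iterations the kernel is kicked, kern_mutated is
 * checked against expected_kern, and the kernel ring buffer is drained.
 */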
static void test_user_ringbuf_msg_protocol(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *user_ringbuf;
	struct ring_buffer *kern_ringbuf;
	int err, i;
	__u64 expected_kern = 0;

	err = load_skel_create_ringbufs(&skel, &kern_ringbuf, handle_kernel_msg, &user_ringbuf);
	if (!ASSERT_OK(err, "create_ringbufs"))
		return;

	for (i = 0; i < 64; i++) {
		enum test_msg_op op = i % TEST_MSG_OP_NUM_OPS;
		__u64 operand_64 = TEST_OP_64;
		__u32 operand_32 = TEST_OP_32;

		err = send_test_message(user_ringbuf, op, operand_64, operand_32);
		if (err) {
			/* Only assert on a failure to avoid spamming success logs. */
			ASSERT_OK(err, "send_test_message");
			goto cleanup;
		}

		switch (op) {
		case TEST_MSG_OP_INC64:
			expected_kern += operand_64;
			break;
		case TEST_MSG_OP_INC32:
			expected_kern += operand_32;
			break;
		case TEST_MSG_OP_MUL64:
			expected_kern *= operand_64;
			break;
		case TEST_MSG_OP_MUL32:
			expected_kern *= operand_32;
			break;
		default:
			PRINT_FAIL("Unexpected op %d\n", op);
			goto cleanup;
		}

		if (i % 8 == 0) {
			kick_kernel_read_messages();
			ASSERT_EQ(skel->bss->kern_mutated, expected_kern, "expected_kern");
			ASSERT_EQ(skel->bss->err, 0, "bpf_prog_err");
			drain_kernel_messages_buffer(kern_ringbuf, skel);
		}
	}

cleanup:
	ring_buffer__free(kern_ringbuf);
	user_ring_buffer__free(user_ringbuf);
	user_ringbuf_success__destroy(skel);
}

static void *kick_kernel_cb(void *arg)
{
	/* Kick the kernel, causing it to drain the ring buffer and then wake
	 * up the test thread waiting on epoll.
	 */
	syscall(__NR_prlimit64);

	return NULL;
}

static int spawn_kick_thread_for_poll(void)
{
	pthread_t thread;

	return pthread_create(&thread, NULL, kick_kernel_cb, NULL);
}

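/* Fill the buffer, verify that a blocking reserve times out while the
 * kernel has yet to drain anything, then spawn a thread that kicks the
 * kernel and verify that the blocked reserve wakes up and succeeds once
 * space has been reclaimed.
 */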
static void test_user_ringbuf_blocking_reserve(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	int err, num_written = 0;
	__u64 *token;

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (err)
		return;

	ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before");

	while (1) {
		/* Write samples until the buffer is full. */
		token = user_ring_buffer__reserve(ringbuf, sizeof(*token));
		if (!token)
			break;

		*token = 0xdeadbeef;

		user_ring_buffer__submit(ringbuf, token);
		num_written++;
	}

	if (!ASSERT_GT(num_written, 0, "num_written"))
		goto cleanup;

	/* Should not have read any samples until the kernel is kicked. */
	ASSERT_EQ(skel->bss->read, 0, "num_pre_kick");

	/* Should time out after 1 second without a sample becoming available. */
	token = user_ring_buffer__reserve_blocking(ringbuf, sizeof(*token), 1000);
	if (!ASSERT_EQ(token, NULL, "pre_kick_timeout_token"))
		goto cleanup;

	err = spawn_kick_thread_for_poll();
	if (!ASSERT_EQ(err, 0, "deferred_kick_thread"))
		goto cleanup;

	/* After spawning another thread that asynchronously kicks the kernel
	 * to drain the messages, we're able to block and successfully get a
	 * sample once we receive an event notification.
	 */
	token = user_ring_buffer__reserve_blocking(ringbuf, sizeof(*token), 10000);

	if (!ASSERT_OK_PTR(token, "block_token"))
		goto cleanup;

	ASSERT_GT(skel->bss->read, 0, "num_post_kick");
	ASSERT_LE(skel->bss->read, num_written, "num_read_le_num_written");
	ASSERT_EQ(skel->bss->err, 0, "err_post_poll");
	user_ring_buffer__discard(ringbuf, token);

cleanup:
	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

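/* Convenience macro and table of success subtests; each entry is run below
 * under test__start_subtest() using the function name as the subtest name.
 */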
#define SUCCESS_TEST(_func) { _func, #_func }

static struct {
	void (*test_callback)(void);
	const char *test_name;
} success_tests[] = {
	SUCCESS_TEST(test_user_ringbuf_mappings),
	SUCCESS_TEST(test_user_ringbuf_post_misaligned),
	SUCCESS_TEST(test_user_ringbuf_post_producer_wrong_offset),
	SUCCESS_TEST(test_user_ringbuf_post_larger_than_ringbuf_sz),
	SUCCESS_TEST(test_user_ringbuf_basic),
	SUCCESS_TEST(test_user_ringbuf_sample_full_ring_buffer),
	SUCCESS_TEST(test_user_ringbuf_post_alignment_autoadjust),
	SUCCESS_TEST(test_user_ringbuf_overfill),
	SUCCESS_TEST(test_user_ringbuf_discards_properly_ignored),
	SUCCESS_TEST(test_user_ringbuf_loop),
	SUCCESS_TEST(test_user_ringbuf_msg_protocol),
	SUCCESS_TEST(test_user_ringbuf_blocking_reserve),
};

void test_user_ringbuf(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(success_tests); i++) {
		if (!test__start_subtest(success_tests[i].test_name))
			continue;

		success_tests[i].test_callback();
	}

	RUN_TESTS(user_ringbuf_fail);
}