1b2441318SGreg Kroah-Hartman// SPDX-License-Identifier: GPL-2.0
29fb6bc5bSMichael S. Tsirkin#define _GNU_SOURCE
39fb6bc5bSMichael S. Tsirkin#include "main.h"
49fb6bc5bSMichael S. Tsirkin#include <stdlib.h>
59fb6bc5bSMichael S. Tsirkin#include <stdio.h>
69fb6bc5bSMichael S. Tsirkin#include <string.h>
79fb6bc5bSMichael S. Tsirkin#include <pthread.h>
89fb6bc5bSMichael S. Tsirkin#include <malloc.h>
99fb6bc5bSMichael S. Tsirkin#include <assert.h>
109fb6bc5bSMichael S. Tsirkin#include <errno.h>
119fb6bc5bSMichael S. Tsirkin#include <limits.h>
129fb6bc5bSMichael S. Tsirkin
/* Minimal userspace stand-ins for the kernel facilities ptr_ring.h expects. */
#define SMP_CACHE_BYTES 64
#define cache_line_size() SMP_CACHE_BYTES
#define ____cacheline_aligned_in_smp __attribute__ ((aligned (SMP_CACHE_BYTES)))
#define unlikely(x)    (__builtin_expect(!!(x), 0))
#define likely(x)    (__builtin_expect(!!(x), 1))
/* Round x up to the next multiple of a. */
#define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))
#define SIZE_MAX        (~(size_t)0)
#define KMALLOC_MAX_SIZE SIZE_MAX

/* Kernel spinlocks are emulated with pthread spinlocks. */
typedef pthread_spinlock_t  spinlock_t;

/* gfp_t allocation flags; only __GFP_ZERO is honoured by this shim. */
typedef int gfp_t;
#define __GFP_ZERO 0x1
2659e6ae53SMichael S. Tsirkin
275790eabcSMichael S. Tsirkinstatic void *kmalloc(unsigned size, gfp_t gfp)
289fb6bc5bSMichael S. Tsirkin{
299fb6bc5bSMichael S. Tsirkin	void *p = memalign(64, size);
309fb6bc5bSMichael S. Tsirkin	if (!p)
319fb6bc5bSMichael S. Tsirkin		return p;
329fb6bc5bSMichael S. Tsirkin
335790eabcSMichael S. Tsirkin	if (gfp & __GFP_ZERO)
345790eabcSMichael S. Tsirkin		memset(p, 0, size);
359fb6bc5bSMichael S. Tsirkin	return p;
369fb6bc5bSMichael S. Tsirkin}
379fb6bc5bSMichael S. Tsirkin
385790eabcSMichael S. Tsirkinstatic inline void *kzalloc(unsigned size, gfp_t flags)
395790eabcSMichael S. Tsirkin{
405790eabcSMichael S. Tsirkin	return kmalloc(size, flags | __GFP_ZERO);
415790eabcSMichael S. Tsirkin}
425790eabcSMichael S. Tsirkin
435790eabcSMichael S. Tsirkinstatic inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
445790eabcSMichael S. Tsirkin{
455790eabcSMichael S. Tsirkin	if (size != 0 && n > SIZE_MAX / size)
465790eabcSMichael S. Tsirkin		return NULL;
475790eabcSMichael S. Tsirkin	return kmalloc(n * size, flags);
485790eabcSMichael S. Tsirkin}
495790eabcSMichael S. Tsirkin
505790eabcSMichael S. Tsirkinstatic inline void *kcalloc(size_t n, size_t size, gfp_t flags)
515790eabcSMichael S. Tsirkin{
525790eabcSMichael S. Tsirkin	return kmalloc_array(n, size, flags | __GFP_ZERO);
535790eabcSMichael S. Tsirkin}
545790eabcSMichael S. Tsirkin
/* kfree() shim.  free(NULL) is a no-op, so no NULL guard is needed. */
static void kfree(void *p)
{
	free(p);
}
609fb6bc5bSMichael S. Tsirkin
/* No vmalloc fallback in userspace: kvmalloc_array/kvfree map to kmalloc. */
#define kvmalloc_array kmalloc_array
#define kvfree kfree
63c8f06a06SMichael S. Tsirkin
649fb6bc5bSMichael S. Tsirkinstatic void spin_lock_init(spinlock_t *lock)
659fb6bc5bSMichael S. Tsirkin{
669fb6bc5bSMichael S. Tsirkin	int r = pthread_spin_init(lock, 0);
679fb6bc5bSMichael S. Tsirkin	assert(!r);
689fb6bc5bSMichael S. Tsirkin}
699fb6bc5bSMichael S. Tsirkin
709fb6bc5bSMichael S. Tsirkinstatic void spin_lock(spinlock_t *lock)
719fb6bc5bSMichael S. Tsirkin{
729fb6bc5bSMichael S. Tsirkin	int ret = pthread_spin_lock(lock);
739fb6bc5bSMichael S. Tsirkin	assert(!ret);
749fb6bc5bSMichael S. Tsirkin}
759fb6bc5bSMichael S. Tsirkin
769fb6bc5bSMichael S. Tsirkinstatic void spin_unlock(spinlock_t *lock)
779fb6bc5bSMichael S. Tsirkin{
789fb6bc5bSMichael S. Tsirkin	int ret = pthread_spin_unlock(lock);
799fb6bc5bSMichael S. Tsirkin	assert(!ret);
809fb6bc5bSMichael S. Tsirkin}
819fb6bc5bSMichael S. Tsirkin
/* Kernel spin_lock_bh(): bottom halves don't exist in userspace, so this
 * is a plain spin_lock(). */
static void spin_lock_bh(spinlock_t *lock)
{
	spin_lock(lock);
}
869fb6bc5bSMichael S. Tsirkin
/* Counterpart of spin_lock_bh(); a plain spin_unlock() here. */
static void spin_unlock_bh(spinlock_t *lock)
{
	spin_unlock(lock);
}
919fb6bc5bSMichael S. Tsirkin
/* Kernel spin_lock_irq(): interrupts don't exist in userspace, so this
 * is a plain spin_lock(). */
static void spin_lock_irq(spinlock_t *lock)
{
	spin_lock(lock);
}
969fb6bc5bSMichael S. Tsirkin
/* Counterpart of spin_lock_irq(); a plain spin_unlock() here. */
static void spin_unlock_irq(spinlock_t *lock)
{
	spin_unlock(lock);
}
1019fb6bc5bSMichael S. Tsirkin
/* Kernel spin_lock_irqsave(): the flags argument is ignored since there
 * are no interrupts to save; a plain spin_lock(). */
static void spin_lock_irqsave(spinlock_t *lock, unsigned long f)
{
	spin_lock(lock);
}
1069fb6bc5bSMichael S. Tsirkin
/* Counterpart of spin_lock_irqsave(); flags ignored, plain spin_unlock(). */
static void spin_unlock_irqrestore(spinlock_t *lock, unsigned long f)
{
	spin_unlock(lock);
}
1119fb6bc5bSMichael S. Tsirkin
1129fb6bc5bSMichael S. Tsirkin#include "../../../include/linux/ptr_ring.h"
1139fb6bc5bSMichael S. Tsirkin
/* Producer (headcnt) and consumer (tailcnt) progress counters used to fake
 * used-buffer tracking, which ptr_ring itself does not provide. */
static unsigned long long headcnt, tailcnt;
/* The ring under test, cache-line aligned. */
static struct ptr_ring array ____cacheline_aligned_in_smp;
1169fb6bc5bSMichael S. Tsirkin
1179fb6bc5bSMichael S. Tsirkin/* implemented by ring */
void alloc_ring(void)
{
	/* ring_size and param presumably come from the shared harness
	 * (main.h) — confirm against main.c. */
	int ret = ptr_ring_init(&array, ring_size, 0);
	assert(!ret);
	/* Hacky way to poke at ring internals. Useful for testing though. */
	if (param)
		array.batch = param;
}
1269fb6bc5bSMichael S. Tsirkin
1279fb6bc5bSMichael S. Tsirkin/* guest side */
1289fb6bc5bSMichael S. Tsirkinint add_inbuf(unsigned len, void *buf, void *datap)
1299fb6bc5bSMichael S. Tsirkin{
1309fb6bc5bSMichael S. Tsirkin	int ret;
1319fb6bc5bSMichael S. Tsirkin
1329fb6bc5bSMichael S. Tsirkin	ret = __ptr_ring_produce(&array, buf);
1339fb6bc5bSMichael S. Tsirkin	if (ret >= 0) {
1349fb6bc5bSMichael S. Tsirkin		ret = 0;
1359fb6bc5bSMichael S. Tsirkin		headcnt++;
1369fb6bc5bSMichael S. Tsirkin	}
1379fb6bc5bSMichael S. Tsirkin
1389fb6bc5bSMichael S. Tsirkin	return ret;
1399fb6bc5bSMichael S. Tsirkin}
1409fb6bc5bSMichael S. Tsirkin
1419fb6bc5bSMichael S. Tsirkin/*
1429fb6bc5bSMichael S. Tsirkin * ptr_ring API provides no way for producer to find out whether a given
1439fb6bc5bSMichael S. Tsirkin * buffer was consumed.  Our tests merely require that a successful get_buf
1449fb6bc5bSMichael S. Tsirkin * implies that add_inbuf succeed in the past, and that add_inbuf will succeed,
1459fb6bc5bSMichael S. Tsirkin * fake it accordingly.
1469fb6bc5bSMichael S. Tsirkin */
1479fb6bc5bSMichael S. Tsirkinvoid *get_buf(unsigned *lenp, void **bufp)
1489fb6bc5bSMichael S. Tsirkin{
1499fb6bc5bSMichael S. Tsirkin	void *datap;
1509fb6bc5bSMichael S. Tsirkin
1519fb6bc5bSMichael S. Tsirkin	if (tailcnt == headcnt || __ptr_ring_full(&array))
1529fb6bc5bSMichael S. Tsirkin		datap = NULL;
1539fb6bc5bSMichael S. Tsirkin	else {
1549fb6bc5bSMichael S. Tsirkin		datap = "Buffer\n";
1559fb6bc5bSMichael S. Tsirkin		++tailcnt;
1569fb6bc5bSMichael S. Tsirkin	}
1579fb6bc5bSMichael S. Tsirkin
1589fb6bc5bSMichael S. Tsirkin	return datap;
1599fb6bc5bSMichael S. Tsirkin}
1609fb6bc5bSMichael S. Tsirkin
161d3c3589bSPaolo Bonzinibool used_empty()
1629fb6bc5bSMichael S. Tsirkin{
163d3c3589bSPaolo Bonzini	return (tailcnt == headcnt || __ptr_ring_full(&array));
1649fb6bc5bSMichael S. Tsirkin}
1659fb6bc5bSMichael S. Tsirkin
/* Call suppression is not modelled by this test; reaching this stub is a
 * harness bug.  ("()" definition replaced with a "(void)" prototype.) */
void disable_call(void)
{
	assert(0);
}
1709fb6bc5bSMichael S. Tsirkin
/* Call enabling is not modelled by this test; reaching this stub is a
 * harness bug.  ("()" definition replaced with a "(void)" prototype.) */
bool enable_call(void)
{
	assert(0);
	/* Not reached; avoids falling off a non-void function (UB) if
	 * NDEBUG compiles assert() away. */
	return false;
}
1759fb6bc5bSMichael S. Tsirkin
/* Producer->consumer notification is unused by this test; must not be
 * reached. */
void kick_available(void)
{
	assert(0);
}
1809fb6bc5bSMichael S. Tsirkin
1819fb6bc5bSMichael S. Tsirkin/* host side */
/* Kick suppression is not modelled by this test; reaching this stub is a
 * harness bug.  ("()" definition replaced with a "(void)" prototype.) */
void disable_kick(void)
{
	assert(0);
}
1869fb6bc5bSMichael S. Tsirkin
/* Kick enabling is not modelled by this test; reaching this stub is a
 * harness bug.  ("()" definition replaced with a "(void)" prototype.) */
bool enable_kick(void)
{
	assert(0);
	/* Not reached; avoids falling off a non-void function (UB) if
	 * NDEBUG compiles assert() away. */
	return false;
}
1919fb6bc5bSMichael S. Tsirkin
192d3c3589bSPaolo Bonzinibool avail_empty()
1939fb6bc5bSMichael S. Tsirkin{
19430f1d370SMichael S. Tsirkin	return __ptr_ring_empty(&array);
1959fb6bc5bSMichael S. Tsirkin}
1969fb6bc5bSMichael S. Tsirkin
1979fb6bc5bSMichael S. Tsirkinbool use_buf(unsigned *lenp, void **bufp)
1989fb6bc5bSMichael S. Tsirkin{
1999fb6bc5bSMichael S. Tsirkin	void *ptr;
2009fb6bc5bSMichael S. Tsirkin
2019fb6bc5bSMichael S. Tsirkin	ptr = __ptr_ring_consume(&array);
2029fb6bc5bSMichael S. Tsirkin
2039fb6bc5bSMichael S. Tsirkin	return ptr;
2049fb6bc5bSMichael S. Tsirkin}
2059fb6bc5bSMichael S. Tsirkin
/* Consumer->producer "used buffer" notification is unused by this test;
 * must not be reached. */
void call_used(void)
{
	assert(0);
}
210