/*-
 * Copyright (c) 2018 VMware, Inc.
 *
 * SPDX-License-Identifier: (BSD-2-Clause OR GPL-2.0)
 *
 * $FreeBSD$
 */

#ifndef _VMCI_DEFS_H_
#define _VMCI_DEFS_H_

#include <sys/types.h>
#include <machine/atomic.h>

#include "vmci_kernel_defs.h"

#pragma GCC diagnostic ignored "-Wcast-qual"

/* Register offsets. */
#define VMCI_STATUS_ADDR		0x00
#define VMCI_CONTROL_ADDR		0x04
#define VMCI_ICR_ADDR			0x08
#define VMCI_IMR_ADDR			0x0c
#define VMCI_DATA_OUT_ADDR		0x10
#define VMCI_DATA_IN_ADDR		0x14
#define VMCI_CAPS_ADDR			0x18
#define VMCI_RESULT_LOW_ADDR		0x1c
#define VMCI_RESULT_HIGH_ADDR		0x20

/* Status register bits. */
#define VMCI_STATUS_INT_ON		0x1

/* Control register bits. */
#define VMCI_CONTROL_RESET		0x1
#define VMCI_CONTROL_INT_ENABLE		0x2
#define VMCI_CONTROL_INT_DISABLE	0x4

/* Capabilities register bits. */
#define VMCI_CAPS_HYPERCALL		0x1
#define VMCI_CAPS_GUESTCALL		0x2
#define VMCI_CAPS_DATAGRAM		0x4
#define VMCI_CAPS_NOTIFICATIONS		0x8

/* Interrupt Cause register bits. */
#define VMCI_ICR_DATAGRAM		0x1
#define VMCI_ICR_NOTIFICATION		0x2

/* Interrupt Mask register bits. */
#define VMCI_IMR_DATAGRAM		0x1
#define VMCI_IMR_NOTIFICATION		0x2

/* Interrupt type. */
typedef enum vmci_intr_type {
	VMCI_INTR_TYPE_INTX =	0,
	VMCI_INTR_TYPE_MSI =	1,
	VMCI_INTR_TYPE_MSIX =	2
} vmci_intr_type;

/*
 * Maximum MSI/MSI-X interrupt vectors in the device.
 */
#define VMCI_MAX_INTRS			2

/*
 * Supported interrupt vectors. There is one for each ICR value above,
 * but here they indicate the position in the vector array/message ID.
 */
#define VMCI_INTR_DATAGRAM		0
#define VMCI_INTR_NOTIFICATION		1

/*
 * A single VMCI device has an upper limit of 128 MiB on the amount of
 * memory that can be used for queue pairs.
 */
#define VMCI_MAX_GUEST_QP_MEMORY	(128 * 1024 * 1024)

/*
 * We have a fixed set of resource IDs available in the VMX.
 * This allows us to have a very simple implementation since we statically
 * know how many will create datagram handles. If a new caller arrives and
 * we have run out of slots we can manually increment the maximum size of
 * available resource IDs.
 */

typedef uint32_t vmci_resource;

/* VMCI reserved hypervisor datagram resource IDs. */
#define VMCI_RESOURCES_QUERY		0
#define VMCI_GET_CONTEXT_ID		1
#define VMCI_SET_NOTIFY_BITMAP		2
#define VMCI_DOORBELL_LINK		3
#define VMCI_DOORBELL_UNLINK		4
#define VMCI_DOORBELL_NOTIFY		5
/*
 * VMCI_DATAGRAM_REQUEST_MAP and VMCI_DATAGRAM_REMOVE_MAP are
 * obsoleted by the removal of VM to VM communication.
 */
#define VMCI_DATAGRAM_REQUEST_MAP	6
#define VMCI_DATAGRAM_REMOVE_MAP	7
#define VMCI_EVENT_SUBSCRIBE		8
#define VMCI_EVENT_UNSUBSCRIBE		9
#define VMCI_QUEUEPAIR_ALLOC		10
#define VMCI_QUEUEPAIR_DETACH		11
/*
 * VMCI_VSOCK_VMX_LOOKUP was assigned to 12 for Fusion 3.0/3.1,
 * WS 7.0/7.1 and ESX 4.1
 */
#define VMCI_HGFS_TRANSPORT		13
#define VMCI_UNITY_PBRPC_REGISTER	14
/*
 * This resource is used for VMCI socket control packets sent to the
 * hypervisor (CID 0) because RID 1 is already reserved.
 */
#define VSOCK_PACKET_HYPERVISOR_RID	15
#define VMCI_RESOURCE_MAX		16
/*
 * The core VMCI device functionality only requires the resource IDs of
 * VMCI_QUEUEPAIR_DETACH and below.
 */
#define VMCI_CORE_DEVICE_RESOURCE_MAX	VMCI_QUEUEPAIR_DETACH

/*
 * VMCI reserved host datagram resource IDs.
 * vsock control channel has resource id 1.
 */
#define VMCI_DVFILTER_DATA_PATH_DATAGRAM	2

/* VMCI Ids. */
typedef uint32_t vmci_id;

struct vmci_id_range {
	int8_t	action;	/* VMCI_FA_X, for use in filters. */
	vmci_id	begin;	/* Beginning of range. */
	vmci_id	end;	/* End of range. */
};

struct vmci_handle {
	vmci_id	context;
	vmci_id	resource;
};

static inline struct vmci_handle
VMCI_MAKE_HANDLE(vmci_id cid, vmci_id rid)
{
	struct vmci_handle h;

	h.context = cid;
	h.resource = rid;
	return (h);
}

#define VMCI_HANDLE_TO_CONTEXT_ID(_handle)				\
	((_handle).context)
#define VMCI_HANDLE_TO_RESOURCE_ID(_handle)				\
	((_handle).resource)
#define VMCI_HANDLE_EQUAL(_h1, _h2)					\
	((_h1).context == (_h2).context && (_h1).resource == (_h2).resource)

#define VMCI_INVALID_ID			0xFFFFFFFF
static const struct vmci_handle VMCI_INVALID_HANDLE = {VMCI_INVALID_ID,
	    VMCI_INVALID_ID};

#define VMCI_HANDLE_INVALID(_handle)					\
	VMCI_HANDLE_EQUAL((_handle), VMCI_INVALID_HANDLE)

/*
 * The below defines can be used to send anonymous requests.
 * This also indicates that no response is expected.
 */
#define VMCI_ANON_SRC_CONTEXT_ID					\
	VMCI_INVALID_ID
#define VMCI_ANON_SRC_RESOURCE_ID					\
	VMCI_INVALID_ID
#define VMCI_ANON_SRC_HANDLE						\
	VMCI_MAKE_HANDLE(VMCI_ANON_SRC_CONTEXT_ID,			\
	VMCI_ANON_SRC_RESOURCE_ID)
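
/*
 * Illustrative sketch (hypothetical function, not part of this header): an
 * anonymous source handle built from the defines above is identical to
 * VMCI_INVALID_HANDLE, which is how the receiver knows that no response is
 * expected.
 */
#if 0
static void
vmci_anon_handle_example(void)
{
	struct vmci_handle src = VMCI_ANON_SRC_HANDLE;

	if (VMCI_HANDLE_INVALID(src)) {
		/* Always taken: anonymous requests use the invalid handle. */
	}
}
#endif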

/* The lowest 16 context ids are reserved for internal use. */
#define VMCI_RESERVED_CID_LIMIT		16

/*
 * Hypervisor context id, used for calling into hypervisor
 * supplied services from the VM.
 */
#define VMCI_HYPERVISOR_CONTEXT_ID	0

/*
 * Well-known context id, a logical context that contains a set of
 * well-known services. This context ID is now obsolete.
 */
#define VMCI_WELL_KNOWN_CONTEXT_ID	1

/*
 * Context ID used by host endpoints.
 */
#define VMCI_HOST_CONTEXT_ID		2
#define VMCI_HOST_CONTEXT_INVALID_EVENT	((uintptr_t)~0)

#define VMCI_CONTEXT_IS_VM(_cid)					\
	(VMCI_INVALID_ID != _cid && _cid > VMCI_HOST_CONTEXT_ID)

/*
 * The VMCI_CONTEXT_RESOURCE_ID is used together with VMCI_MAKE_HANDLE to make
 * handles that refer to a specific context.
 */
#define VMCI_CONTEXT_RESOURCE_ID	0
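
/*
 * Illustrative sketch (hypothetical function, not part of this header):
 * pairing a context ID with VMCI_CONTEXT_RESOURCE_ID yields a handle that
 * refers to that context itself, here the host context.
 */
#if 0
static void
vmci_context_handle_example(void)
{
	struct vmci_handle host_ctx;

	host_ctx = VMCI_MAKE_HANDLE(VMCI_HOST_CONTEXT_ID,
	    VMCI_CONTEXT_RESOURCE_ID);
	/* VMCI_HANDLE_TO_CONTEXT_ID(host_ctx) == VMCI_HOST_CONTEXT_ID. */
}
#endif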

/*
 *------------------------------------------------------------------------------
 *
 * VMCI error codes.
 *
 *------------------------------------------------------------------------------
 */

#define VMCI_SUCCESS_QUEUEPAIR_ATTACH		5
#define VMCI_SUCCESS_QUEUEPAIR_CREATE		4
#define VMCI_SUCCESS_LAST_DETACH		3
#define VMCI_SUCCESS_ACCESS_GRANTED		2
#define VMCI_SUCCESS_ENTRY_DEAD			1
#define VMCI_SUCCESS				0LL
#define VMCI_ERROR_INVALID_RESOURCE		(-1)
#define VMCI_ERROR_INVALID_ARGS			(-2)
#define VMCI_ERROR_NO_MEM			(-3)
#define VMCI_ERROR_DATAGRAM_FAILED		(-4)
#define VMCI_ERROR_MORE_DATA			(-5)
#define VMCI_ERROR_NO_MORE_DATAGRAMS		(-6)
#define VMCI_ERROR_NO_ACCESS			(-7)
#define VMCI_ERROR_NO_HANDLE			(-8)
#define VMCI_ERROR_DUPLICATE_ENTRY		(-9)
#define VMCI_ERROR_DST_UNREACHABLE		(-10)
#define VMCI_ERROR_PAYLOAD_TOO_LARGE		(-11)
#define VMCI_ERROR_INVALID_PRIV			(-12)
#define VMCI_ERROR_GENERIC			(-13)
#define VMCI_ERROR_PAGE_ALREADY_SHARED		(-14)
#define VMCI_ERROR_CANNOT_SHARE_PAGE		(-15)
#define VMCI_ERROR_CANNOT_UNSHARE_PAGE		(-16)
#define VMCI_ERROR_NO_PROCESS			(-17)
#define VMCI_ERROR_NO_DATAGRAM			(-18)
#define VMCI_ERROR_NO_RESOURCES			(-19)
#define VMCI_ERROR_UNAVAILABLE			(-20)
#define VMCI_ERROR_NOT_FOUND			(-21)
#define VMCI_ERROR_ALREADY_EXISTS		(-22)
#define VMCI_ERROR_NOT_PAGE_ALIGNED		(-23)
#define VMCI_ERROR_INVALID_SIZE			(-24)
#define VMCI_ERROR_REGION_ALREADY_SHARED	(-25)
#define VMCI_ERROR_TIMEOUT			(-26)
#define VMCI_ERROR_DATAGRAM_INCOMPLETE		(-27)
#define VMCI_ERROR_INCORRECT_IRQL		(-28)
#define VMCI_ERROR_EVENT_UNKNOWN		(-29)
#define VMCI_ERROR_OBSOLETE			(-30)
#define VMCI_ERROR_QUEUEPAIR_MISMATCH		(-31)
#define VMCI_ERROR_QUEUEPAIR_NOTSET		(-32)
#define VMCI_ERROR_QUEUEPAIR_NOTOWNER		(-33)
#define VMCI_ERROR_QUEUEPAIR_NOTATTACHED	(-34)
#define VMCI_ERROR_QUEUEPAIR_NOSPACE		(-35)
#define VMCI_ERROR_QUEUEPAIR_NODATA		(-36)
#define VMCI_ERROR_BUSMEM_INVALIDATION		(-37)
#define VMCI_ERROR_MODULE_NOT_LOADED		(-38)
#define VMCI_ERROR_DEVICE_NOT_FOUND		(-39)
#define VMCI_ERROR_QUEUEPAIR_NOT_READY		(-40)
#define VMCI_ERROR_WOULD_BLOCK			(-41)

/* VMCI clients should return error codes within this range. */
#define VMCI_ERROR_CLIENT_MIN			(-500)
#define VMCI_ERROR_CLIENT_MAX			(-550)

/* Internal error codes. */
#define VMCI_SHAREDMEM_ERROR_BAD_CONTEXT	(-1000)

#define VMCI_PATH_MAX				256

/* VMCI reserved events. */
typedef uint32_t vmci_event_type;

#define VMCI_EVENT_CTX_ID_UPDATE	0	// Only applicable to guest
						// endpoints
#define VMCI_EVENT_CTX_REMOVED		1	// Applicable to guest and host
#define VMCI_EVENT_QP_RESUMED		2	// Only applicable to guest
						// endpoints
#define VMCI_EVENT_QP_PEER_ATTACH	3	// Applicable to guest, host
						// and VMX
#define VMCI_EVENT_QP_PEER_DETACH	4	// Applicable to guest, host
						// and VMX
#define VMCI_EVENT_MEM_ACCESS_ON	5	// Applicable to VMX and vmk. On
						// vmk, this event has the
						// Context payload type
#define VMCI_EVENT_MEM_ACCESS_OFF	6	// Applicable to VMX and vmk.
						// Same as above for the payload
						// type
#define VMCI_EVENT_GUEST_PAUSED		7	// Applicable to vmk. This
						// event has the Context
						// payload type
#define VMCI_EVENT_GUEST_UNPAUSED	8	// Applicable to vmk. Same as
						// above for the payload type.
#define VMCI_EVENT_MAX			9

/*
 * Of the above events, a few are reserved for use in the VMX, and other
 * endpoints (guest and host kernel) should not use them. For the rest of the
 * events, we allow both host and guest endpoints to subscribe to them, to
 * maintain the same API for host and guest endpoints.
 */

#define VMCI_EVENT_VALID_VMX(_event)					\
	(_event == VMCI_EVENT_QP_PEER_ATTACH ||				\
	_event == VMCI_EVENT_QP_PEER_DETACH ||				\
	_event == VMCI_EVENT_MEM_ACCESS_ON ||				\
	_event == VMCI_EVENT_MEM_ACCESS_OFF)

#define VMCI_EVENT_VALID(_event)					\
	(_event < VMCI_EVENT_MAX &&					\
	_event != VMCI_EVENT_MEM_ACCESS_ON &&				\
	_event != VMCI_EVENT_MEM_ACCESS_OFF &&				\
	_event != VMCI_EVENT_GUEST_PAUSED &&				\
	_event != VMCI_EVENT_GUEST_UNPAUSED)

/* Reserved guest datagram resource ids. */
#define VMCI_EVENT_HANDLER		0

/*
 * VMCI coarse-grained privileges (per context or host process/endpoint). An
 * entity with the restricted flag is only allowed to interact with the
 * hypervisor and trusted entities.
 */
typedef uint32_t vmci_privilege_flags;

#define VMCI_PRIVILEGE_FLAG_RESTRICTED		0x01
#define VMCI_PRIVILEGE_FLAG_TRUSTED		0x02
#define VMCI_PRIVILEGE_ALL_FLAGS					\
	(VMCI_PRIVILEGE_FLAG_RESTRICTED | VMCI_PRIVILEGE_FLAG_TRUSTED)
#define VMCI_NO_PRIVILEGE_FLAGS			0x00
#define VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS	VMCI_NO_PRIVILEGE_FLAGS
#define VMCI_LEAST_PRIVILEGE_FLAGS		VMCI_PRIVILEGE_FLAG_RESTRICTED
#define VMCI_MAX_PRIVILEGE_FLAGS		VMCI_PRIVILEGE_FLAG_TRUSTED

/* 0 through VMCI_RESERVED_RESOURCE_ID_MAX are reserved. */
#define VMCI_RESERVED_RESOURCE_ID_MAX		1023

#define VMCI_DOMAIN_NAME_MAXLEN			32

#define VMCI_LGPFX				"vmci: "

/*
 * struct vmci_queue_header
 *
 * A Queue cannot stand by itself as designed. Each Queue's header contains a
 * pointer into itself (the producer_tail) and into its peer (consumer_head).
 * The reason for the separation is one of accessibility: Each end-point can
 * modify two things: where the next location to enqueue is within its produce_q
 * (producer_tail); and where the next dequeue location is in its consume_q
 * (consumer_head).
 *
 * An end-point cannot modify the pointers of its peer (guest to guest; NOTE
 * that in the host both queue headers are mapped r/w). But, each end-point
 * needs read access to both Queue header structures in order to determine how
 * much space is used (or left) in the Queue. This is because for an end-point
 * to know how full its produce_q is, it needs to use the consumer_head that
 * points into the produce_q but -that- consumer_head is in the Queue header
 * for that end-point's consume_q.
 *
 * Thoroughly confused?  Sorry.
 *
 * producer_tail: the point to enqueue new entrants.  When you approach a line
 * in a store, for example, you walk up to the tail.
 *
 * consumer_head: the point in the queue from which the next element is
 * dequeued. In other words, whoever is at the head of the line is served
 * next.
 *
 * Also, producer_tail points to an empty byte in the Queue, whereas
 * consumer_head points to a valid byte of data (unless producer_tail ==
 * consumer_head in which case consumer_head does not point to a valid byte of
 * data).
 *
 * For a queue of buffer 'size' bytes, the tail and head pointers will be in
 * the range [0, size-1].
 *
 * If produce_q_header->producer_tail == consume_q_header->consumer_head then
 * the produce_q is empty.
 */
struct vmci_queue_header {
	/* All fields are 64bit and aligned. */
	struct vmci_handle	handle;		/* Identifier. */
	volatile uint64_t	producer_tail;	/* Offset in this queue. */
	volatile uint64_t	consumer_head;	/* Offset in peer queue. */
};
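
/*
 * Worked example (illustrative numbers only): for a produce_q of size 16
 * with produce_q_header->producer_tail == 10 and
 * consume_q_header->consumer_head == 4, the producer has 10 - 4 = 6 bytes
 * of data outstanding and 16 - 6 - 1 = 9 bytes free; one byte is always
 * sacrificed so that a full queue can be told apart from an empty one.
 * If the tail has wrapped past the end, e.g. tail == 2 and head == 12,
 * the free space is 12 - 2 - 1 = 9 bytes and 6 bytes remain to be consumed.
 */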


/*
 * If one client of a QueuePair is a 32bit entity, we restrict the QueuePair
 * size to be less than 4GB, and use 32bit atomic operations on the head and
 * tail pointers. 64bit atomic read on a 32bit entity involves cmpxchg8b which
 * is an atomic read-modify-write. This will cause traces to fire when a 32bit
 * consumer tries to read the producer's tail pointer, for example, because the
 * consumer has read-only access to the producer's tail pointer.
 *
 * We provide the following macros to invoke 32bit or 64bit atomic operations
 * based on the architecture the code is being compiled on.
 */

#ifdef __x86_64__
#define QP_MAX_QUEUE_SIZE_ARCH		CONST64U(0xffffffffffffffff)
#define qp_atomic_read_offset(x)	atomic_load_64(x)
#define qp_atomic_write_offset(x, y)	atomic_store_64(x, y)
#else /* __x86_64__ */
	/*
	 * Wrappers below are being used because atomic_store_<type> operates
	 * on a specific <type>. Likewise for atomic_load_<type>
	 */

	static inline uint32_t
	type_safe_atomic_read_32(void *var)
	{
		return (atomic_load_32((volatile uint32_t *)(var)));
	}

	static inline void
	type_safe_atomic_write_32(void *var, uint32_t val)
	{
		atomic_store_32((volatile uint32_t *)(var), (uint32_t)(val));
	}

#define QP_MAX_QUEUE_SIZE_ARCH		CONST64U(0xffffffff)
#define qp_atomic_read_offset(x)	type_safe_atomic_read_32((void *)(x))
#define qp_atomic_write_offset(x, y)					\
	type_safe_atomic_write_32((void *)(x), (uint32_t)(y))
#endif /* __x86_64__ */

/*
 *------------------------------------------------------------------------------
 *
 * qp_add_pointer --
 *
 *     Helper to add a given offset to a head or tail pointer. Wraps the value
 *     of the pointer around the max size of the queue.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static inline void
qp_add_pointer(volatile uint64_t *var, size_t add, uint64_t size)
{
	uint64_t new_val = qp_atomic_read_offset(var);

	if (new_val >= size - add)
		new_val -= size;

	new_val += add;
	qp_atomic_write_offset(var, new_val);
}
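
/*
 * Worked example (illustrative numbers only): with size == 100 and
 * *var == 98, qp_add_pointer(var, 5, 100) sees that 98 >= 100 - 5, so it
 * subtracts size first (wrapping modulo 2^64) and then adds 5, storing 3.
 * The stored offset therefore stays within [0, size - 1], as required by
 * the struct vmci_queue_header comment above.
 */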

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_header_producer_tail --
 *
 *     Helper routine to get the Producer Tail from the supplied queue.
 *
 * Results:
 *     The contents of the queue's producer tail.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static inline uint64_t
vmci_queue_header_producer_tail(const struct vmci_queue_header *q_header)
{
	struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header;
	return (qp_atomic_read_offset(&qh->producer_tail));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_header_consumer_head --
 *
 *     Helper routine to get the Consumer Head from the supplied queue.
 *
 * Results:
 *     The contents of the queue's consumer head.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static inline uint64_t
vmci_queue_header_consumer_head(const struct vmci_queue_header *q_header)
{
	struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header;
	return (qp_atomic_read_offset(&qh->consumer_head));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_header_add_producer_tail --
 *
 *     Helper routine to increment the Producer Tail. Fundamentally,
 *     qp_add_pointer() is used to manipulate the tail itself.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static inline void
vmci_queue_header_add_producer_tail(struct vmci_queue_header *q_header,
    size_t add, uint64_t queue_size)
{

	qp_add_pointer(&q_header->producer_tail, add, queue_size);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_header_add_consumer_head --
 *
 *     Helper routine to increment the Consumer Head. Fundamentally,
 *     qp_add_pointer() is used to manipulate the head itself.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static inline void
vmci_queue_header_add_consumer_head(struct vmci_queue_header *q_header,
    size_t add, uint64_t queue_size)
{

	qp_add_pointer(&q_header->consumer_head, add, queue_size);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_header_get_pointers --
 *
 *     Helper routine for getting the head and the tail pointer for a queue.
 *     Both queue headers are needed to get both of the pointers for one queue.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static inline void
vmci_queue_header_get_pointers(const struct vmci_queue_header *produce_q_header,
    const struct vmci_queue_header *consume_q_header, uint64_t *producer_tail,
    uint64_t *consumer_head)
{

	if (producer_tail)
		*producer_tail =
		    vmci_queue_header_producer_tail(produce_q_header);

	if (consumer_head)
		*consumer_head =
		    vmci_queue_header_consumer_head(consume_q_header);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_header_reset_pointers --
 *
 *     Reset the tail pointer (of "this" queue) and the head pointer (of "peer"
 *     queue).
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static inline void
vmci_queue_header_reset_pointers(struct vmci_queue_header *q_header)
{

	qp_atomic_write_offset(&q_header->producer_tail, CONST64U(0));
	qp_atomic_write_offset(&q_header->consumer_head, CONST64U(0));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_header_init --
 *
 *     Initializes a queue's state (head & tail pointers).
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static inline void
vmci_queue_header_init(struct vmci_queue_header *q_header,
    const struct vmci_handle handle)
{

	q_header->handle = handle;
	vmci_queue_header_reset_pointers(q_header);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_header_free_space --
 *
 *     Finds available free space in a produce queue to enqueue more data or
 *     reports an error if queue pair corruption is detected.
 *
 * Results:
 *     Free space size in bytes or an error code.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static inline int64_t
vmci_queue_header_free_space(const struct vmci_queue_header *produce_q_header,
    const struct vmci_queue_header *consume_q_header,
    const uint64_t produce_q_size)
{
	uint64_t free_space;
	uint64_t head;
	uint64_t tail;

	tail = vmci_queue_header_producer_tail(produce_q_header);
	head = vmci_queue_header_consumer_head(consume_q_header);

	if (tail >= produce_q_size || head >= produce_q_size)
		return (VMCI_ERROR_INVALID_SIZE);

	/*
	 * Deduct 1 to avoid tail becoming equal to head which causes ambiguity.
	 * If head and tail are equal it means that the queue is empty.
	 */

	if (tail >= head)
		free_space = produce_q_size - (tail - head) - 1;
	else
		free_space = head - tail - 1;

	return (free_space);
}
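
/*
 * Illustrative consequence of the deduction above: when the queue is empty
 * (tail == head), free_space evaluates to produce_q_size - 1, so a queue of
 * produce_q_size bytes can never hold more than produce_q_size - 1 bytes of
 * data at once.
 */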

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_header_buf_ready --
 *
 *     vmci_queue_header_free_space() does all the heavy lifting of determining
 *     the number of free bytes in a Queue. This routine then subtracts that
 *     size from the full size of the Queue so the caller knows how many bytes
 *     are ready to be dequeued.
 *
 * Results:
 *     On success, available data size in bytes (up to MAX_INT64).
 *     On failure, appropriate error code.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static inline int64_t
vmci_queue_header_buf_ready(const struct vmci_queue_header *consume_q_header,
    const struct vmci_queue_header *produce_q_header,
    const uint64_t consume_q_size)
{
	int64_t free_space;

	free_space = vmci_queue_header_free_space(consume_q_header,
	    produce_q_header, consume_q_size);
	if (free_space < VMCI_SUCCESS)
		return (free_space);
	else
		return (consume_q_size - free_space - 1);
}
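
/*
 * Illustrative sketch (hypothetical helper, not part of this header): a
 * consumer that wants to dequeue 'want' bytes would typically compare
 * vmci_queue_header_buf_ready() against that size; a producer makes the
 * symmetric check with vmci_queue_header_free_space() before enqueuing.
 */
#if 0
static inline int
vmci_queue_can_dequeue(const struct vmci_queue_header *consume_q_header,
    const struct vmci_queue_header *produce_q_header,
    uint64_t consume_q_size, uint64_t want)
{
	int64_t ready;

	ready = vmci_queue_header_buf_ready(consume_q_header,
	    produce_q_header, consume_q_size);
	return (ready >= 0 && (uint64_t)ready >= want);
}
#endif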

#endif /* !_VMCI_DEFS_H_ */
