/*-
 * Copyright(c) 2002-2011 Exar Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification are permitted provided the following conditions are met:
 *
 *    1. Redistributions of source code must retain the above copyright notice,
 *       this list of conditions and the following disclaimer.
 *
 *    2. Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *
 *    3. Neither the name of the Exar Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived from
 *       this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*$FreeBSD$*/

#include <dev/vxge/vxgehal/vxgehal.h>

/*
 * vxge_queue_item_data - Get item's data.
 * @item: Queue item.
 *
 * Returns: item data (variable size). Note that vxge_queue_t
 * contains items comprised of a fixed vxge_queue_item_t "header"
 * and variable-size data. This function returns the variable,
 * user-defined portion of the queue item.
 */
void *
vxge_queue_item_data(vxge_queue_item_t *item)
{
	return (char *) item + sizeof(vxge_queue_item_t);
}
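
/*
 * Each queued item occupies one contiguous slot in the queue's ring buffer
 * (illustrative layout, implied by the accessor above):
 *
 *	+--------------------+-------------------------------+
 *	| vxge_queue_item_t  | user data (item->data_size)   |
 *	+--------------------+-------------------------------+
 *	^                    ^
 *	item                 vxge_queue_item_data(item)
 */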

/*
 * __queue_consume - (Lockless) dequeue an item from the specified queue.
 *
 * @queue: Event queue.
 * @data_max_size: Maximum size of the data that @item can hold.
 * @item: Buffer to receive the dequeued item.
 * See vxge_queue_consume().
 */
static vxge_queue_status_e
__queue_consume(vxge_queue_t *queue,
    u32 data_max_size,
    vxge_queue_item_t *item)
{
	int real_size;
	vxge_queue_item_t *elem;
	__hal_device_t *hldev;

	vxge_assert(queue != NULL);

	hldev = (__hal_device_t *) queue->hldev;

	vxge_hal_trace_log_queue("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_queue(
	    "queue = 0x"VXGE_OS_STXFMT", size = %d, item = 0x"VXGE_OS_STXFMT,
	    (ptr_t) queue, data_max_size, (ptr_t) item);

	if (vxge_list_is_empty(&queue->list_head)) {
		vxge_hal_trace_log_queue("<== %s:%s:%d Result = %d",
		    __FILE__, __func__, __LINE__, VXGE_QUEUE_IS_EMPTY);
		return (VXGE_QUEUE_IS_EMPTY);
	}

	elem = (vxge_queue_item_t *) queue->list_head.next;
	if (elem->data_size > data_max_size) {
		vxge_hal_trace_log_queue("<== %s:%s:%d Result = %d",
		    __FILE__, __func__, __LINE__, VXGE_QUEUE_NOT_ENOUGH_SPACE);
		return (VXGE_QUEUE_NOT_ENOUGH_SPACE);
	}

	vxge_list_remove(&elem->item);
	real_size = elem->data_size + sizeof(vxge_queue_item_t);
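	/*
	 * Reclaim buffer space only when the item sits at the head or the
	 * tail of the ring; an item removed from the middle keeps its slot
	 * until the queue drains and the pointers are reset below.
	 */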
	if (queue->head_ptr == elem) {
		queue->head_ptr = (char *) queue->head_ptr + real_size;
		vxge_hal_info_log_queue("event_type: %d "
		    "removing from the head: "
		    "0x"VXGE_OS_STXFMT":0x"VXGE_OS_STXFMT":0x"VXGE_OS_STXFMT
		    ":0x"VXGE_OS_STXFMT" elem 0x"VXGE_OS_STXFMT" length %d",
		    elem->event_type, (ptr_t) queue->start_ptr,
		    (ptr_t) queue->head_ptr, (ptr_t) queue->tail_ptr,
		    (ptr_t) queue->end_ptr, (ptr_t) elem, real_size);
	} else if ((char *) queue->tail_ptr - real_size == (char *) elem) {
		queue->tail_ptr = (char *) queue->tail_ptr - real_size;
		vxge_hal_info_log_queue("event_type: %d "
		    "removing from the tail: "
		    "0x"VXGE_OS_STXFMT":0x"VXGE_OS_STXFMT":0x"VXGE_OS_STXFMT
		    ":0x"VXGE_OS_STXFMT" elem 0x"VXGE_OS_STXFMT" length %d",
		    elem->event_type, (ptr_t) queue->start_ptr,
		    (ptr_t) queue->head_ptr, (ptr_t) queue->tail_ptr,
		    (ptr_t) queue->end_ptr, (ptr_t) elem, real_size);
	} else {
		vxge_hal_info_log_queue("event_type: %d "
		    "removing from the list: "
		    "0x"VXGE_OS_STXFMT":0x"VXGE_OS_STXFMT":0x"VXGE_OS_STXFMT
		    ":0x"VXGE_OS_STXFMT" elem 0x"VXGE_OS_STXFMT" length %d",
		    elem->event_type, (ptr_t) queue->start_ptr,
		    (ptr_t) queue->head_ptr, (ptr_t) queue->tail_ptr,
		    (ptr_t) queue->end_ptr, (ptr_t) elem, real_size);
	}
	vxge_assert(queue->tail_ptr >= queue->head_ptr);
	vxge_assert(queue->tail_ptr >= queue->start_ptr &&
	    queue->tail_ptr <= queue->end_ptr);
	vxge_assert(queue->head_ptr >= queue->start_ptr &&
	    queue->head_ptr < queue->end_ptr);
	vxge_os_memcpy(item, elem, sizeof(vxge_queue_item_t));
	vxge_os_memcpy(vxge_queue_item_data(item), vxge_queue_item_data(elem),
	    elem->data_size);

	if (vxge_list_is_empty(&queue->list_head)) {
		/* reset buffer pointers just to be clean */
		queue->head_ptr = queue->tail_ptr = queue->start_ptr;
	}

	vxge_hal_trace_log_queue("<== %s:%s:%d Result = 0",
	    __FILE__, __func__, __LINE__);

	return (VXGE_QUEUE_OK);
}

/*
 * vxge_queue_produce - Enqueue an item (see vxge_queue_item_t {})
 *			 into the specified queue.
 * @queueh: Queue handle.
 * @event_type: Event type. One of the enumerated event types
 *		 that both consumer and producer "understand".
 *		 For an example, please refer to vxge_hal_event_e.
 * @context: Opaque (void *) "context", for instance the event producer object.
 * @is_critical: Set for critical events, e.g. ECC errors.
 * @data_size: Size of the @data.
 * @data: User data of variable @data_size that is _copied_ into
 *	the new queue item (see vxge_queue_item_t {}). Upon return
 *	from the call the @data memory can be re-used or released.
 *
 * Enqueue a new item.
 *
 * Returns: VXGE_QUEUE_OK - success.
 * VXGE_QUEUE_IS_FULL - Queue is full.
 * VXGE_QUEUE_OUT_OF_MEMORY - Memory allocation failed.
 *
 * See also: vxge_queue_item_t {}, vxge_queue_consume().
 */
vxge_queue_status_e
vxge_queue_produce(vxge_queue_h queueh,
    u32 event_type,
    void *context,
    u32 is_critical,
    const u32 data_size,
    void *data)
{
	vxge_queue_t *queue = (vxge_queue_t *) queueh;
	int real_size = data_size + sizeof(vxge_queue_item_t);
	__hal_device_t *hldev;
	vxge_queue_item_t *elem;
	unsigned long flags = 0;

	vxge_assert(queueh != NULL);

	hldev = (__hal_device_t *) queue->hldev;

	vxge_hal_trace_log_queue("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_queue(
	    "queueh = 0x"VXGE_OS_STXFMT", event_type = %d, "
	    "context = 0x"VXGE_OS_STXFMT", is_critical = %d, "
	    "data_size = %d, data = 0x"VXGE_OS_STXFMT,
	    (ptr_t) queueh, event_type, (ptr_t) context,
	    is_critical, data_size, (ptr_t) data);

	vxge_assert(real_size <= VXGE_QUEUE_BUF_SIZE);

	vxge_os_spin_lock_irq(&queue->lock, flags);

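	/*
	 * A critical event (e.g. an ECC error) must not be lost: if the
	 * queue does not already hold one, drain all previously queued
	 * items so the critical item is guaranteed to find room.
	 */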
	if (is_critical && !queue->has_critical_event) {
		unsigned char item_buf[sizeof(vxge_queue_item_t) +
		    VXGE_DEFAULT_EVENT_MAX_DATA_SIZE];
		vxge_queue_item_t *item =
		    (vxge_queue_item_t *) (void *)item_buf;

		while (__queue_consume(queue, VXGE_DEFAULT_EVENT_MAX_DATA_SIZE,
		    item) != VXGE_QUEUE_IS_EMPTY) {
		}		/* do nothing */
	}

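	/*
	 * Placement strategy: append at the tail if the buffer has room
	 * there, otherwise prepend just below the head; when neither fits,
	 * grow the buffer by one page and retry.
	 */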
try_again:
	if ((char *) queue->tail_ptr + real_size <= (char *) queue->end_ptr) {
		elem = (vxge_queue_item_t *) queue->tail_ptr;
		queue->tail_ptr = (void *)((char *) queue->tail_ptr + real_size);
		vxge_hal_info_log_queue("event_type: %d adding to the tail: "
		    "0x"VXGE_OS_STXFMT":0x"VXGE_OS_STXFMT":0x"VXGE_OS_STXFMT
		    ":0x"VXGE_OS_STXFMT" elem 0x"VXGE_OS_STXFMT" length %d",
		    event_type, (ptr_t) queue->start_ptr,
		    (ptr_t) queue->head_ptr, (ptr_t) queue->tail_ptr,
		    (ptr_t) queue->end_ptr, (ptr_t) elem, real_size);
	} else if ((char *) queue->head_ptr - real_size >=
	    (char *) queue->start_ptr) {
		elem = (vxge_queue_item_t *)
		    ((void *)((char *) queue->head_ptr - real_size));
		queue->head_ptr = elem;
		vxge_hal_info_log_queue("event_type: %d adding to the head: "
		    "0x"VXGE_OS_STXFMT":0x"VXGE_OS_STXFMT":"
		    "0x"VXGE_OS_STXFMT":0x"VXGE_OS_STXFMT" length %d",
		    event_type, (ptr_t) queue->start_ptr,
		    (ptr_t) queue->head_ptr, (ptr_t) queue->tail_ptr,
		    (ptr_t) queue->end_ptr, real_size);
	} else {
		vxge_queue_status_e status;

		if (queue->pages_current >= queue->pages_max) {
			vxge_os_spin_unlock_irq(&queue->lock, flags);
			vxge_hal_trace_log_queue("<== %s:%s:%d Result = %d",
			    __FILE__, __func__, __LINE__, VXGE_QUEUE_IS_FULL);
			return (VXGE_QUEUE_IS_FULL);
		}

		if (queue->has_critical_event) {
			vxge_os_spin_unlock_irq(&queue->lock, flags);
			vxge_hal_trace_log_queue("<== %s:%s:%d Result = %d",
			    __FILE__, __func__, __LINE__, VXGE_QUEUE_IS_FULL);
			return (VXGE_QUEUE_IS_FULL);
		}

		/* grow */
		status = vxge_io_queue_grow(queueh);
		if (status != VXGE_QUEUE_OK) {
			vxge_os_spin_unlock_irq(&queue->lock, flags);
			vxge_hal_trace_log_queue("<== %s:%s:%d Result = %d",
			    __FILE__, __func__, __LINE__, status);
			return (status);
		}

		goto try_again;
	}
	vxge_assert(queue->tail_ptr >= queue->head_ptr);
	vxge_assert(queue->tail_ptr >= queue->start_ptr &&
	    queue->tail_ptr <= queue->end_ptr);
	vxge_assert(queue->head_ptr >= queue->start_ptr &&
	    queue->head_ptr < queue->end_ptr);
	elem->data_size = data_size;
	elem->event_type = (vxge_hal_event_e) event_type;
	elem->is_critical = is_critical;
	if (is_critical)
		queue->has_critical_event = 1;
	elem->context = context;
	vxge_os_memcpy(vxge_queue_item_data(elem), data, data_size);
	vxge_list_insert_before(&elem->item, &queue->list_head);
	vxge_os_spin_unlock_irq(&queue->lock, flags);

	/* no lock taken! The callback is optional (see vxge_queue_create()) */
	if (queue->queued_func != NULL)
		queue->queued_func(queue->queued_data, event_type);

	vxge_hal_trace_log_queue("<== %s:%s:%d Result = 0",
	    __FILE__, __func__, __LINE__);

	return (VXGE_QUEUE_OK);
}
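
/*
 * Usage sketch (illustrative only; the event id, payload type and context
 * below are hypothetical): the payload is copied into the queue, so the
 * source buffer may live on the producer's stack.
 *
 *	my_link_event_t ev = { .link_up = 1 };
 *	vxge_queue_status_e rc;
 *
 *	rc = vxge_queue_produce(queueh, MY_EVENT_LINK_UP, my_context,
 *	    0, sizeof(ev), &ev);
 *	if (rc == VXGE_QUEUE_IS_FULL || rc == VXGE_QUEUE_OUT_OF_MEMORY)
 *		my_dropped_events++;
 */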


/*
 * vxge_queue_create - Create protected first-in-first-out queue.
 * @devh: HAL device handle.
 * @pages_initial: Number of pages to be initially allocated at the
 * time of queue creation.
 * @pages_max: Max number of pages that can be allocated in the queue.
 * @queued_func: Optional callback function to be called each time a new
 * item is added to the queue.
 * @queued_data: Argument to the callback function.
 *
 * Create a protected (FIFO) queue.
 *
 * Returns: Pointer to vxge_queue_t structure,
 * NULL - on failure.
 *
 * See also: vxge_queue_item_t {}, vxge_queue_destroy().
 */
vxge_queue_h
vxge_queue_create(vxge_hal_device_h devh,
    u32 pages_initial,
    u32 pages_max,
    vxge_queued_f queued_func,
    void *queued_data)
{
	vxge_queue_t *queue;
	__hal_device_t *hldev = (__hal_device_t *) devh;

	vxge_assert(devh != NULL);

	vxge_hal_trace_log_queue("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_queue(
	    "devh = 0x"VXGE_OS_STXFMT", pages_initial = %d, "
	    "pages_max = %d, queued_func = 0x"VXGE_OS_STXFMT", "
	    "queued_data = 0x"VXGE_OS_STXFMT, (ptr_t) devh, pages_initial,
	    pages_max, (ptr_t) queued_func, (ptr_t) queued_data);

	if ((queue = (vxge_queue_t *) vxge_os_malloc(hldev->header.pdev,
	    sizeof(vxge_queue_t))) == NULL) {
		vxge_hal_trace_log_queue("<== %s:%s:%d Result = %d",
		    __FILE__, __func__, __LINE__, VXGE_QUEUE_OUT_OF_MEMORY);
		return (NULL);
	}

	queue->queued_func = queued_func;
	queue->queued_data = queued_data;
	queue->hldev = devh;
	queue->pdev = hldev->header.pdev;
	queue->irqh = hldev->header.irqh;
	queue->pages_current = pages_initial;
	queue->start_ptr = vxge_os_malloc(hldev->header.pdev,
	    queue->pages_current * VXGE_QUEUE_BUF_SIZE);
	if (queue->start_ptr == NULL) {
		vxge_os_free(hldev->header.pdev, queue, sizeof(vxge_queue_t));
		vxge_hal_trace_log_queue("<== %s:%s:%d Result = %d",
		    __FILE__, __func__, __LINE__, VXGE_QUEUE_OUT_OF_MEMORY);
		return (NULL);
	}
	queue->head_ptr = queue->tail_ptr = queue->start_ptr;
	queue->end_ptr = (char *) queue->start_ptr +
	    queue->pages_current * VXGE_QUEUE_BUF_SIZE;
	vxge_os_spin_lock_init_irq(&queue->lock, queue->irqh);
	queue->pages_initial = pages_initial;
	queue->pages_max = pages_max;
	vxge_list_init(&queue->list_head);

	vxge_hal_trace_log_queue("<== %s:%s:%d Result = 0",
	    __FILE__, __func__, __LINE__);

	return (queue);
}

/*
 * vxge_queue_destroy - Destroy vxge_queue_t object.
 * @queueh: Queue handle.
 *
 * Destroy the specified vxge_queue_t object.
 *
 * See also: vxge_queue_item_t {}, vxge_queue_create().
 */
void
vxge_queue_destroy(vxge_queue_h queueh)
{
	vxge_queue_t *queue = (vxge_queue_t *) queueh;
	__hal_device_t *hldev;

	vxge_assert(queueh != NULL);

	hldev = (__hal_device_t *) queue->hldev;

	vxge_hal_trace_log_queue("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_queue("queueh = 0x"VXGE_OS_STXFMT,
	    (ptr_t) queueh);

	vxge_os_spin_lock_destroy_irq(&queue->lock, queue->irqh);
	if (!vxge_list_is_empty(&queue->list_head)) {
		vxge_hal_trace_log_queue("destroying non-empty queue 0x"
		    VXGE_OS_STXFMT, (ptr_t) queue);
	}
	vxge_os_free(queue->pdev, queue->start_ptr, queue->pages_current *
	    VXGE_QUEUE_BUF_SIZE);

	vxge_os_free(queue->pdev, queue, sizeof(vxge_queue_t));

	vxge_hal_trace_log_queue("<== %s:%s:%d Result = 0",
	    __FILE__, __func__, __LINE__);
}

/*
 * vxge_io_queue_grow - Dynamically increase the size of the queue.
 * @queueh: Queue handle.
 *
 * This function is called when there is no slot available in the queue
 * to accommodate a newly received event.
 * Note that the queue cannot grow beyond the maximum size specified for
 * the queue.
 *
 * Returns: VXGE_QUEUE_OK - On success.
 * VXGE_QUEUE_OUT_OF_MEMORY - No memory is available.
 */
vxge_queue_status_e
vxge_io_queue_grow(vxge_queue_h queueh)
{
	vxge_queue_t *queue = (vxge_queue_t *) queueh;
	__hal_device_t *hldev;
	void *newbuf, *oldbuf;
	vxge_list_t *item;
	vxge_queue_item_t *elem;

	vxge_assert(queueh != NULL);

	hldev = (__hal_device_t *) queue->hldev;

	vxge_hal_trace_log_queue("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_queue("queueh = 0x"VXGE_OS_STXFMT,
	    (ptr_t) queueh);

	vxge_hal_info_log_queue("queue 0x"VXGE_OS_STXFMT":%d is growing",
	    (ptr_t) queue, queue->pages_current);

	newbuf = vxge_os_malloc(queue->pdev,
	    (queue->pages_current + 1) * VXGE_QUEUE_BUF_SIZE);
	if (newbuf == NULL) {
		vxge_hal_trace_log_queue("<== %s:%s:%d Result = %d",
		    __FILE__, __func__, __LINE__, VXGE_QUEUE_OUT_OF_MEMORY);
		return (VXGE_QUEUE_OUT_OF_MEMORY);
	}

	vxge_os_memcpy(newbuf, queue->start_ptr,
	    queue->pages_current * VXGE_QUEUE_BUF_SIZE);
	oldbuf = queue->start_ptr;

	/* adjust queue sizes */
	queue->start_ptr = newbuf;
	queue->end_ptr = (char *) newbuf +
	    (queue->pages_current + 1) * VXGE_QUEUE_BUF_SIZE;
	queue->tail_ptr = (char *) newbuf +
	/* LINTED */
	    ((char *) queue->tail_ptr - (char *) oldbuf);
	queue->head_ptr = (char *) newbuf +
	/* LINTED */
	    ((char *) queue->head_ptr - (char *) oldbuf);
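	/*
	 * The doubly-linked list is threaded through the items stored in
	 * the old buffer; rebase the list head and every item's links so
	 * they point into the freshly allocated copy.
	 */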
	vxge_assert(!vxge_list_is_empty(&queue->list_head));
	queue->list_head.next = (vxge_list_t *) (void *)((char *) newbuf +
	/* LINTED */
	    ((char *) queue->list_head.next - (char *) oldbuf));
	queue->list_head.prev = (vxge_list_t *) (void *)((char *) newbuf +
	/* LINTED */
	    ((char *) queue->list_head.prev - (char *) oldbuf));
	/* adjust queue list */
	vxge_list_for_each(item, &queue->list_head) {
		elem = vxge_container_of(item, vxge_queue_item_t, item);
		if (elem->item.next != &queue->list_head) {
			elem->item.next =
			    (vxge_list_t *) (void *)((char *) newbuf +
			/* LINTED */
			    ((char *) elem->item.next - (char *) oldbuf));
		}
		if (elem->item.prev != &queue->list_head) {
			elem->item.prev =
			    (vxge_list_t *) (void *)((char *) newbuf +
			/* LINTED */
			    ((char *) elem->item.prev - (char *) oldbuf));
		}
	}
	vxge_os_free(queue->pdev, oldbuf,
	    queue->pages_current * VXGE_QUEUE_BUF_SIZE);
	queue->pages_current++;

	vxge_hal_trace_log_queue("<== %s:%s:%d Result = 0",
	    __FILE__, __func__, __LINE__);
	return (VXGE_QUEUE_OK);
}

/*
 * vxge_queue_consume - Dequeue an item from the specified queue.
 * @queueh: Queue handle.
 * @data_max_size: Maximum expected size of the item.
 * @item: Memory area into which the item is _copied_ upon return
 *	from the function.
 *
 * Dequeue an item from the queue. The caller is required to provide
 * enough space for the item.
 *
 * Returns: VXGE_QUEUE_OK - success.
 * VXGE_QUEUE_IS_EMPTY - Queue is empty.
 * VXGE_QUEUE_NOT_ENOUGH_SPACE - Requested item size (@data_max_size)
 * is too small to accommodate an item from the queue.
 *
 * See also: vxge_queue_item_t {}, vxge_queue_produce().
 */
vxge_queue_status_e
vxge_queue_consume(vxge_queue_h queueh,
    u32 data_max_size,
    vxge_queue_item_t *item)
{
	vxge_queue_t *queue = (vxge_queue_t *) queueh;
	__hal_device_t *hldev;
	unsigned long flags = 0;
	vxge_queue_status_e status;

	vxge_assert(queueh != NULL);

	hldev = (__hal_device_t *) queue->hldev;

	vxge_hal_trace_log_queue("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_queue(
	    "queueh = 0x"VXGE_OS_STXFMT", data_max_size = %d, "
	    "item = 0x"VXGE_OS_STXFMT, (ptr_t) queueh,
	    data_max_size, (ptr_t) item);

	vxge_os_spin_lock_irq(&queue->lock, flags);
	status = __queue_consume(queue, data_max_size, item);
	vxge_os_spin_unlock_irq(&queue->lock, flags);

	vxge_hal_trace_log_queue("<== %s:%s:%d Result = %d",
	    __FILE__, __func__, __LINE__, status);
	return (status);
}
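
/*
 * Usage sketch (illustrative only; the dispatch function is hypothetical):
 * a consumer typically dequeues into a stack buffer sized for the largest
 * expected payload, as vxge_queue_flush() does below.
 *
 *	unsigned char buf[sizeof(vxge_queue_item_t) +
 *	    VXGE_DEFAULT_EVENT_MAX_DATA_SIZE];
 *	vxge_queue_item_t *item = (vxge_queue_item_t *) (void *)buf;
 *
 *	while (vxge_queue_consume(queueh, VXGE_DEFAULT_EVENT_MAX_DATA_SIZE,
 *	    item) == VXGE_QUEUE_OK)
 *		my_dispatch_event(item->event_type, vxge_queue_item_data(item));
 */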


/*
 * vxge_queue_flush - Flush, or empty, the queue.
 * @queueh: Queue handle.
 *
 * Flush the queue, i.e. make it empty by consuming all events
 * without invoking the event processing logic (callbacks, etc.)
 */
void
vxge_queue_flush(vxge_queue_h queueh)
{
	unsigned char item_buf[sizeof(vxge_queue_item_t) +
	    VXGE_DEFAULT_EVENT_MAX_DATA_SIZE];
	vxge_queue_item_t *item = (vxge_queue_item_t *) (void *)item_buf;
	vxge_queue_t *queue = (vxge_queue_t *) queueh;
	__hal_device_t *hldev;

	vxge_assert(queueh != NULL);

	hldev = (__hal_device_t *) queue->hldev;

	vxge_hal_trace_log_queue("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_queue("queueh = 0x"VXGE_OS_STXFMT,
	    (ptr_t) queueh);

	/* flush queue by consuming all enqueued items */
	while (vxge_queue_consume(queueh, VXGE_DEFAULT_EVENT_MAX_DATA_SIZE,
	    item) != VXGE_QUEUE_IS_EMPTY) {
		vxge_hal_trace_log_queue("item 0x"VXGE_OS_STXFMT"(%d) flushed",
		    (ptr_t) item, item->event_type);
	}

	(void) vxge_queue_get_reset_critical(queueh);

	vxge_hal_trace_log_queue("<== %s:%s:%d Result = 0",
	    __FILE__, __func__, __LINE__);
}

/*
 * vxge_queue_get_reset_critical - Check for critical events in the queue.
 * @queueh: Queue handle.
 *
 * Check for critical event(s) in the queue, and reset the
 * "has-critical-event" flag upon return.
 * Returns: 1 - if the queue contains at least one critical event.
 * 0 - If there are no critical events in the queue.
 */
u32
vxge_queue_get_reset_critical(vxge_queue_h queueh)
{
	vxge_queue_t *queue = (vxge_queue_t *) queueh;
	int c = queue->has_critical_event;
	__hal_device_t *hldev;

	vxge_assert(queueh != NULL);

	hldev = (__hal_device_t *) queue->hldev;

	vxge_hal_trace_log_queue("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_queue("queueh = 0x"VXGE_OS_STXFMT,
	    (ptr_t) queueh);

	queue->has_critical_event = 0;

	vxge_hal_trace_log_queue("<== %s:%s:%d Result = 0",
	    __FILE__, __func__, __LINE__);
	return (c);
}
