Lines Matching refs:queue

42  * user-defined portion of the queue item.
51 * __queue_consume - (Lockless) dequeue an item from the specified queue.
53 * @queue: Event queue.
59 __queue_consume(vxge_queue_t *queue,
67 vxge_assert(queue != NULL);
69 hldev = (__hal_device_t *) queue->hldev;
75 "queue = 0x"VXGE_OS_STXFMT", size = %d, item = 0x"VXGE_OS_STXFMT,
76 (ptr_t) queue, data_max_size, (ptr_t) item);
78 if (vxge_list_is_empty(&queue->list_head)) {
84 elem = (vxge_queue_item_t *) queue->list_head.next;
93 if (queue->head_ptr == elem) {
94 queue->head_ptr = (char *) queue->head_ptr + real_size;
99 elem->event_type, (ptr_t) queue->start_ptr,
100 (ptr_t) queue->head_ptr, (ptr_t) queue->tail_ptr,
101 (ptr_t) queue->end_ptr, (ptr_t) elem, real_size);
102 } else if ((char *) queue->tail_ptr - real_size == (char *) elem) {
103 queue->tail_ptr = (char *) queue->tail_ptr - real_size;
108 elem->event_type, (ptr_t) queue->start_ptr,
109 (ptr_t) queue->head_ptr, (ptr_t) queue->tail_ptr,
110 (ptr_t) queue->end_ptr, (ptr_t) elem, real_size);
116 elem->event_type, (ptr_t) queue->start_ptr,
117 (ptr_t) queue->head_ptr, (ptr_t) queue->tail_ptr,
118 (ptr_t) queue->end_ptr, (ptr_t) elem, real_size);
120 vxge_assert(queue->tail_ptr >= queue->head_ptr);
121 vxge_assert(queue->tail_ptr >= queue->start_ptr &&
122 queue->tail_ptr <= queue->end_ptr);
123 vxge_assert(queue->head_ptr >= queue->start_ptr &&
124 queue->head_ptr < queue->end_ptr);
129 if (vxge_list_is_empty(&queue->list_head)) {
131 queue->head_ptr = queue->tail_ptr = queue->start_ptr;
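
Read together, the matches above give away the contract of __queue_consume: a consumed element can sit at the head (head_ptr advances past it), at the tail (tail_ptr retreats over it), or in the interior, where its slot is simply abandoned until the list empties and both pointers snap back to start_ptr. Below is a minimal, self-contained model of that pointer arithmetic; the struct and names are hypothetical stand-ins, not the driver's types.

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the queue bookkeeping seen above. */
    struct arena {
            char *start, *end;      /* fixed backing buffer */
            char *head, *tail;      /* live region: [head, tail) */
            size_t items;           /* stand-in for the linked list */
    };

    /* The three dequeue cases visible in __queue_consume. */
    static void
    consume(struct arena *a, char *elem, size_t real_size)
    {
            if (a->head == elem)
                    a->head += real_size;           /* trim the front */
            else if (a->tail - real_size == elem)
                    a->tail -= real_size;           /* trim the back */
            /* else: interior hole; reclaimed only when the queue drains */

            assert(a->tail >= a->head);
            assert(a->tail >= a->start && a->tail <= a->end);

            if (--a->items == 0)                    /* queue empty: */
                    a->head = a->tail = a->start;   /* reclaim all space */
    }

    int
    main(void)
    {
            static char buf[64];
            struct arena a = { buf, buf + 64, buf, buf + 48, 3 };

            consume(&a, buf, 16);           /* head case */
            consume(&a, buf + 32, 16);      /* tail case */
            consume(&a, buf + 16, 16);      /* last item: pointers reset */
            printf("drained: %d\n", a.head == buf && a.tail == buf);
            return (0);
    }
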
142 * into the specified queue.
151 * the new queue item (see vxge_queue_item_t {}). Upon return
170 vxge_queue_t *queue = (vxge_queue_t *) queueh;
178 hldev = (__hal_device_t *) queue->hldev;
192 vxge_os_spin_lock_irq(&queue->lock, flags);
194 if (is_critical && !queue->has_critical_event) {
200 while (__queue_consume(queue, VXGE_DEFAULT_EVENT_MAX_DATA_SIZE,
206 if ((char *) queue->tail_ptr + real_size <= (char *) queue->end_ptr) {
207 elem = (vxge_queue_item_t *) queue->tail_ptr;
208 queue->tail_ptr = (void *)((char *) queue->tail_ptr + real_size);
212 event_type, (ptr_t) queue->start_ptr,
213 (ptr_t) queue->head_ptr, (ptr_t) queue->tail_ptr,
214 (ptr_t) queue->end_ptr, (ptr_t) elem, real_size);
215 } else if ((char *) queue->head_ptr - real_size >=
216 (char *) queue->start_ptr) {
218 ((void *)((char *) queue->head_ptr - real_size));
219 queue->head_ptr = elem;
223 event_type, (ptr_t) queue->start_ptr,
224 (ptr_t) queue->head_ptr, (ptr_t) queue->tail_ptr,
225 (ptr_t) queue->end_ptr, real_size);
229 if (queue->pages_current >= queue->pages_max) {
230 vxge_os_spin_unlock_irq(&queue->lock, flags);
236 if (queue->has_critical_event) {
237 vxge_os_spin_unlock_irq(&queue->lock, flags);
246 vxge_os_spin_unlock_irq(&queue->lock, flags);
254 vxge_assert(queue->tail_ptr >= queue->head_ptr);
255 vxge_assert(queue->tail_ptr >= queue->start_ptr &&
256 queue->tail_ptr <= queue->end_ptr);
257 vxge_assert(queue->head_ptr >= queue->start_ptr &&
258 queue->head_ptr < queue->end_ptr);
263 queue->has_critical_event = 1;
266 vxge_list_insert_before(&elem->item, &queue->list_head);
267 vxge_os_spin_unlock_irq(&queue->lock, flags);
270 queue->queued_func(queue->queued_data, event_type);
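
The produce path visible here appends at tail_ptr when there is room, otherwise prepends below head_ptr, consumes non-critical items to make room for a critical event, grows the buffer when allowed, and finally fires the registered callback (line 270). A hedged usage sketch follows; the vxge_queue_produce prototype and the event constant are assumptions to be verified against the HAL header.

    /* Hedged sketch: post a non-critical event. Argument order and the
     * VXGE_HAL_EVENT_LINK_UP constant are assumptions, not verified API. */
    vxge_hal_status_e status;

    status = vxge_queue_produce(queueh,
        VXGE_HAL_EVENT_LINK_UP,     /* event_type (assumed constant) */
        devh,                       /* context stored in the item */
        0,                          /* is_critical */
        0,                          /* size of user-defined payload */
        NULL);                      /* payload */
    if (status != VXGE_HAL_OK) {
            /* queue is full and already grown to pages_max */
            vxge_report_overflow(status);   /* hypothetical error path */
    }

On success the registered queued_func runs with queued_data (line 270 above), which is how consumers learn that work is pending.
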
280 * vxge_queue_create - Create a protected first-in-first-out queue.
283 * time of queue creation.
284 * @pages_max: Max number of pages that can be allocated in the queue.
286 * added to the queue.
289 * Create a protected (FIFO) queue.
303 vxge_queue_t *queue;
317 if ((queue = (vxge_queue_t *) vxge_os_malloc(hldev->header.pdev,
324 queue->queued_func = queued_func;
325 queue->queued_data = queued_data;
326 queue->hldev = devh;
327 queue->pdev = hldev->header.pdev;
328 queue->irqh = hldev->header.irqh;
329 queue->pages_current = pages_initial;
330 queue->start_ptr = vxge_os_malloc(hldev->header.pdev,
331 queue->pages_current * VXGE_QUEUE_BUF_SIZE);
332 if (queue->start_ptr == NULL) {
333 vxge_os_free(hldev->header.pdev, queue, sizeof(vxge_queue_t));
338 queue->head_ptr = queue->tail_ptr = queue->start_ptr;
339 queue->end_ptr = (char *) queue->start_ptr +
340 queue->pages_current * VXGE_QUEUE_BUF_SIZE;
341 vxge_os_spin_lock_init_irq(&queue->lock, queue->irqh);
342 queue->pages_initial = pages_initial;
343 queue->pages_max = pages_max;
344 vxge_list_init(&queue->list_head);
349 return (queue);
363 vxge_queue_t *queue = (vxge_queue_t *) queueh;
368 hldev = (__hal_device_t *) queue->hldev;
376 vxge_os_spin_lock_destroy_irq(&queue->lock, queue->irqh);
377 if (!vxge_list_is_empty(&queue->list_head)) {
378 vxge_hal_trace_log_queue("destroying non-empty queue 0x"
379 VXGE_OS_STXFMT, (ptr_t) queue);
381 vxge_os_free(queue->pdev, queue->start_ptr, queue->pages_current *
384 vxge_os_free(queue->pdev, queue, sizeof(vxge_queue_t));
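
These two routines bracket the queue's lifetime: vxge_queue_create allocates the control block plus pages_initial pages of buffer and initializes the spinlock and list head, while vxge_queue_destroy releases them and logs a warning if items are still queued. A hedged lifecycle sketch, with prototypes inferred from the matches rather than confirmed:

    /* Assumed callback shape, matching the queued_func(queued_data,
     * event_type) call seen at line 270. */
    static void
    my_queued(void *data, u32 event_type)
    {
            /* e.g. wake a service task; 'data' is queued_data below */
    }

    static vxge_hal_status_e
    my_setup_queue(vxge_hal_device_h devh, void *my_softc, vxge_queue_h *qhp)
    {
            vxge_queue_h qh;

            qh = vxge_queue_create(devh,
                1,              /* pages_initial: start with one page */
                4,              /* pages_max: ceiling for vxge_io_queue_grow */
                my_queued,      /* queued_func */
                my_softc);      /* queued_data (hypothetical context) */
            if (qh == NULL)
                    return (VXGE_HAL_ERR_OUT_OF_MEMORY);    /* assumed code */
            *qhp = qh;
            return (VXGE_HAL_OK);
    }

    /* ... later, in teardown: vxge_queue_destroy(qh); */
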
391 * vxge_io_queue_grow - Dynamically increases the size of the queue.
394 * This function is called when there is no slot available in the queue
396 * Note that the queue cannot grow beyond the max size specified for the
397 * queue.
405 vxge_queue_t *queue = (vxge_queue_t *) queueh;
413 hldev = (__hal_device_t *) queue->hldev;
421 vxge_hal_info_log_queue("queue 0x"VXGE_OS_STXFMT":%d is growing",
422 (ptr_t) queue, queue->pages_current);
424 newbuf = vxge_os_malloc(queue->pdev,
425 (queue->pages_current + 1) * VXGE_QUEUE_BUF_SIZE);
432 vxge_os_memcpy(newbuf, queue->start_ptr,
433 queue->pages_current * VXGE_QUEUE_BUF_SIZE);
434 oldbuf = queue->start_ptr;
436 /* adjust queue sizes */
437 queue->start_ptr = newbuf;
438 queue->end_ptr = (char *) newbuf +
439 (queue->pages_current + 1) * VXGE_QUEUE_BUF_SIZE;
440 queue->tail_ptr = (char *) newbuf +
442 ((char *) queue->tail_ptr - (char *) oldbuf);
443 queue->head_ptr = (char *) newbuf +
445 ((char *) queue->head_ptr - (char *) oldbuf);
446 vxge_assert(!vxge_list_is_empty(&queue->list_head));
447 queue->list_head.next = (vxge_list_t *) (void *)((char *) newbuf +
449 ((char *) queue->list_head.next - (char *) oldbuf));
450 queue->list_head.prev = (vxge_list_t *) (void *)((char *) newbuf +
452 ((char *) queue->list_head.prev - (char *) oldbuf));
453 /* adjust queue list */
454 vxge_list_for_each(item, &queue->list_head) {
456 if (elem->item.next != &queue->list_head) {
462 if (elem->item.prev != &queue->list_head) {
469 vxge_os_free(queue->pdev, oldbuf,
470 queue->pages_current * VXGE_QUEUE_BUF_SIZE);
471 queue->pages_current++;
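
The grow path is a pointer-rebase exercise: the old buffer is copied into an allocation one page larger, and every stored absolute pointer (head_ptr, tail_ptr, and both list links of each queued item) is shifted by the distance between the two buffers before the old one is freed. A self-contained model of the rebase idiom, with hypothetical names:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Shift a pointer that aimed into oldbuf so it aims at the same
     * byte offset inside newbuf - the idiom applied to head_ptr,
     * tail_ptr and the list links in vxge_io_queue_grow. */
    static void *
    rebase(void *p, char *oldbuf, char *newbuf)
    {
            return (newbuf + ((char *)p - oldbuf));
    }

    int
    main(void)
    {
            char *oldbuf = calloc(1, 64);
            char *newbuf = calloc(1, 128);
            char *tail;

            if (oldbuf == NULL || newbuf == NULL)
                    return (1);
            tail = oldbuf + 40;                     /* pretend queue state */
            memcpy(newbuf, oldbuf, 64);             /* copy live contents */
            tail = rebase(tail, oldbuf, newbuf);    /* rebase first... */
            free(oldbuf);                           /* ...then free */
            printf("offset kept: %d\n", (int)(tail - newbuf) == 40);
            free(newbuf);
            return (0);
    }
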
479 * vxge_queue_consume - Dequeue an item from the specified queue.
485 * Dequeue an item from the queue. The caller is required to provide
491 * is too small to accommodate an item from the queue.
500 vxge_queue_t *queue = (vxge_queue_t *) queueh;
507 hldev = (__hal_device_t *) queue->hldev;
517 vxge_os_spin_lock_irq(&queue->lock, flags);
518 status = __queue_consume(queue, data_max_size, item);
519 vxge_os_spin_unlock_irq(&queue->lock, flags);
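
vxge_queue_consume is the locked wrapper around the __queue_consume helper shown earlier: it takes queue->lock, copies one item into the caller's buffer, and reports failure when data_max_size cannot hold the item. A hedged drain loop; the buffer sizing (alignment handwaved) and the status comparison are assumptions:

    /* Hedged sketch: drain all pending events into a caller buffer
     * assumed big enough for the item header plus default payload. */
    char buf[sizeof(vxge_queue_item_t) + VXGE_DEFAULT_EVENT_MAX_DATA_SIZE];
    vxge_queue_item_t *item = (vxge_queue_item_t *)(void *)buf;

    while (vxge_queue_consume(queueh, VXGE_DEFAULT_EVENT_MAX_DATA_SIZE,
        item) == VXGE_HAL_OK) {
            /* dispatch on item->event_type; payload follows the header */
    }
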
528 * vxge_queue_flush - Flush, or empty, the queue.
531 * Flush the queue, i.e., make it empty by consuming all events
540 vxge_queue_t *queue = (vxge_queue_t *) queueh;
545 hldev = (__hal_device_t *) queue->hldev;
553 /* flush queue by consuming all enqueued items */
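
The flush matches confirm the strategy: rather than resetting pointers directly, the queue is emptied by consuming every enqueued item, so per-item accounting stays consistent. One natural use is right before teardown, which also avoids the "destroying non-empty queue" trace in vxge_queue_destroy above:

    vxge_queue_flush(queueh);       /* drop any still-pending events */
    vxge_queue_destroy(queueh);     /* queue is now guaranteed empty */
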
568 * vxge_queue_get_reset_critical - Check for critical events in the queue,
571 * Check for critical event(s) in the queue, and reset the
573 * Returns: 1 - if the queue contains at least one critical event.
574 * 0 - if there are no critical events in the queue.
579 vxge_queue_t *queue = (vxge_queue_t *) queueh;
580 int c = queue->has_critical_event;
585 hldev = (__hal_device_t *) queue->hldev;
593 queue->has_critical_event = 0;
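
As far as these matches show, has_critical_event is read and cleared without taking queue->lock, which fits its role as an advisory latch: a producer sets it at line 263, and this routine reports and rearms it. A hedged polling sketch; the recovery handler is hypothetical:

    /* Hedged sketch: periodic health check. */
    if (vxge_queue_get_reset_critical(queueh)) {
            /* at least one critical event arrived since the last poll */
            vxge_recover_from_critical(devh);   /* hypothetical handler */
    }
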