/*-
 * Copyright (c) 2002-2007 Neterion, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef XGE_QUEUE_H
#define XGE_QUEUE_H

#include <dev/nxge/include/xge-os-pal.h>
#include <dev/nxge/include/xge-defs.h>
#include <dev/nxge/include/xge-list.h>
#include <dev/nxge/include/xgehal-event.h>

__EXTERN_BEGIN_DECLS

/* Size in bytes of one queue "page" (allocation unit); see the
 * pages_initial/pages_max fields of xge_queue_t{}. */
#define XGE_QUEUE_BUF_SIZE		0x1000
/* Default upper bound, in bytes, on the per-event user data passed to
 * xge_queue_produce(). */
#define XGE_DEFAULT_EVENT_MAX_DATA_SIZE	16

/**
 * enum xge_queue_status_e - Enumerates return codes of the xge_queue
 * manipulation APIs.
 * @XGE_QUEUE_IS_FULL: Queue is full, need to grow.
 * @XGE_QUEUE_IS_EMPTY: Queue is empty.
 * @XGE_QUEUE_OUT_OF_MEMORY: Out of memory.
 * @XGE_QUEUE_NOT_ENOUGH_SPACE: Exceeded specified event size,
 * see xge_queue_consume().
 * @XGE_QUEUE_OK: Neither one of the codes listed above.
 *
 * Enumerates return codes of xge_queue_consume()
 * and xge_queue_produce() APIs.
 */
typedef enum xge_queue_status_e {
	XGE_QUEUE_OK = 0,
	XGE_QUEUE_IS_FULL = 1,
	XGE_QUEUE_IS_EMPTY = 2,
	XGE_QUEUE_OUT_OF_MEMORY = 3,
	XGE_QUEUE_NOT_ENOUGH_SPACE = 4
} xge_queue_status_e;

/* Opaque handle to a queue; obtained from xge_queue_create() and passed
 * to all other queue APIs. */
typedef void* xge_queue_h;

/**
 * struct xge_queue_item_t - Queue item.
 * @item: List item. Note that the queue is "built" on top of
 * the bi-directional linked list.
 * @event_type: Event type. Includes (but is not restricted to)
 * one of the xge_hal_event_e{} enumerated types.
 * @data_size: Size of the enqueued user data. Note that xge_queue_t
 * items are allowed to have variable sizes.
 * @is_critical: For critical events, e.g. ECC.
 * @context: Opaque (void*) "context", for instance event producer object.
 *
 * Item of the xge_queue_t{}. The queue is protected
 * in terms of multi-threaded concurrent access.
 * See also: xge_queue_t{}.
 */
typedef struct xge_queue_item_t {
	xge_list_t item;
	xge_hal_event_e event_type;
	int data_size;
	int is_critical;
	void *context;
} xge_queue_item_t;

/**
 * function xge_queued_f - Item-enqueued callback.
 * @data: Per-queue context independent of the event. E.g., device handle.
 * @event_type: HAL or ULD-defined event type. Note that HAL own
 * events are enumerated by xge_hal_event_e{}.
 *
 * Per-queue optional callback. If not NULL, called by HAL each
 * time an event gets added to the queue.
 */
typedef void (*xge_queued_f) (void *data, int event_type);

/**
 * struct xge_queue_t - Protected dynamic queue of variable-size items.
 * @start_ptr: Points to the start of the queue.
 * @end_ptr: Points to the end of the queue.
 * @head_ptr: Points to the head of the queue. It gets changed during queue
 * produce/consume operations.
 * @tail_ptr: Points to the tail of the queue. It gets changed during queue
 * produce/consume operations.
 * @lock: Lock for queue operations (synchronization purposes).
 * @pages_initial: Number of pages to be initially allocated at the time
 * of queue creation.
 * @pages_max: Max number of pages that can be allocated in the queue.
 * @pages_current: Number of pages currently allocated.
 * @list_head: Points to the list of queue elements that are produced, but yet
 * to be consumed.
 * @signal_callback: (TODO) NOTE(review): documented here, but the structure
 * below declares no member of this name — stale doc or missing field; verify.
 * @pdev: PCI device handle.
 * @irqh: PCI device IRQ handle.
 * @queued_func: Optional callback function to be called each time a new
 * item is added to the queue.
 * @queued_data: Argument to the callback function.
 * @has_critical_event: Non-zero, if the queue contains a critical event,
 * see xge_hal_event_e{}.
 *
 * Protected dynamically growing queue. The queue is used to support multiple
 * producer/consumer type scenarios. The queue is a strict FIFO: first come
 * first served.
 * Queue users may "produce" (see xge_queue_produce()) and "consume"
 * (see xge_queue_consume()) items (a.k.a. events) of variable sizes.
 * See also: xge_queue_item_t{}.
128 */ 129typedef struct xge_queue_t { 130 void *start_ptr; 131 void *end_ptr; 132 void *head_ptr; 133 void *tail_ptr; 134 spinlock_t lock; 135 unsigned int pages_initial; 136 unsigned int pages_max; 137 unsigned int pages_current; 138 xge_list_t list_head; 139 pci_dev_h pdev; 140 pci_irq_h irqh; 141 xge_queued_f queued_func; 142 void *queued_data; 143 int has_critical_event; 144} xge_queue_t; 145 146/* ========================== PUBLIC API ================================= */ 147 148xge_queue_h xge_queue_create(pci_dev_h pdev, pci_irq_h irqh, int pages_initial, 149 int pages_max, xge_queued_f queued_func, void *queued_data); 150 151void xge_queue_destroy(xge_queue_h queueh); 152 153void* xge_queue_item_data(xge_queue_item_t *item); 154 155xge_queue_status_e 156xge_queue_produce(xge_queue_h queueh, int event_type, void *context, 157 int is_critical, const int data_size, void *data); 158 159static inline xge_queue_status_e 160xge_queue_produce_context(xge_queue_h queueh, int event_type, void *context) { 161 return xge_queue_produce(queueh, event_type, context, 0, 0, 0); 162} 163 164xge_queue_status_e xge_queue_consume(xge_queue_h queueh, int data_max_size, 165 xge_queue_item_t *item); 166 167void xge_queue_flush(xge_queue_h queueh); 168 169/* ========================== PRIVATE API ================================= */ 170 171xge_queue_status_e __io_queue_grow(xge_queue_h qh); 172 173int __queue_get_reset_critical (xge_queue_h qh); 174 175__EXTERN_END_DECLS 176 177#endif /* XGE_QUEUE_H */ 178