1/*-
2 * Copyright (c) 2002-2007 Neterion, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * $FreeBSD$
27 */
28
29#include <dev/nxge/include/xge-queue.h>
30
31/**
32 * xge_queue_item_data - Get item's data.
33 * @item: Queue item.
34 *
35 * Returns:  item data(variable size). Note that xge_queue_t
 * contains items comprised of a fixed xge_queue_item_t "header"
37 * and a variable size data. This function returns the variable
38 * user-defined portion of the queue item.
39 */
40void* xge_queue_item_data(xge_queue_item_t *item)
41{
42	return (char *)item + sizeof(xge_queue_item_t);
43}
44
/*
 * __queue_consume - (Lockless) dequeue an item from the specified queue.
 *
 * @queue: Event queue.
 * @data_max_size: Maximum number of user-data bytes the caller's @item
 *                 buffer can accept.
 * @item: Caller-supplied buffer the dequeued item (header + data) is
 *        _copied_ into.
 *
 * Caller is expected to hold queue->lock; see xge_queue_consume() for
 * the locked wrapper.
 *
 * Returns: XGE_QUEUE_OK - success.
 * XGE_QUEUE_IS_EMPTY - queue has no items.
 * XGE_QUEUE_NOT_ENOUGH_SPACE - item's data is larger than @data_max_size.
 */
static xge_queue_status_e
__queue_consume(xge_queue_t *queue, int data_max_size, xge_queue_item_t *item)
{
	int real_size;
	xge_queue_item_t *elem;

	if (xge_list_is_empty(&queue->list_head))
	    return XGE_QUEUE_IS_EMPTY;

	/* FIFO: the oldest item is at the front of the embedded list. */
	elem = (xge_queue_item_t *)queue->list_head.next;
	if (elem->data_size > data_max_size)
	    return XGE_QUEUE_NOT_ENOUGH_SPACE;

	xge_list_remove(&elem->item);
	/* Total buffer footprint of the item: fixed header + user data. */
	real_size = elem->data_size + sizeof(xge_queue_item_t);
	/*
	 * Buffer space is reclaimed only when the item sits exactly at the
	 * buffer's head or tail; an item removed from the middle leaves its
	 * space unreclaimed until the queue drains (see reset below).
	 */
	if (queue->head_ptr == elem) {
	    queue->head_ptr = (char *)queue->head_ptr + real_size;
	    xge_debug_queue(XGE_TRACE,
	        "event_type: %d removing from the head: "
	        "0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
	        ":0x"XGE_OS_LLXFMT" elem 0x"XGE_OS_LLXFMT" length %d",
	        elem->event_type,
	        (u64)(ulong_t)queue->start_ptr,
	        (u64)(ulong_t)queue->head_ptr,
	        (u64)(ulong_t)queue->tail_ptr,
	        (u64)(ulong_t)queue->end_ptr,
	        (u64)(ulong_t)elem,
	        real_size);
	} else if ((char *)queue->tail_ptr - real_size == (char*)elem) {
	    queue->tail_ptr = (char *)queue->tail_ptr - real_size;
	    xge_debug_queue(XGE_TRACE,
	        "event_type: %d removing from the tail: "
	        "0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
	        ":0x"XGE_OS_LLXFMT" elem 0x"XGE_OS_LLXFMT" length %d",
	        elem->event_type,
	        (u64)(ulong_t)queue->start_ptr,
	        (u64)(ulong_t)queue->head_ptr,
	        (u64)(ulong_t)queue->tail_ptr,
	        (u64)(ulong_t)queue->end_ptr,
	        (u64)(ulong_t)elem,
	        real_size);
	} else {
	    xge_debug_queue(XGE_TRACE,
	        "event_type: %d removing from the list: "
	        "0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
	        ":0x"XGE_OS_LLXFMT" elem 0x"XGE_OS_LLXFMT" length %d",
	        elem->event_type,
	        (u64)(ulong_t)queue->start_ptr,
	        (u64)(ulong_t)queue->head_ptr,
	        (u64)(ulong_t)queue->tail_ptr,
	        (u64)(ulong_t)queue->end_ptr,
	        (u64)(ulong_t)elem,
	        real_size);
	}
	xge_assert(queue->tail_ptr >= queue->head_ptr);
	xge_assert(queue->tail_ptr >= queue->start_ptr &&
	        queue->tail_ptr <= queue->end_ptr);
	xge_assert(queue->head_ptr >= queue->start_ptr &&
	        queue->head_ptr < queue->end_ptr);
	/* Copy header and payload out; the in-buffer copy is now dead. */
	xge_os_memcpy(item, elem, sizeof(xge_queue_item_t));
	xge_os_memcpy(xge_queue_item_data(item), xge_queue_item_data(elem),
	        elem->data_size);

	if (xge_list_is_empty(&queue->list_head)) {
	    /* reset buffer pointers just to be clean */
	    queue->head_ptr = queue->tail_ptr = queue->start_ptr;
	}
	return XGE_QUEUE_OK;
}
120
121/**
122 * xge_queue_produce - Enqueue an item (see xge_queue_item_t{})
123 *                      into the specified queue.
124 * @queueh: Queue handle.
125 * @event_type: Event type. One of the enumerated event types
126 *              that both consumer and producer "understand".
127 *              For an example, please refer to xge_hal_event_e.
128 * @context: Opaque (void*) "context", for instance event producer object.
129 * @is_critical: For critical event, e.g. ECC.
130 * @data_size: Size of the @data.
131 * @data: User data of variable @data_size that is _copied_ into
132 *        the new queue item (see xge_queue_item_t{}). Upon return
133 *        from the call the @data memory can be re-used or released.
134 *
135 * Enqueue a new item.
136 *
137 * Returns: XGE_QUEUE_OK - success.
138 * XGE_QUEUE_IS_FULL - Queue is full.
139 * XGE_QUEUE_OUT_OF_MEMORY - Memory allocation failed.
140 *
141 * See also: xge_queue_item_t{}, xge_queue_consume().
142 */
143xge_queue_status_e
144xge_queue_produce(xge_queue_h queueh, int event_type, void *context,
145	    int is_critical, const int data_size, void *data)
146{
147	xge_queue_t *queue = (xge_queue_t *)queueh;
148	int real_size = data_size + sizeof(xge_queue_item_t);
149	xge_queue_item_t *elem;
150	unsigned long flags = 0;
151
152	xge_assert(real_size <= XGE_QUEUE_BUF_SIZE);
153
154	xge_os_spin_lock_irq(&queue->lock, flags);
155
156	if (is_critical && !queue->has_critical_event)  {
157	    unsigned char item_buf[sizeof(xge_queue_item_t) +
158	            XGE_DEFAULT_EVENT_MAX_DATA_SIZE];
159	    xge_queue_item_t *item = (xge_queue_item_t *)(void *)item_buf;
160	xge_os_memzero(item_buf, (sizeof(xge_queue_item_t) +
161	                         XGE_DEFAULT_EVENT_MAX_DATA_SIZE));
162
163	        while (__queue_consume(queue,
164	                   XGE_DEFAULT_EVENT_MAX_DATA_SIZE,
165	                   item) != XGE_QUEUE_IS_EMPTY)
166	            ; /* do nothing */
167	}
168
169try_again:
170	if ((char *)queue->tail_ptr + real_size <= (char *)queue->end_ptr) {
171	    elem = (xge_queue_item_t *) queue->tail_ptr;
172	    queue->tail_ptr = (void *)((char *)queue->tail_ptr + real_size);
173	    xge_debug_queue(XGE_TRACE,
174	        "event_type: %d adding to the tail: "
175	        "0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
176	        ":0x"XGE_OS_LLXFMT" elem 0x"XGE_OS_LLXFMT" length %d",
177	        event_type,
178	        (u64)(ulong_t)queue->start_ptr,
179	        (u64)(ulong_t)queue->head_ptr,
180	        (u64)(ulong_t)queue->tail_ptr,
181	        (u64)(ulong_t)queue->end_ptr,
182	        (u64)(ulong_t)elem,
183	        real_size);
184	} else if ((char *)queue->head_ptr - real_size >=
185	                (char *)queue->start_ptr) {
186	    elem = (xge_queue_item_t *) ((char *)queue->head_ptr - real_size);
187	    queue->head_ptr = elem;
188	    xge_debug_queue(XGE_TRACE,
189	        "event_type: %d adding to the head: "
190	        "0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
191	        ":0x"XGE_OS_LLXFMT" length %d",
192	        event_type,
193	        (u64)(ulong_t)queue->start_ptr,
194	        (u64)(ulong_t)queue->head_ptr,
195	        (u64)(ulong_t)queue->tail_ptr,
196	        (u64)(ulong_t)queue->end_ptr,
197	        real_size);
198	} else {
199	    xge_queue_status_e status;
200
201	    if (queue->pages_current >= queue->pages_max) {
202	        xge_os_spin_unlock_irq(&queue->lock, flags);
203	        return XGE_QUEUE_IS_FULL;
204	    }
205
206	    if (queue->has_critical_event) {
207	    xge_os_spin_unlock_irq(&queue->lock, flags);
208	        return XGE_QUEUE_IS_FULL;
209	}
210
211	    /* grow */
212	    status = __io_queue_grow(queueh);
213	    if (status != XGE_QUEUE_OK) {
214	        xge_os_spin_unlock_irq(&queue->lock, flags);
215	        return status;
216	    }
217
218	    goto try_again;
219	}
220	xge_assert(queue->tail_ptr >= queue->head_ptr);
221	xge_assert(queue->tail_ptr >= queue->start_ptr &&
222	        queue->tail_ptr <= queue->end_ptr);
223	xge_assert(queue->head_ptr >= queue->start_ptr &&
224	        queue->head_ptr < queue->end_ptr);
225	elem->data_size = data_size;
226	elem->event_type = (xge_hal_event_e) event_type;
227	elem->is_critical = is_critical;
228	if (is_critical)
229	        queue->has_critical_event = 1;
230	elem->context = context;
231	xge_os_memcpy(xge_queue_item_data(elem), data, data_size);
232	xge_list_insert_before(&elem->item, &queue->list_head);
233	xge_os_spin_unlock_irq(&queue->lock, flags);
234
235	/* no lock taken! */
236	queue->queued_func(queue->queued_data, event_type);
237
238	return XGE_QUEUE_OK;
239}
240
241
242/**
243 * xge_queue_create - Create protected first-in-first-out queue.
244 * @pdev: PCI device handle.
245 * @irqh: PCI device IRQ handle.
246 * @pages_initial: Number of pages to be initially allocated at the
247 * time of queue creation.
248 * @pages_max: Max number of pages that can be allocated in the queue.
249 * @queued: Optional callback function to be called each time a new item is
250 * added to the queue.
251 * @queued_data: Argument to the callback function.
252 *
253 * Create protected (fifo) queue.
254 *
255 * Returns: Pointer to xge_queue_t structure,
256 * NULL - on failure.
257 *
258 * See also: xge_queue_item_t{}, xge_queue_destroy().
259 */
260xge_queue_h
261xge_queue_create(pci_dev_h pdev, pci_irq_h irqh, int pages_initial,
262	    int pages_max, xge_queued_f queued, void *queued_data)
263{
264	xge_queue_t *queue;
265
266	if ((queue = (xge_queue_t *) xge_os_malloc(pdev, sizeof(xge_queue_t))) == NULL)
267	    return NULL;
268
269	queue->queued_func = queued;
270	queue->queued_data = queued_data;
271	queue->pdev = pdev;
272	queue->irqh = irqh;
273	queue->pages_current = pages_initial;
274	queue->start_ptr = xge_os_malloc(pdev, queue->pages_current *
275	                               XGE_QUEUE_BUF_SIZE);
276	if (queue->start_ptr == NULL) {
277	    xge_os_free(pdev, queue, sizeof(xge_queue_t));
278	    return NULL;
279	}
280	queue->head_ptr = queue->tail_ptr = queue->start_ptr;
281	queue->end_ptr = (char *)queue->start_ptr +
282	    queue->pages_current * XGE_QUEUE_BUF_SIZE;
283	xge_os_spin_lock_init_irq(&queue->lock, irqh);
284	queue->pages_initial = pages_initial;
285	queue->pages_max = pages_max;
286	xge_list_init(&queue->list_head);
287
288	return queue;
289}
290
291/**
292 * xge_queue_destroy - Destroy xge_queue_t object.
293 * @queueh: Queue handle.
294 *
295 * Destroy the specified xge_queue_t object.
296 *
297 * See also: xge_queue_item_t{}, xge_queue_create().
298 */
299void xge_queue_destroy(xge_queue_h queueh)
300{
301	xge_queue_t *queue = (xge_queue_t *)queueh;
302	xge_os_spin_lock_destroy_irq(&queue->lock, queue->irqh);
303	if (!xge_list_is_empty(&queue->list_head)) {
304	    xge_debug_queue(XGE_ERR, "destroying non-empty queue 0x"
305	            XGE_OS_LLXFMT, (u64)(ulong_t)queue);
306	}
307	xge_os_free(queue->pdev, queue->start_ptr, queue->pages_current *
308	          XGE_QUEUE_BUF_SIZE);
309
310	xge_os_free(queue->pdev, queue, sizeof(xge_queue_t));
311}
312
/*
 * __io_queue_grow - Dynamically increases the size of the queue.
 * @queueh: Queue handle.
 *
 * This function is called in the case of no slot available in the queue
 * to accommodate the newly received event.
 * Note that queue cannot grow beyond the max size specified for the
 * queue.
 *
 * Caller is expected to hold queue->lock (called from xge_queue_produce()).
 *
 * Returns XGE_QUEUE_OK: On success.
 * XGE_QUEUE_OUT_OF_MEMORY : No memory is available.
 */
xge_queue_status_e
__io_queue_grow(xge_queue_h queueh)
{
	xge_queue_t *queue = (xge_queue_t *)queueh;
	void *newbuf, *oldbuf;
	xge_list_t *item;
	xge_queue_item_t *elem;

	xge_debug_queue(XGE_TRACE, "queue 0x"XGE_OS_LLXFMT":%d is growing",
	         (u64)(ulong_t)queue, queue->pages_current);

	/* Grow by exactly one page: allocate, then copy the old contents. */
	newbuf = xge_os_malloc(queue->pdev,
	        (queue->pages_current + 1) * XGE_QUEUE_BUF_SIZE);
	if (newbuf == NULL)
	    return XGE_QUEUE_OUT_OF_MEMORY;

	xge_os_memcpy(newbuf, queue->start_ptr,
	       queue->pages_current * XGE_QUEUE_BUF_SIZE);
	oldbuf = queue->start_ptr;

	/* adjust queue sizes */
	queue->start_ptr = newbuf;
	queue->end_ptr = (char *)newbuf +
	        (queue->pages_current + 1) * XGE_QUEUE_BUF_SIZE;
	/* head/tail keep the same offset within the relocated buffer */
	queue->tail_ptr = (char *)newbuf + ((char *)queue->tail_ptr -
	                    (char *)oldbuf);
	queue->head_ptr = (char *)newbuf + ((char *)queue->head_ptr -
	                    (char *)oldbuf);
	/* grow is only reached from produce on a non-empty queue */
	xge_assert(!xge_list_is_empty(&queue->list_head));
	/*
	 * The list nodes are embedded inside the copied buffer, so every
	 * next/prev pointer that pointed into the old buffer must be
	 * rebased by the same (newbuf - oldbuf) delta. The list head
	 * itself lives in the queue descriptor, not the buffer, so
	 * pointers AT the head are left untouched below.
	 */
	queue->list_head.next = (xge_list_t *) (void *)((char *)newbuf +
	        ((char *)queue->list_head.next - (char *)oldbuf));
	queue->list_head.prev = (xge_list_t *) (void *)((char *)newbuf +
	        ((char *)queue->list_head.prev - (char *)oldbuf));
	/* adjust queue list */
	xge_list_for_each(item, &queue->list_head) {
	    elem = xge_container_of(item, xge_queue_item_t, item);
	    if (elem->item.next != &queue->list_head) {
	        elem->item.next =
	            (xge_list_t*)(void *)((char *)newbuf +
	             ((char *)elem->item.next - (char *)oldbuf));
	    }
	    if (elem->item.prev != &queue->list_head) {
	        elem->item.prev =
	            (xge_list_t*) (void *)((char *)newbuf +
	             ((char *)elem->item.prev - (char *)oldbuf));
	    }
	}
	xge_os_free(queue->pdev, oldbuf,
	      queue->pages_current * XGE_QUEUE_BUF_SIZE);
	queue->pages_current++;

	return XGE_QUEUE_OK;
}
378
379/**
380 * xge_queue_consume - Dequeue an item from the specified queue.
381 * @queueh: Queue handle.
382 * @data_max_size: Maximum expected size of the item.
383 * @item: Memory area into which the item is _copied_ upon return
384 *        from the function.
385 *
386 * Dequeue an item from the queue. The caller is required to provide
387 * enough space for the item.
388 *
389 * Returns: XGE_QUEUE_OK - success.
390 * XGE_QUEUE_IS_EMPTY - Queue is empty.
 * XGE_QUEUE_NOT_ENOUGH_SPACE - Requested item size (@data_max_size)
 * is too small to accommodate an item from the queue.
393 *
394 * See also: xge_queue_item_t{}, xge_queue_produce().
395 */
396xge_queue_status_e
397xge_queue_consume(xge_queue_h queueh, int data_max_size, xge_queue_item_t *item)
398{
399	xge_queue_t *queue = (xge_queue_t *)queueh;
400	unsigned long flags = 0;
401	xge_queue_status_e status;
402
403	xge_os_spin_lock_irq(&queue->lock, flags);
404	status = __queue_consume(queue, data_max_size, item);
405	xge_os_spin_unlock_irq(&queue->lock, flags);
406
407	return status;
408}
409
410
411/**
412 * xge_queue_flush - Flush, or empty, the queue.
413 * @queueh: Queue handle.
414 *
415 * Flush the queue, i.e. make it empty by consuming all events
416 * without invoking the event processing logic (callbacks, etc.)
417 */
418void xge_queue_flush(xge_queue_h queueh)
419{
420	unsigned char item_buf[sizeof(xge_queue_item_t) +
421	            XGE_DEFAULT_EVENT_MAX_DATA_SIZE];
422	xge_queue_item_t *item = (xge_queue_item_t *)(void *)item_buf;
423  xge_os_memzero(item_buf, (sizeof(xge_queue_item_t) +
424	                         XGE_DEFAULT_EVENT_MAX_DATA_SIZE));
425
426	/* flush queue by consuming all enqueued items */
427	while (xge_queue_consume(queueh,
428	                XGE_DEFAULT_EVENT_MAX_DATA_SIZE,
429	                item) != XGE_QUEUE_IS_EMPTY) {
430	    /* do nothing */
431	    xge_debug_queue(XGE_TRACE, "item "XGE_OS_LLXFMT"(%d) flushed",
432	             item, item->event_type);
433	}
434	(void) __queue_get_reset_critical (queueh);
435}
436
437/*
438 * __queue_get_reset_critical - Check for critical events in the queue,
439 * @qh: Queue handle.
440 *
441 * Check for critical event(s) in the queue, and reset the
442 * "has-critical-event" flag upon return.
443 * Returns: 1 - if the queue contains atleast one critical event.
444 * 0 - If there are no critical events in the queue.
445 */
446int __queue_get_reset_critical (xge_queue_h qh) {
447	xge_queue_t* queue = (xge_queue_t*)qh;
448	int c = queue->has_critical_event;
449
450	queue->has_critical_event = 0;
451	    return c;
452}
453