/*-
 * Copyright (c) 2002-2007 Neterion, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/dev/nxge/xgehal/xge-queue.c 171095 2007-06-29 22:47:18Z sam $
 */

/*
 *  FileName :    xge-queue.c
 *
 *  Description:  serialized event queue
 *
 *  Created:      7 June 2004
 */

#include <dev/nxge/include/xge-queue.h>

/**
 * xge_queue_item_data - Get item's data.
 * @item: Queue item.
 *
 * Returns: item data (variable size). Note that xge_queue_t
 * contains items comprised of a fixed xge_queue_item_t "header"
 * and variable size data. This function returns the variable
 * user-defined portion of the queue item.
 */
void* xge_queue_item_data(xge_queue_item_t *item)
{
	return (char *)item + sizeof(xge_queue_item_t);
}
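
/*
 * Usage sketch (illustrative only): after xge_queue_consume() has copied an
 * item into a caller-provided buffer, the variable-size payload follows the
 * fixed header. The payload type (my_event_data_t) is hypothetical; it is
 * whatever the producer copied in via xge_queue_produce().
 *
 *	xge_queue_item_t *item = (xge_queue_item_t *)(void *)item_buf;
 *	my_event_data_t *ev;
 *
 *	if (xge_queue_consume(queueh, sizeof(my_event_data_t),
 *			      item) == XGE_QUEUE_OK)
 *		ev = (my_event_data_t *)xge_queue_item_data(item);
 */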

/*
 * __queue_consume - (Lockless) dequeue an item from the specified queue.
 * @queue: Event queue.
 *
 * The caller is expected to hold the queue lock; see xge_queue_consume()
 * for the locked wrapper.
 */
static xge_queue_status_e
__queue_consume(xge_queue_t *queue, int data_max_size, xge_queue_item_t *item)
{
	int real_size;
	xge_queue_item_t *elem;

	if (xge_list_is_empty(&queue->list_head))
		return XGE_QUEUE_IS_EMPTY;

	elem = (xge_queue_item_t *)queue->list_head.next;
	if (elem->data_size > data_max_size)
		return XGE_QUEUE_NOT_ENOUGH_SPACE;

	xge_list_remove(&elem->item);
	real_size = elem->data_size + sizeof(xge_queue_item_t);
	if (queue->head_ptr == elem) {
		queue->head_ptr = (char *)queue->head_ptr + real_size;
		xge_debug_queue(XGE_TRACE,
			"event_type: %d removing from the head: "
			"0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
			":0x"XGE_OS_LLXFMT" elem 0x"XGE_OS_LLXFMT" length %d",
			elem->event_type,
			(u64)(ulong_t)queue->start_ptr,
			(u64)(ulong_t)queue->head_ptr,
			(u64)(ulong_t)queue->tail_ptr,
			(u64)(ulong_t)queue->end_ptr,
			(u64)(ulong_t)elem,
			real_size);
	} else if ((char *)queue->tail_ptr - real_size == (char *)elem) {
		queue->tail_ptr = (char *)queue->tail_ptr - real_size;
		xge_debug_queue(XGE_TRACE,
			"event_type: %d removing from the tail: "
			"0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
			":0x"XGE_OS_LLXFMT" elem 0x"XGE_OS_LLXFMT" length %d",
			elem->event_type,
			(u64)(ulong_t)queue->start_ptr,
			(u64)(ulong_t)queue->head_ptr,
			(u64)(ulong_t)queue->tail_ptr,
			(u64)(ulong_t)queue->end_ptr,
			(u64)(ulong_t)elem,
			real_size);
	} else {
		xge_debug_queue(XGE_TRACE,
			"event_type: %d removing from the list: "
			"0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
			":0x"XGE_OS_LLXFMT" elem 0x"XGE_OS_LLXFMT" length %d",
			elem->event_type,
			(u64)(ulong_t)queue->start_ptr,
			(u64)(ulong_t)queue->head_ptr,
			(u64)(ulong_t)queue->tail_ptr,
			(u64)(ulong_t)queue->end_ptr,
			(u64)(ulong_t)elem,
			real_size);
	}
	xge_assert(queue->tail_ptr >= queue->head_ptr);
	xge_assert(queue->tail_ptr >= queue->start_ptr &&
		    queue->tail_ptr <= queue->end_ptr);
	xge_assert(queue->head_ptr >= queue->start_ptr &&
		    queue->head_ptr < queue->end_ptr);
	xge_os_memcpy(item, elem, sizeof(xge_queue_item_t));
	xge_os_memcpy(xge_queue_item_data(item), xge_queue_item_data(elem),
		    elem->data_size);

	if (xge_list_is_empty(&queue->list_head)) {
		/* reset buffer pointers just to be clean */
		queue->head_ptr = queue->tail_ptr = queue->start_ptr;
	}
	return XGE_QUEUE_OK;
}

/**
 * xge_queue_produce - Enqueue an item (see xge_queue_item_t{})
 *                      into the specified queue.
 * @queueh: Queue handle.
 * @event_type: Event type. One of the enumerated event types
 *              that both consumer and producer "understand".
 *              For an example, please refer to xge_hal_event_e.
 * @context: Opaque (void *) "context", for instance the event producer object.
 * @is_critical: Non-zero for critical events, e.g. ECC errors.
 * @data_size: Size of the @data.
 * @data: User data of variable @data_size that is _copied_ into
 *        the new queue item (see xge_queue_item_t{}). Upon return
 *        from the call the @data memory can be re-used or released.
 *
 * Enqueue a new item.
 *
 * Returns: XGE_QUEUE_OK - success.
 * XGE_QUEUE_IS_FULL - Queue is full.
 * XGE_QUEUE_OUT_OF_MEMORY - Memory allocation failed.
 *
 * See also: xge_queue_item_t{}, xge_queue_consume().
 */
xge_queue_status_e
xge_queue_produce(xge_queue_h queueh, int event_type, void *context,
		int is_critical, const int data_size, void *data)
{
	xge_queue_t *queue = (xge_queue_t *)queueh;
	int real_size = data_size + sizeof(xge_queue_item_t);
	xge_queue_item_t *elem;
	unsigned long flags = 0;

	xge_assert(real_size <= XGE_QUEUE_BUF_SIZE);

	xge_os_spin_lock_irq(&queue->lock, flags);

	if (is_critical && !queue->has_critical_event)  {
		unsigned char item_buf[sizeof(xge_queue_item_t) +
				XGE_DEFAULT_EVENT_MAX_DATA_SIZE];
		xge_queue_item_t *item = (xge_queue_item_t *)(void *)item_buf;
		xge_os_memzero(item_buf, (sizeof(xge_queue_item_t) +
					XGE_DEFAULT_EVENT_MAX_DATA_SIZE));

		/* drop pending non-critical events to make room for
		 * the critical one */
		while (__queue_consume(queue,
				       XGE_DEFAULT_EVENT_MAX_DATA_SIZE,
				       item) != XGE_QUEUE_IS_EMPTY)
			; /* do nothing */
	}

try_again:
	if ((char *)queue->tail_ptr + real_size <= (char *)queue->end_ptr) {
		elem = (xge_queue_item_t *)queue->tail_ptr;
		queue->tail_ptr = (void *)((char *)queue->tail_ptr + real_size);
		xge_debug_queue(XGE_TRACE,
			"event_type: %d adding to the tail: "
			"0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
			":0x"XGE_OS_LLXFMT" elem 0x"XGE_OS_LLXFMT" length %d",
			event_type,
			(u64)(ulong_t)queue->start_ptr,
			(u64)(ulong_t)queue->head_ptr,
			(u64)(ulong_t)queue->tail_ptr,
			(u64)(ulong_t)queue->end_ptr,
			(u64)(ulong_t)elem,
			real_size);
	} else if ((char *)queue->head_ptr - real_size >=
					(char *)queue->start_ptr) {
		elem = (xge_queue_item_t *)((char *)queue->head_ptr - real_size);
		queue->head_ptr = elem;
		xge_debug_queue(XGE_TRACE,
			"event_type: %d adding to the head: "
			"0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
			":0x"XGE_OS_LLXFMT" length %d",
			event_type,
			(u64)(ulong_t)queue->start_ptr,
			(u64)(ulong_t)queue->head_ptr,
			(u64)(ulong_t)queue->tail_ptr,
			(u64)(ulong_t)queue->end_ptr,
			real_size);
	} else {
		xge_queue_status_e status;

		if (queue->pages_current >= queue->pages_max) {
			xge_os_spin_unlock_irq(&queue->lock, flags);
			return XGE_QUEUE_IS_FULL;
		}

		if (queue->has_critical_event) {
			xge_os_spin_unlock_irq(&queue->lock, flags);
			return XGE_QUEUE_IS_FULL;
		}

		/* grow */
		status = __io_queue_grow(queueh);
		if (status != XGE_QUEUE_OK) {
			xge_os_spin_unlock_irq(&queue->lock, flags);
			return status;
		}

		goto try_again;
	}
	xge_assert(queue->tail_ptr >= queue->head_ptr);
	xge_assert(queue->tail_ptr >= queue->start_ptr &&
		    queue->tail_ptr <= queue->end_ptr);
	xge_assert(queue->head_ptr >= queue->start_ptr &&
		    queue->head_ptr < queue->end_ptr);
	elem->data_size = data_size;
	elem->event_type = (xge_hal_event_e) event_type;
	elem->is_critical = is_critical;
	if (is_critical)
		queue->has_critical_event = 1;
	elem->context = context;
	xge_os_memcpy(xge_queue_item_data(elem), data, data_size);
	xge_list_insert_before(&elem->item, &queue->list_head);
	xge_os_spin_unlock_irq(&queue->lock, flags);

	/* no lock taken! the callback is optional (see xge_queue_create) */
	if (queue->queued_func)
		queue->queued_func(queue->queued_data, event_type);

	return XGE_QUEUE_OK;
}
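
/*
 * Production sketch (illustrative; MY_EVENT_ECC, my_context and
 * my_read_error_register() are hypothetical names, not defined by this
 * file): a producer, typically an ISR or error handler, copies a small
 * event record into the queue and returns. Because @data is copied, the
 * local variable may be reused right after the call. XGE_QUEUE_IS_FULL
 * means the queue is at pages_max, or a critical event is already pending
 * and further growth is refused.
 *
 *	u64 err_snapshot = my_read_error_register();
 *	xge_queue_status_e status;
 *
 *	status = xge_queue_produce(queueh, MY_EVENT_ECC, my_context,
 *				   1, sizeof(err_snapshot), &err_snapshot);
 */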


/**
 * xge_queue_create - Create protected first-in-first-out queue.
 * @pdev: PCI device handle.
 * @irqh: PCI device IRQ handle.
 * @pages_initial: Number of pages to be initially allocated at the
 * time of queue creation.
 * @pages_max: Max number of pages that can be allocated in the queue.
 * @queued: Optional callback function to be called each time a new item is
 * added to the queue.
 * @queued_data: Argument to the callback function.
 *
 * Create a protected (FIFO) queue.
 *
 * Returns: Pointer to xge_queue_t structure,
 * NULL - on failure.
 *
 * See also: xge_queue_item_t{}, xge_queue_destroy().
 */
xge_queue_h
xge_queue_create(pci_dev_h pdev, pci_irq_h irqh, int pages_initial,
		int pages_max, xge_queued_f queued, void *queued_data)
{
	xge_queue_t *queue;

	if ((queue = (xge_queue_t *)xge_os_malloc(pdev,
	    sizeof(xge_queue_t))) == NULL)
		return NULL;

	queue->queued_func = queued;
	queue->queued_data = queued_data;
	queue->pdev = pdev;
	queue->irqh = irqh;
	queue->pages_current = pages_initial;
	queue->start_ptr = xge_os_malloc(pdev, queue->pages_current *
	                               XGE_QUEUE_BUF_SIZE);
	if (queue->start_ptr == NULL) {
		xge_os_free(pdev, queue, sizeof(xge_queue_t));
		return NULL;
	}
	queue->head_ptr = queue->tail_ptr = queue->start_ptr;
	queue->end_ptr = (char *)queue->start_ptr +
		queue->pages_current * XGE_QUEUE_BUF_SIZE;
	xge_os_spin_lock_init_irq(&queue->lock, irqh);
	queue->pages_initial = pages_initial;
	queue->pages_max = pages_max;
	xge_list_init(&queue->list_head);

	return queue;
}
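
/*
 * Creation sketch (illustrative; the page counts and callback body are
 * assumptions, and the xge_queued_f signature is inferred from the call
 * site in xge_queue_produce()): the optional @queued callback runs without
 * the queue lock held each time an item is produced, making it a natural
 * place to schedule the consumer.
 *
 *	static void
 *	my_queued(void *data, int event_type)
 *	{
 *		(wake up or schedule the event consumer here)
 *	}
 *
 *	queueh = xge_queue_create(pdev, irqh, 1, 4, my_queued, my_data);
 *	if (queueh == NULL)
 *		(handle allocation failure);
 */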

/**
 * xge_queue_destroy - Destroy xge_queue_t object.
 * @queueh: Queue handle.
 *
 * Destroy the specified xge_queue_t object.
 *
 * See also: xge_queue_item_t{}, xge_queue_create().
 */
void xge_queue_destroy(xge_queue_h queueh)
{
	xge_queue_t *queue = (xge_queue_t *)queueh;
	xge_os_spin_lock_destroy_irq(&queue->lock, queue->irqh);
	if (!xge_list_is_empty(&queue->list_head)) {
		xge_debug_queue(XGE_ERR, "destroying non-empty queue 0x"
				XGE_OS_LLXFMT, (u64)(ulong_t)queue);
	}
	xge_os_free(queue->pdev, queue->start_ptr, queue->pages_current *
	          XGE_QUEUE_BUF_SIZE);

	xge_os_free(queue->pdev, queue, sizeof(xge_queue_t));
}

/*
 * __io_queue_grow - Dynamically increase the size of the queue.
 * @queueh: Queue handle.
 *
 * This function is called when there is no slot available in the queue
 * to accommodate the newly received event.
 * Note that the queue cannot grow beyond the max size specified for the
 * queue.
 *
 * Returns XGE_QUEUE_OK: On success.
 * XGE_QUEUE_OUT_OF_MEMORY : No memory is available.
 */
xge_queue_status_e
__io_queue_grow(xge_queue_h queueh)
{
	xge_queue_t *queue = (xge_queue_t *)queueh;
	void *newbuf, *oldbuf;
	xge_list_t *item;
	xge_queue_item_t *elem;

	xge_debug_queue(XGE_TRACE, "queue 0x"XGE_OS_LLXFMT":%d is growing",
			 (u64)(ulong_t)queue, queue->pages_current);

	newbuf = xge_os_malloc(queue->pdev,
	        (queue->pages_current + 1) * XGE_QUEUE_BUF_SIZE);
	if (newbuf == NULL)
		return XGE_QUEUE_OUT_OF_MEMORY;

	xge_os_memcpy(newbuf, queue->start_ptr,
	       queue->pages_current * XGE_QUEUE_BUF_SIZE);
	oldbuf = queue->start_ptr;

	/* adjust queue sizes */
	queue->start_ptr = newbuf;
	queue->end_ptr = (char *)newbuf +
			(queue->pages_current + 1) * XGE_QUEUE_BUF_SIZE;
	queue->tail_ptr = (char *)newbuf + ((char *)queue->tail_ptr -
					    (char *)oldbuf);
	queue->head_ptr = (char *)newbuf + ((char *)queue->head_ptr -
					    (char *)oldbuf);
	xge_assert(!xge_list_is_empty(&queue->list_head));
	queue->list_head.next = (xge_list_t *)(void *)((char *)newbuf +
			((char *)queue->list_head.next - (char *)oldbuf));
	queue->list_head.prev = (xge_list_t *)(void *)((char *)newbuf +
			((char *)queue->list_head.prev - (char *)oldbuf));
	/* adjust queue list */
	xge_list_for_each(item, &queue->list_head) {
		elem = xge_container_of(item, xge_queue_item_t, item);
		if (elem->item.next != &queue->list_head) {
			elem->item.next =
				(xge_list_t *)(void *)((char *)newbuf +
				 ((char *)elem->item.next - (char *)oldbuf));
		}
		if (elem->item.prev != &queue->list_head) {
			elem->item.prev =
				(xge_list_t *)(void *)((char *)newbuf +
				 ((char *)elem->item.prev - (char *)oldbuf));
		}
	}
	xge_os_free(queue->pdev, oldbuf,
		  queue->pages_current * XGE_QUEUE_BUF_SIZE);
	queue->pages_current++;

	return XGE_QUEUE_OK;
}

/**
 * xge_queue_consume - Dequeue an item from the specified queue.
 * @queueh: Queue handle.
 * @data_max_size: Maximum expected size of the item.
 * @item: Memory area into which the item is _copied_ upon return
 *        from the function.
 *
 * Dequeue an item from the queue. The caller is required to provide
 * enough space for the item.
 *
 * Returns: XGE_QUEUE_OK - success.
 * XGE_QUEUE_IS_EMPTY - Queue is empty.
 * XGE_QUEUE_NOT_ENOUGH_SPACE - Requested item size (@data_max_size)
 * is too small to accommodate an item from the queue.
 *
 * See also: xge_queue_item_t{}, xge_queue_produce().
 */
xge_queue_status_e
xge_queue_consume(xge_queue_h queueh, int data_max_size, xge_queue_item_t *item)
{
	xge_queue_t *queue = (xge_queue_t *)queueh;
	unsigned long flags = 0;
	xge_queue_status_e status;

	xge_os_spin_lock_irq(&queue->lock, flags);
	status = __queue_consume(queue, data_max_size, item);
	xge_os_spin_unlock_irq(&queue->lock, flags);

	return status;
}
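
/*
 * Consumption sketch (illustrative; my_dispatch_event() is hypothetical):
 * a consumer drains the queue into an on-stack buffer sized for the largest
 * expected payload, then dispatches on the copied item's event type. This
 * mirrors the loop used by xge_queue_flush() below.
 *
 *	unsigned char buf[sizeof(xge_queue_item_t) +
 *				XGE_DEFAULT_EVENT_MAX_DATA_SIZE];
 *	xge_queue_item_t *item = (xge_queue_item_t *)(void *)buf;
 *
 *	while (xge_queue_consume(queueh, XGE_DEFAULT_EVENT_MAX_DATA_SIZE,
 *				 item) == XGE_QUEUE_OK)
 *		my_dispatch_event(item->event_type, item->context,
 *				  xge_queue_item_data(item), item->data_size);
 */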


/**
 * xge_queue_flush - Flush, or empty, the queue.
 * @queueh: Queue handle.
 *
 * Flush the queue, i.e. make it empty by consuming all events
 * without invoking the event processing logic (callbacks, etc.)
 */
void xge_queue_flush(xge_queue_h queueh)
{
	unsigned char item_buf[sizeof(xge_queue_item_t) +
				XGE_DEFAULT_EVENT_MAX_DATA_SIZE];
	xge_queue_item_t *item = (xge_queue_item_t *)(void *)item_buf;
	xge_os_memzero(item_buf, (sizeof(xge_queue_item_t) +
				XGE_DEFAULT_EVENT_MAX_DATA_SIZE));

	/* flush queue by consuming all enqueued items */
	while (xge_queue_consume(queueh,
				    XGE_DEFAULT_EVENT_MAX_DATA_SIZE,
				    item) != XGE_QUEUE_IS_EMPTY) {
		/* do nothing */
		xge_debug_queue(XGE_TRACE, "item 0x"XGE_OS_LLXFMT"(%d) flushed",
				 (u64)(ulong_t)item, item->event_type);
	}
	(void) __queue_get_reset_critical(queueh);
}
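
/*
 * Teardown sketch (ordering is an assumption based on the non-empty-queue
 * warning in xge_queue_destroy()): flushing first discards any pending items
 * so destroy does not log a "destroying non-empty queue" error.
 *
 *	xge_queue_flush(queueh);
 *	xge_queue_destroy(queueh);
 */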

/*
 * __queue_get_reset_critical - Check for critical events in the queue.
 * @qh: Queue handle.
 *
 * Check for critical event(s) in the queue, and reset the
 * "has-critical-event" flag upon return.
 * Returns: 1 - if the queue contains at least one critical event.
 * 0 - If there are no critical events in the queue.
 */
int __queue_get_reset_critical(xge_queue_h qh)
{
	xge_queue_t *queue = (xge_queue_t *)qh;
	int c = queue->has_critical_event;

	queue->has_critical_event = 0;
	return c;
}