/*-
 * Copyright (c) 2002-2007 Neterion, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <dev/nxge/include/xge-os-pal.h>
#include <dev/nxge/include/xgehal-mm.h>
#include <dev/nxge/include/xge-debug.h>

/*
 * __hal_mempool_grow
 *
 * Grow the mempool by up to %num_allocate memblocks; the number of
 * memblocks actually added is returned via %num_allocated.
 */
xge_hal_status_e
__hal_mempool_grow(xge_hal_mempool_t *mempool, int num_allocate,
        int *num_allocated)
{
    int i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0;
    int n_items = mempool->items_per_memblock;

    *num_allocated = 0;

    if ((mempool->memblocks_allocated + num_allocate) >
            mempool->memblocks_max) {
        xge_debug_mm(XGE_ERR, "%s",
            "__hal_mempool_grow: cannot grow any more");
        return XGE_HAL_ERR_OUT_OF_MEMORY;
    }

    for (i = mempool->memblocks_allocated;
         i < mempool->memblocks_allocated + num_allocate; i++) {
        int j;
        int is_last =
            ((mempool->memblocks_allocated + num_allocate - 1) == i);
        xge_hal_mempool_dma_t *dma_object =
            mempool->memblocks_dma_arr + i;
        void *the_memblock;
        int dma_flags;

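        /* choose coherent vs. streaming DMA memory for the memblock,
         * based on the build-time XGE_HAL_DMA_DTR_CONSISTENT setting */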
        dma_flags = XGE_OS_DMA_CACHELINE_ALIGNED;
#ifdef XGE_HAL_DMA_DTR_CONSISTENT
        dma_flags |= XGE_OS_DMA_CONSISTENT;
#else
        dma_flags |= XGE_OS_DMA_STREAMING;
#endif

        /* allocate DMA-capable memblock */
        mempool->memblocks_arr[i] = xge_os_dma_malloc(mempool->pdev,
                mempool->memblock_size, dma_flags,
                &dma_object->handle, &dma_object->acc_handle);
        if (mempool->memblocks_arr[i] == NULL) {
            xge_debug_mm(XGE_ERR,
                "memblock[%d]: out of DMA memory", i);
            return XGE_HAL_ERR_OUT_OF_MEMORY;
        }
        xge_os_memzero(mempool->memblocks_arr[i],
                mempool->memblock_size);
        the_memblock = mempool->memblocks_arr[i];

        /* Allocate the memblock's private area.  Each DMA memblock
         * carries space for per-item private data, sized according to
         * the mempool user's request.  Whenever the mempool grows, the
         * new memblock and its private area are allocated together,
         * which keeps memory overhead low. */
        mempool->memblocks_priv_arr[i] = xge_os_malloc(mempool->pdev,
                mempool->items_priv_size * n_items);
        if (mempool->memblocks_priv_arr[i] == NULL) {
            xge_os_dma_free(mempool->pdev, the_memblock,
                    mempool->memblock_size,
                    &dma_object->acc_handle, &dma_object->handle);
            xge_debug_mm(XGE_ERR,
                "memblock_priv[%d]: out of virtual memory, "
                "requested %d(%d:%d) bytes", i,
                mempool->items_priv_size * n_items,
                mempool->items_priv_size, n_items);
            return XGE_HAL_ERR_OUT_OF_MEMORY;
        }
        xge_os_memzero(mempool->memblocks_priv_arr[i],
                mempool->items_priv_size * n_items);

        /* map the memblock for device DMA access */
        dma_object->addr = xge_os_dma_map(mempool->pdev,
                dma_object->handle, the_memblock,
                mempool->memblock_size,
                XGE_OS_DMA_DIR_BIDIRECTIONAL,
#ifdef XGE_HAL_DMA_DTR_CONSISTENT
                XGE_OS_DMA_CONSISTENT
#else
                XGE_OS_DMA_STREAMING
#endif
                );
        if (dma_object->addr == XGE_OS_INVALID_DMA_ADDR) {
            xge_os_free(mempool->pdev, mempool->memblocks_priv_arr[i],
                    mempool->items_priv_size * n_items);
            xge_os_dma_free(mempool->pdev, the_memblock,
                    mempool->memblock_size,
                    &dma_object->acc_handle, &dma_object->handle);
            return XGE_HAL_ERR_OUT_OF_MAPPING;
        }

        /* fill the items hash array */
        for (j = 0; j < n_items; j++) {
            int index = i * n_items + j;

            if (first_time && index >= mempool->items_initial) {
                break;
            }

            mempool->items_arr[index] =
                ((char *)the_memblock + j * mempool->item_size);

            /* let the caller do additional per-item setup */
            if (mempool->item_func_alloc != NULL) {
                xge_hal_status_e status;

                if ((status = mempool->item_func_alloc(
                        mempool,
                        the_memblock,
                        i,
                        dma_object,
                        mempool->items_arr[index],
                        index,
                        is_last,
                        mempool->userdata)) != XGE_HAL_OK) {

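                    /* roll back: undo the per-item setup already done
                     * for earlier items of this memblock */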
                    if (mempool->item_func_free != NULL) {
                        int k;

                        for (k = 0; k < j; k++) {

                            index = i * n_items + k;

                            (void) mempool->item_func_free(
                                    mempool, the_memblock,
                                    i, dma_object,
                                    mempool->items_arr[index],
                                    index, is_last,
                                    mempool->userdata);
                        }
                    }

                    xge_os_free(mempool->pdev,
                            mempool->memblocks_priv_arr[i],
                            mempool->items_priv_size * n_items);
                    xge_os_dma_unmap(mempool->pdev,
                            dma_object->handle,
                            dma_object->addr,
                            mempool->memblock_size,
                            XGE_OS_DMA_DIR_BIDIRECTIONAL);
                    xge_os_dma_free(mempool->pdev,
                            the_memblock,
                            mempool->memblock_size,
                            &dma_object->acc_handle,
                            &dma_object->handle);
                    return status;
                }
            }

            mempool->items_current = index + 1;
        }

        xge_debug_mm(XGE_TRACE,
            "memblock%d: allocated %dk, vaddr 0x"XGE_OS_LLXFMT", "
            "dma_addr 0x"XGE_OS_LLXFMT, i, mempool->memblock_size / 1024,
            (unsigned long long)(ulong_t)mempool->memblocks_arr[i],
            (unsigned long long)dma_object->addr);

        (*num_allocated)++;

        if (first_time && mempool->items_current ==
                mempool->items_initial) {
            break;
        }
    }

    /* increment the actual number of allocated memblocks */
    mempool->memblocks_allocated += *num_allocated;

    return XGE_HAL_OK;
}

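/*
 * Illustration only -- compiled out.  Given the layout built by
 * __hal_mempool_grow() above, an item's virtual address can be recovered
 * from its flat index.  The DMA address arithmetic assumes the item's bus
 * address equals the memblock's mapped base plus the same byte offset,
 * which holds because each memblock is mapped as a single contiguous
 * region.  The helper name and the dma_addr_t type are placeholders, not
 * part of the HAL API.
 */
#if 0
static void *
__example_mempool_item_vaddr(xge_hal_mempool_t *mempool, int index,
        dma_addr_t *dma_addrp)
{
    int i = index / mempool->items_per_memblock;  /* memblock number */
    int j = index % mempool->items_per_memblock;  /* slot within it  */
    int offset = j * mempool->item_size;          /* byte offset     */

    if (dma_addrp != NULL)
        *dma_addrp = mempool->memblocks_dma_arr[i].addr + offset;

    return (char *)mempool->memblocks_arr[i] + offset;
}
#endif
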
/*
 * __hal_mempool_create
 * @pdev: PCI device handle used for all allocations.
 * @memblock_size: size in bytes of each DMA-able memory block.
 * @item_size: size in bytes of a single pool item.
 * @items_priv_size: per-item private area size, in bytes.
 * @items_initial: number of items to pre-allocate.
 * @items_max: maximum number of items the pool may ever hold.
 * @item_func_alloc: per-item callback invoked as items are created.
 * @item_func_free: per-item callback invoked when items are released.
 * @userdata: opaque pointer passed back to the item callbacks.
 *
 * Creates a memory pool object.  The pool may grow but never shrinks.
 * It consists of a number of dynamically allocated memory blocks, together
 * large enough to hold %items_initial items.  The memory is DMA-able, but
 * the client must map/unmap it before interoperating with the device.
 * See also: xge_os_dma_map(), xge_os_dma_unmap(), xge_hal_status_e{}.
 */
xge_hal_mempool_t *
__hal_mempool_create(pci_dev_h pdev, int memblock_size, int item_size,
        int items_priv_size, int items_initial, int items_max,
        xge_hal_mempool_item_f item_func_alloc,
        xge_hal_mempool_item_f item_func_free, void *userdata)
{
    xge_hal_status_e status;
    int memblocks_to_allocate;
    xge_hal_mempool_t *mempool;
    int allocated;

    if (memblock_size < item_size) {
        xge_debug_mm(XGE_ERR,
            "memblock_size %d < item_size %d: misconfiguration",
            memblock_size, item_size);
        return NULL;
    }

    mempool = (xge_hal_mempool_t *)
        xge_os_malloc(pdev, sizeof(xge_hal_mempool_t));
    if (mempool == NULL) {
        xge_debug_mm(XGE_ERR, "mempool allocation failure");
        return NULL;
    }
    xge_os_memzero(mempool, sizeof(xge_hal_mempool_t));

    mempool->pdev            = pdev;
    mempool->memblock_size   = memblock_size;
    mempool->items_max       = items_max;
    mempool->items_initial   = items_initial;
    mempool->item_size       = item_size;
    mempool->items_priv_size = items_priv_size;
    mempool->item_func_alloc = item_func_alloc;
    mempool->item_func_free  = item_func_free;
    mempool->userdata        = userdata;

    mempool->memblocks_allocated = 0;

    mempool->items_per_memblock = memblock_size / item_size;

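    /* number of memblocks needed to hold items_max items (ceiling division) */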
    mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) /
            mempool->items_per_memblock;

    /* allocate array of memblocks */
    mempool->memblocks_arr = (void **) xge_os_malloc(mempool->pdev,
            sizeof(void *) * mempool->memblocks_max);
    if (mempool->memblocks_arr == NULL) {
        xge_debug_mm(XGE_ERR, "memblocks_arr allocation failure");
        __hal_mempool_destroy(mempool);
        return NULL;
    }
    xge_os_memzero(mempool->memblocks_arr,
            sizeof(void *) * mempool->memblocks_max);

    /* allocate array of per-memblock item private areas */
    mempool->memblocks_priv_arr = (void **) xge_os_malloc(mempool->pdev,
            sizeof(void *) * mempool->memblocks_max);
    if (mempool->memblocks_priv_arr == NULL) {
        xge_debug_mm(XGE_ERR, "memblocks_priv_arr allocation failure");
        __hal_mempool_destroy(mempool);
        return NULL;
    }
    xge_os_memzero(mempool->memblocks_priv_arr,
            sizeof(void *) * mempool->memblocks_max);

    /* allocate array of memblock DMA objects */
    mempool->memblocks_dma_arr =
        (xge_hal_mempool_dma_t *) xge_os_malloc(mempool->pdev,
            sizeof(xge_hal_mempool_dma_t) * mempool->memblocks_max);
    if (mempool->memblocks_dma_arr == NULL) {
        xge_debug_mm(XGE_ERR, "memblocks_dma_arr allocation failure");
        __hal_mempool_destroy(mempool);
        return NULL;
    }
    xge_os_memzero(mempool->memblocks_dma_arr,
            sizeof(xge_hal_mempool_dma_t) * mempool->memblocks_max);

    /* allocate hash array of items */
    mempool->items_arr = (void **) xge_os_malloc(mempool->pdev,
            sizeof(void *) * mempool->items_max);
    if (mempool->items_arr == NULL) {
        xge_debug_mm(XGE_ERR, "items_arr allocation failure");
        __hal_mempool_destroy(mempool);
        return NULL;
    }
    xge_os_memzero(mempool->items_arr, sizeof(void *) * mempool->items_max);

    mempool->shadow_items_arr = (void **) xge_os_malloc(mempool->pdev,
            sizeof(void *) * mempool->items_max);
    if (mempool->shadow_items_arr == NULL) {
        xge_debug_mm(XGE_ERR, "shadow_items_arr allocation failure");
        __hal_mempool_destroy(mempool);
        return NULL;
    }
    xge_os_memzero(mempool->shadow_items_arr,
            sizeof(void *) * mempool->items_max);

    /* calculate the initial number of memblocks */
    memblocks_to_allocate = (mempool->items_initial +
            mempool->items_per_memblock - 1) /
            mempool->items_per_memblock;

    xge_debug_mm(XGE_TRACE, "allocating %d memblocks, "
            "%d items per memblock", memblocks_to_allocate,
            mempool->items_per_memblock);

    /* pre-allocate the mempool */
    status = __hal_mempool_grow(mempool, memblocks_to_allocate, &allocated);
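    /* snapshot the freshly initialized item pointers; __hal_mempool_destroy()
     * hands items back to item_func_free through this shadow copy */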
    xge_os_memcpy(mempool->shadow_items_arr, mempool->items_arr,
            sizeof(void *) * mempool->items_max);
    if (status != XGE_HAL_OK) {
        xge_debug_mm(XGE_ERR, "mempool_grow failure");
        __hal_mempool_destroy(mempool);
        return NULL;
    }

    xge_debug_mm(XGE_TRACE,
        "total: allocated %dk of DMA-capable memory",
        mempool->memblock_size * allocated / 1024);

    return mempool;
}

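/*
 * Illustration only -- compiled out.  A minimal sketch of how a caller
 * might create and tear down a mempool with the interface above.  The
 * callback body, the example sizes, and the function names here are
 * hypothetical placeholders; real HAL users supply their own item
 * callbacks and dimensions.
 */
#if 0
static xge_hal_status_e
__example_item_alloc(xge_hal_mempool_t *mempoolh, void *memblock,
        int memblock_index, xge_hal_mempool_dma_t *dma_object,
        void *item, int index, int is_last, void *userdata)
{
    /* e.g. initialize a descriptor that lives inside 'item' */
    return XGE_HAL_OK;
}

static void
__example_mempool_usage(pci_dev_h pdev)
{
    xge_hal_mempool_t *mempool;

    mempool = __hal_mempool_create(pdev,
            8192,                 /* memblock_size   */
            128,                  /* item_size       */
            64,                   /* items_priv_size */
            256,                  /* items_initial   */
            1024,                 /* items_max       */
            __example_item_alloc, /* item_func_alloc */
            NULL,                 /* item_func_free  */
            NULL);                /* userdata        */
    if (mempool == NULL)
        return;

    /* ... items are available via mempool->items_arr[0..255] ... */

    __hal_mempool_destroy(mempool);
}
#endif
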
/*
 * __hal_mempool_destroy
 */
void
__hal_mempool_destroy(xge_hal_mempool_t *mempool)
{
    int i, j;

    for (i = 0; i < mempool->memblocks_allocated; i++) {
        xge_hal_mempool_dma_t *dma_object;

        xge_assert(mempool->memblocks_arr[i]);
        xge_assert(mempool->memblocks_dma_arr + i);

        dma_object = mempool->memblocks_dma_arr + i;

        for (j = 0; j < mempool->items_per_memblock; j++) {
            int index = i * mempool->items_per_memblock + j;

            /* skip items never populated in the last,
             * partially filled memblock (if any) */
            if (index >= mempool->items_current) {
                break;
            }

            /* let the caller do additional per-item cleanup */
            if (mempool->item_func_free != NULL) {

                mempool->item_func_free(mempool,
                    mempool->memblocks_arr[i],
                    i, dma_object,
                    mempool->shadow_items_arr[index],
                    index, /* unused */ -1,
                    mempool->userdata);
            }
        }

        xge_os_dma_unmap(mempool->pdev,
                dma_object->handle, dma_object->addr,
                mempool->memblock_size, XGE_OS_DMA_DIR_BIDIRECTIONAL);

        xge_os_free(mempool->pdev, mempool->memblocks_priv_arr[i],
            mempool->items_priv_size * mempool->items_per_memblock);

        xge_os_dma_free(mempool->pdev, mempool->memblocks_arr[i],
                mempool->memblock_size, &dma_object->acc_handle,
                &dma_object->handle);
    }

    if (mempool->items_arr) {
        xge_os_free(mempool->pdev, mempool->items_arr,
            sizeof(void *) * mempool->items_max);
    }

    if (mempool->shadow_items_arr) {
        xge_os_free(mempool->pdev, mempool->shadow_items_arr,
            sizeof(void *) * mempool->items_max);
    }

    if (mempool->memblocks_dma_arr) {
        xge_os_free(mempool->pdev, mempool->memblocks_dma_arr,
            sizeof(xge_hal_mempool_dma_t) * mempool->memblocks_max);
    }

    if (mempool->memblocks_priv_arr) {
        xge_os_free(mempool->pdev, mempool->memblocks_priv_arr,
            sizeof(void *) * mempool->memblocks_max);
    }

    if (mempool->memblocks_arr) {
        xge_os_free(mempool->pdev, mempool->memblocks_arr,
            sizeof(void *) * mempool->memblocks_max);
    }

    xge_os_free(mempool->pdev, mempool, sizeof(xge_hal_mempool_t));
}