/* vxgehal-mm.c, FreeBSD stable/11 revision 330897 */
1/*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright(c) 2002-2011 Exar Corp. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification are permitted provided the following conditions are met: 9 * 10 * 1. Redistributions of source code must retain the above copyright notice, 11 * this list of conditions and the following disclaimer. 12 * 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * 3. Neither the name of the Exar Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived from 19 * this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 * POSSIBILITY OF SUCH DAMAGE. 32 */ 33/*$FreeBSD: stable/11/sys/dev/vxge/vxgehal/vxgehal-mm.c 330897 2018-03-14 03:19:51Z eadler $*/ 34 35#include <dev/vxge/vxgehal/vxgehal.h> 36 37/* 38 * __hal_mempool_grow 39 * 40 * Will resize mempool up to %num_allocate value. 
41 */ 42static vxge_hal_status_e 43__hal_mempool_grow( 44 vxge_hal_mempool_t *mempool, 45 u32 num_allocate, 46 u32 *num_allocated) 47{ 48 u32 i, j, k, item_index, is_last; 49 u32 first_time = mempool->memblocks_allocated == 0 ? 1 : 0; 50 u32 n_items = mempool->items_per_memblock; 51 u32 start_block_idx = mempool->memblocks_allocated; 52 u32 end_block_idx = mempool->memblocks_allocated + num_allocate; 53 __hal_device_t *hldev; 54 55 vxge_assert(mempool != NULL); 56 57 hldev = (__hal_device_t *) mempool->devh; 58 59 vxge_hal_trace_log_mm("==> %s:%s:%d", 60 __FILE__, __func__, __LINE__); 61 62 vxge_hal_trace_log_mm( 63 "mempool = 0x"VXGE_OS_STXFMT", num_allocate = %d, " 64 "num_allocated = 0x"VXGE_OS_STXFMT, (ptr_t) mempool, 65 num_allocate, (ptr_t) num_allocated); 66 67 *num_allocated = 0; 68 69 if (end_block_idx > mempool->memblocks_max) { 70 vxge_hal_err_log_mm("%s", 71 "__hal_mempool_grow: can grow anymore"); 72 vxge_hal_trace_log_mm("<== %s:%s:%d Result: %d", 73 __FILE__, __func__, __LINE__, VXGE_HAL_ERR_OUT_OF_MEMORY); 74 return (VXGE_HAL_ERR_OUT_OF_MEMORY); 75 } 76 77 for (i = start_block_idx; i < end_block_idx; i++) { 78 79 void *the_memblock; 80 vxge_hal_mempool_dma_t *dma_object; 81 82 is_last = ((end_block_idx - 1) == i); 83 dma_object = mempool->memblocks_dma_arr + i; 84 85 /* 86 * allocate memblock's private part. Each DMA memblock 87 * has a space allocated for item's private usage upon 88 * mempool's user request. Each time mempool grows, it will 89 * allocate new memblock and its private part at once. 90 * This helps to minimize memory usage a lot. 
91 */ 92 mempool->memblocks_priv_arr[i] = vxge_os_malloc( 93 ((__hal_device_t *) mempool->devh)->header.pdev, 94 mempool->items_priv_size * n_items); 95 if (mempool->memblocks_priv_arr[i] == NULL) { 96 97 vxge_hal_err_log_mm("memblock_priv[%d]: \ 98 out of virtual memory, " 99 "requested %d(%d:%d) bytes", i, 100 mempool->items_priv_size * n_items, 101 mempool->items_priv_size, n_items); 102 vxge_hal_trace_log_mm("<== %s:%s:%d Result: %d", 103 __FILE__, __func__, __LINE__, 104 VXGE_HAL_ERR_OUT_OF_MEMORY); 105 return (VXGE_HAL_ERR_OUT_OF_MEMORY); 106 107 } 108 109 vxge_os_memzero(mempool->memblocks_priv_arr[i], 110 mempool->items_priv_size * n_items); 111 112 /* allocate DMA-capable memblock */ 113 mempool->memblocks_arr[i] = 114 __hal_blockpool_malloc(mempool->devh, 115 mempool->memblock_size, 116 &dma_object->addr, 117 &dma_object->handle, 118 &dma_object->acc_handle); 119 if (mempool->memblocks_arr[i] == NULL) { 120 vxge_os_free( 121 ((__hal_device_t *) mempool->devh)->header.pdev, 122 mempool->memblocks_priv_arr[i], 123 mempool->items_priv_size * n_items); 124 vxge_hal_err_log_mm("memblock[%d]: \ 125 out of DMA memory", i); 126 vxge_hal_trace_log_mm("<== %s:%s:%d Result: %d", 127 __FILE__, __func__, __LINE__, 128 VXGE_HAL_ERR_OUT_OF_MEMORY); 129 return (VXGE_HAL_ERR_OUT_OF_MEMORY); 130 } 131 132 (*num_allocated)++; 133 mempool->memblocks_allocated++; 134 135 vxge_os_memzero(mempool->memblocks_arr[i], 136 mempool->memblock_size); 137 138 the_memblock = mempool->memblocks_arr[i]; 139 140 /* fill the items hash array */ 141 for (j = 0; j < n_items; j++) { 142 item_index = i * n_items + j; 143 144 if (first_time && (item_index >= mempool->items_initial)) 145 break; 146 147 mempool->items_arr[item_index] = 148 ((char *) the_memblock + j *mempool->item_size); 149 150 /* let caller to do more job on each item */ 151 if (mempool->item_func_alloc != NULL) { 152 vxge_hal_status_e status; 153 154 if ((status = mempool->item_func_alloc( 155 mempool, 156 the_memblock, 157 i, 
158 dma_object, 159 mempool->items_arr[item_index], 160 item_index, 161 is_last, 162 mempool->userdata)) != VXGE_HAL_OK) { 163 164 if (mempool->item_func_free != NULL) { 165 166 for (k = 0; k < j; k++) { 167 168 item_index = i * n_items + k; 169 170 (void) mempool->item_func_free( 171 mempool, 172 the_memblock, 173 i, dma_object, 174 mempool->items_arr[item_index], 175 item_index, is_last, 176 mempool->userdata); 177 } 178 } 179 180 vxge_os_free(((__hal_device_t *) 181 mempool->devh)->header.pdev, 182 mempool->memblocks_priv_arr[i], 183 mempool->items_priv_size * 184 n_items); 185 186 __hal_blockpool_free(mempool->devh, 187 the_memblock, 188 mempool->memblock_size, 189 &dma_object->addr, 190 &dma_object->handle, 191 &dma_object->acc_handle); 192 193 (*num_allocated)--; 194 mempool->memblocks_allocated--; 195 return (status); 196 } 197 } 198 199 mempool->items_current = item_index + 1; 200 } 201 202 vxge_hal_info_log_mm( 203 "memblock%d: allocated %dk, vaddr 0x"VXGE_OS_STXFMT", " 204 "dma_addr 0x"VXGE_OS_STXFMT, 205 i, mempool->memblock_size / 1024, 206 (ptr_t) mempool->memblocks_arr[i], dma_object->addr); 207 208 if (first_time && mempool->items_current == 209 mempool->items_initial) { 210 break; 211 } 212 } 213 214 vxge_hal_trace_log_mm("<== %s:%s:%d Result: 0", 215 __FILE__, __func__, __LINE__); 216 217 return (VXGE_HAL_OK); 218} 219 220/* 221 * vxge_hal_mempool_create 222 * @memblock_size: 223 * @items_initial: 224 * @items_max: 225 * @item_size: 226 * @item_func: 227 * 228 * This function will create memory pool object. Pool may grow but will 229 * never shrink. Pool consists of number of dynamically allocated blocks 230 * with size enough to hold %items_initial number of items. Memory is 231 * DMA-able but client must map/unmap before interoperating with the device. 232 * See also: vxge_os_dma_map(), vxge_hal_dma_unmap(), vxge_hal_status_e {}. 
 */
vxge_hal_mempool_t *
vxge_hal_mempool_create(
    vxge_hal_device_h devh,
    u32 memblock_size,
    u32 item_size,
    u32 items_priv_size,
    u32 items_initial,
    u32 items_max,
    vxge_hal_mempool_item_f item_func_alloc,
    vxge_hal_mempool_item_f item_func_free,
    void *userdata)
{
	vxge_hal_status_e status;
	u32 memblocks_to_allocate;
	vxge_hal_mempool_t *mempool;
	__hal_device_t *hldev;
	u32 allocated;

	vxge_assert(devh != NULL);

	hldev = (__hal_device_t *) devh;

	vxge_hal_trace_log_mm("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_mm(
	    "devh = 0x"VXGE_OS_STXFMT", memblock_size = %d, item_size = %d, "
	    "items_priv_size = %d, items_initial = %d, items_max = %d, "
	    "item_func_alloc = 0x"VXGE_OS_STXFMT", "
	    "item_func_free = 0x"VXGE_OS_STXFMT", "
	    "userdata = 0x"VXGE_OS_STXFMT, (ptr_t) devh,
	    memblock_size, item_size, items_priv_size,
	    items_initial, items_max, (ptr_t) item_func_alloc,
	    (ptr_t) item_func_free, (ptr_t) userdata);

	/* A memblock must be able to hold at least one item. */
	if (memblock_size < item_size) {
		vxge_hal_err_log_mm(
		    "memblock_size %d < item_size %d: misconfiguration",
		    memblock_size, item_size);
		vxge_hal_trace_log_mm("<== %s:%s:%d Result: %d",
		    __FILE__, __func__, __LINE__, VXGE_HAL_FAIL);
		return (NULL);
	}

	/*
	 * Allocate and zero the pool object itself; all subsequent error
	 * paths funnel through vxge_hal_mempool_destroy(), which tolerates
	 * the not-yet-allocated (NULL) arrays below.
	 */
	mempool = (vxge_hal_mempool_t *) vxge_os_malloc(
	    ((__hal_device_t *) devh)->header.pdev, sizeof(vxge_hal_mempool_t));
	if (mempool == NULL) {
		vxge_hal_trace_log_mm("<== %s:%s:%d Result: %d",
		    __FILE__, __func__, __LINE__, VXGE_HAL_ERR_OUT_OF_MEMORY);
		return (NULL);
	}
	vxge_os_memzero(mempool, sizeof(vxge_hal_mempool_t));

	mempool->devh = devh;
	mempool->memblock_size = memblock_size;
	mempool->items_max = items_max;
	mempool->items_initial = items_initial;
	mempool->item_size = item_size;
	mempool->items_priv_size = items_priv_size;
	mempool->item_func_alloc = item_func_alloc;
	mempool->item_func_free = item_func_free;
	mempool->userdata = userdata;

	mempool->memblocks_allocated = 0;

	/* Non-page-sized blocks only get cacheline alignment. */
	if (memblock_size != VXGE_OS_HOST_PAGE_SIZE)
		mempool->dma_flags = VXGE_OS_DMA_CACHELINE_ALIGNED;

#if defined(VXGE_HAL_DMA_CONSISTENT)
	mempool->dma_flags |= VXGE_OS_DMA_CONSISTENT;
#else
	mempool->dma_flags |= VXGE_OS_DMA_STREAMING;
#endif

	mempool->items_per_memblock = memblock_size / item_size;

	/* Round up so that items_max items always fit. */
	mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) /
	    mempool->items_per_memblock;

	/* allocate array of memblocks */
	mempool->memblocks_arr = (void **)vxge_os_malloc(
	    ((__hal_device_t *) mempool->devh)->header.pdev,
	    sizeof(void *) * mempool->memblocks_max);
	if (mempool->memblocks_arr == NULL) {
		vxge_hal_mempool_destroy(mempool);
		vxge_hal_trace_log_mm("<== %s:%s:%d Result: %d",
		    __FILE__, __func__, __LINE__, VXGE_HAL_ERR_OUT_OF_MEMORY);
		return (NULL);
	}
	vxge_os_memzero(mempool->memblocks_arr,
	    sizeof(void *) * mempool->memblocks_max);

	/* allocate array of private parts of items per memblocks */
	mempool->memblocks_priv_arr = (void **)vxge_os_malloc(
	    ((__hal_device_t *) mempool->devh)->header.pdev,
	    sizeof(void *) * mempool->memblocks_max);
	if (mempool->memblocks_priv_arr == NULL) {
		vxge_hal_mempool_destroy(mempool);
		vxge_hal_trace_log_mm("<== %s:%s:%d Result: %d",
		    __FILE__, __func__, __LINE__, VXGE_HAL_ERR_OUT_OF_MEMORY);
		return (NULL);
	}
	vxge_os_memzero(mempool->memblocks_priv_arr,
	    sizeof(void *) * mempool->memblocks_max);

	/* allocate array of memblocks DMA objects */
	mempool->memblocks_dma_arr =
	    (vxge_hal_mempool_dma_t *) vxge_os_malloc(
	    ((__hal_device_t *) mempool->devh)->header.pdev,
	    sizeof(vxge_hal_mempool_dma_t) * mempool->memblocks_max);

	if (mempool->memblocks_dma_arr == NULL) {
		vxge_hal_mempool_destroy(mempool);
		vxge_hal_trace_log_mm("<== %s:%s:%d Result: %d",
		    __FILE__, __func__, __LINE__, VXGE_HAL_ERR_OUT_OF_MEMORY);
		return (NULL);
	}
	vxge_os_memzero(mempool->memblocks_dma_arr,
	    sizeof(vxge_hal_mempool_dma_t) * mempool->memblocks_max);

	/* allocate hash array of items */
	mempool->items_arr = (void **)vxge_os_malloc(
	    ((__hal_device_t *) mempool->devh)->header.pdev,
	    sizeof(void *) * mempool->items_max);
	if (mempool->items_arr == NULL) {
		vxge_hal_mempool_destroy(mempool);
		vxge_hal_trace_log_mm("<== %s:%s:%d Result: %d",
		    __FILE__, __func__, __LINE__, VXGE_HAL_ERR_OUT_OF_MEMORY);
		return (NULL);
	}
	vxge_os_memzero(mempool->items_arr,
	    sizeof(void *) * mempool->items_max);

	/* Shadow copy of items_arr; destroy() frees items through it. */
	mempool->shadow_items_arr = (void **)vxge_os_malloc(
	    ((__hal_device_t *) mempool->devh)->header.pdev,
	    sizeof(void *) * mempool->items_max);
	if (mempool->shadow_items_arr == NULL) {
		vxge_hal_mempool_destroy(mempool);
		vxge_hal_trace_log_mm("<== %s:%s:%d Result: %d",
		    __FILE__, __func__, __LINE__, VXGE_HAL_ERR_OUT_OF_MEMORY);
		return (NULL);
	}
	vxge_os_memzero(mempool->shadow_items_arr,
	    sizeof(void *) * mempool->items_max);

	/* calculate initial number of memblocks */
	memblocks_to_allocate = (mempool->items_initial +
	    mempool->items_per_memblock - 1) /
	    mempool->items_per_memblock;

	vxge_hal_info_log_mm("allocating %d memblocks, "
	    "%d items per memblock", memblocks_to_allocate,
	    mempool->items_per_memblock);

	/*
	 * Pre-allocate the mempool.  The shadow array is populated even
	 * when grow fails, so that the destroy() below can pass valid item
	 * pointers to item_func_free.
	 */
	status = __hal_mempool_grow(mempool, memblocks_to_allocate, &allocated);
	vxge_os_memcpy(mempool->shadow_items_arr, mempool->items_arr,
	    sizeof(void *) * mempool->items_max);
	if (status != VXGE_HAL_OK) {
		vxge_hal_mempool_destroy(mempool);
		vxge_hal_trace_log_mm("<== %s:%s:%d Result: %d",
		    __FILE__, __func__, __LINE__, VXGE_HAL_ERR_OUT_OF_MEMORY);
		return (NULL);
	}

	vxge_hal_info_log_mm(
	    "total: allocated %dk of DMA-capable memory",
	    mempool->memblock_size * allocated / 1024);

	vxge_hal_trace_log_mm("<== %s:%s:%d Result: 0",
	    __FILE__, __func__, __LINE__);

	return (mempool);
}

/*
 * vxge_hal_mempool_destroy - Tear down a pool built by
 * vxge_hal_mempool_create(): invokes item_func_free for every item up to
 * items_current (through shadow_items_arr), returns each memblock and its
 * private area, then frees the bookkeeping arrays and the pool object.
 */
void
vxge_hal_mempool_destroy(
    vxge_hal_mempool_t *mempool)
{
	u32 i, j, item_index;
	__hal_device_t *hldev;

	vxge_assert(mempool != NULL);

	hldev = (__hal_device_t *) mempool->devh;

	vxge_hal_trace_log_mm("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_mm("mempool = 0x"VXGE_OS_STXFMT,
	    (ptr_t) mempool);

	for (i = 0; i < mempool->memblocks_allocated; i++) {
		vxge_hal_mempool_dma_t *dma_object;

		vxge_assert(mempool->memblocks_arr[i]);
		vxge_assert(mempool->memblocks_dma_arr + i);

		dma_object = mempool->memblocks_dma_arr + i;

		for (j = 0; j < mempool->items_per_memblock; j++) {
			item_index = i * mempool->items_per_memblock + j;

			/* to skip last partially filled(if any) memblock */
			if (item_index >= mempool->items_current)
				break;

			/* let caller to do more job on each item */
			if (mempool->item_func_free != NULL) {

				/* is_last argument is unused on this path;
				 * -1 is passed as a placeholder. */
				mempool->item_func_free(mempool,
				    mempool->memblocks_arr[i],
				    i, dma_object,
				    mempool->shadow_items_arr[item_index],
				    item_index, /* unused */ -1,
				    mempool->userdata);
			}
		}

		vxge_os_free(hldev->header.pdev,
		    mempool->memblocks_priv_arr[i],
		    mempool->items_priv_size * mempool->items_per_memblock);

		__hal_blockpool_free(hldev,
		    mempool->memblocks_arr[i],
		    mempool->memblock_size,
		    &dma_object->addr,
		    &dma_object->handle,
		    &dma_object->acc_handle);
	}

	/* The arrays may be NULL when destroy is reached from a failed
	 * create; free only what was allocated. */
	if (mempool->items_arr) {
		vxge_os_free(hldev->header.pdev,
		    mempool->items_arr, sizeof(void *) * mempool->items_max);
	}

	if (mempool->shadow_items_arr) {
		vxge_os_free(hldev->header.pdev,
		    mempool->shadow_items_arr,
		    sizeof(void *) * mempool->items_max);
	}

	if (mempool->memblocks_dma_arr) {
		vxge_os_free(hldev->header.pdev,
		    mempool->memblocks_dma_arr,
		    sizeof(vxge_hal_mempool_dma_t) *
		    mempool->memblocks_max);
	}

	if (mempool->memblocks_priv_arr) {
		vxge_os_free(hldev->header.pdev,
		    mempool->memblocks_priv_arr,
		    sizeof(void *) * mempool->memblocks_max);
	}

	if (mempool->memblocks_arr) {
		vxge_os_free(hldev->header.pdev,
		    mempool->memblocks_arr,
		    sizeof(void *) * mempool->memblocks_max);
	}

	vxge_os_free(hldev->header.pdev,
	    mempool, sizeof(vxge_hal_mempool_t));

	vxge_hal_trace_log_mm("<== %s:%s:%d Result: 0",
	    __FILE__, __func__, __LINE__);
}

/*
 * vxge_hal_check_alignment - Check buffer alignment and calculate the
 * "misaligned" portion.
 * @dma_pointer: DMA address of the buffer.
 * @size: Buffer size, in bytes.
 * @alignment: Alignment "granularity" (see below), in bytes.
 * @copy_size: Maximum number of bytes to "extract" from the buffer
 * (in order to post it as a separate scatter-gather entry). See below.
 *
 * Check buffer alignment and calculate "misaligned" portion, if exists.
 * The buffer is considered aligned if its address is multiple of
 * the specified @alignment. If this is the case,
 * vxge_hal_check_alignment() returns zero.
 * Otherwise, vxge_hal_check_alignment() uses the last argument,
 * @copy_size,
 * to calculate the size to "extract" from the buffer. The @copy_size
 * may or may not be equal @alignment. The difference between these two
 * arguments is that the @alignment is used to make the decision: aligned
 * or not aligned. While the @copy_size is used to calculate the portion
 * of the buffer to "extract", i.e. to post as a separate entry in the
 * transmit descriptor. For example, the combination
 * @alignment = 8 and @copy_size = 64 will work okay on AMD Opteron boxes.
527 * 528 * Note: @copy_size should be a multiple of @alignment. In many practical 529 * cases @copy_size and @alignment will probably be equal. 530 * 531 * See also: vxge_hal_fifo_txdl_buffer_set_aligned(). 532 */ 533u32 534vxge_hal_check_alignment( 535 dma_addr_t dma_pointer, 536 u32 size, 537 u32 alignment, 538 u32 copy_size) 539{ 540 u32 misaligned_size; 541 542 misaligned_size = (int)(dma_pointer & (alignment - 1)); 543 if (!misaligned_size) { 544 return (0); 545 } 546 547 if (size > copy_size) { 548 misaligned_size = (int)(dma_pointer & (copy_size - 1)); 549 misaligned_size = copy_size - misaligned_size; 550 } else { 551 misaligned_size = size; 552 } 553 554 return (misaligned_size); 555} 556