/*-
 * Copyright (c) 2002-2007 Neterion, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
25 * 26 * $FreeBSD$ 27 */ 28 29#include <dev/nxge/include/xgehal-fifo.h> 30#include <dev/nxge/include/xgehal-device.h> 31 32static xge_hal_status_e 33__hal_fifo_mempool_item_alloc(xge_hal_mempool_h mempoolh, 34 void *memblock, 35 int memblock_index, 36 xge_hal_mempool_dma_t *dma_object, 37 void *item, 38 int index, 39 int is_last, 40 void *userdata) 41{ 42 int memblock_item_idx; 43 xge_hal_fifo_txdl_priv_t *txdl_priv; 44 xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)item; 45 xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)userdata; 46 47 xge_assert(item); 48 txdl_priv = (xge_hal_fifo_txdl_priv_t *) \ 49 __hal_mempool_item_priv((xge_hal_mempool_t *) mempoolh, 50 memblock_index, 51 item, 52 &memblock_item_idx); 53 xge_assert(txdl_priv); 54 55 /* pre-format HAL's TxDL's private */ 56 txdl_priv->dma_offset = (char*)item - (char*)memblock; 57 txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset; 58 txdl_priv->dma_handle = dma_object->handle; 59 txdl_priv->memblock = memblock; 60 txdl_priv->first_txdp = (xge_hal_fifo_txd_t *)item; 61 txdl_priv->next_txdl_priv = NULL; 62 txdl_priv->dang_txdl = NULL; 63 txdl_priv->dang_frags = 0; 64 txdl_priv->alloc_frags = 0; 65 66#ifdef XGE_DEBUG_ASSERT 67 txdl_priv->dma_object = dma_object; 68#endif 69 txdp->host_control = (u64)(ulong_t)txdl_priv; 70 71#ifdef XGE_HAL_ALIGN_XMIT 72 txdl_priv->align_vaddr = NULL; 73 txdl_priv->align_dma_addr = (dma_addr_t)0; 74 75#ifndef XGE_HAL_ALIGN_XMIT_ALLOC_RT 76 { 77 xge_hal_status_e status; 78 if (fifo->config->alignment_size) { 79 status =__hal_fifo_dtr_align_alloc_map(fifo, txdp); 80 if (status != XGE_HAL_OK) { 81 xge_debug_mm(XGE_ERR, 82 "align buffer[%d] %d bytes, status %d", 83 index, 84 fifo->align_size, 85 status); 86 return status; 87 } 88 } 89 } 90#endif 91#endif 92 93 if (fifo->channel.dtr_init) { 94 fifo->channel.dtr_init(fifo, (xge_hal_dtr_h)txdp, index, 95 fifo->channel.userdata, XGE_HAL_CHANNEL_OC_NORMAL); 96 } 97 98 return XGE_HAL_OK; 99} 100 101 102static xge_hal_status_e 
103__hal_fifo_mempool_item_free(xge_hal_mempool_h mempoolh, 104 void *memblock, 105 int memblock_index, 106 xge_hal_mempool_dma_t *dma_object, 107 void *item, 108 int index, 109 int is_last, 110 void *userdata) 111{ 112 int memblock_item_idx; 113 xge_hal_fifo_txdl_priv_t *txdl_priv; 114#ifdef XGE_HAL_ALIGN_XMIT 115 xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)userdata; 116#endif 117 118 xge_assert(item); 119 120 txdl_priv = (xge_hal_fifo_txdl_priv_t *) \ 121 __hal_mempool_item_priv((xge_hal_mempool_t *) mempoolh, 122 memblock_index, 123 item, 124 &memblock_item_idx); 125 xge_assert(txdl_priv); 126 127#ifdef XGE_HAL_ALIGN_XMIT 128 if (fifo->config->alignment_size) { 129 if (txdl_priv->align_dma_addr != 0) { 130 xge_os_dma_unmap(fifo->channel.pdev, 131 txdl_priv->align_dma_handle, 132 txdl_priv->align_dma_addr, 133 fifo->align_size, 134 XGE_OS_DMA_DIR_TODEVICE); 135 136 txdl_priv->align_dma_addr = 0; 137 } 138 139 if (txdl_priv->align_vaddr != NULL) { 140 xge_os_dma_free(fifo->channel.pdev, 141 txdl_priv->align_vaddr, 142 fifo->align_size, 143 &txdl_priv->align_dma_acch, 144 &txdl_priv->align_dma_handle); 145 146 txdl_priv->align_vaddr = NULL; 147 } 148 } 149#endif 150 151 return XGE_HAL_OK; 152} 153 154xge_hal_status_e 155__hal_fifo_open(xge_hal_channel_h channelh, xge_hal_channel_attr_t *attr) 156{ 157 xge_hal_device_t *hldev; 158 xge_hal_status_e status; 159 xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh; 160 xge_hal_fifo_queue_t *queue; 161 int i, txdl_size, max_arr_index, mid_point; 162 xge_hal_dtr_h dtrh; 163 164 hldev = (xge_hal_device_t *)fifo->channel.devh; 165 fifo->config = &hldev->config.fifo; 166 queue = &fifo->config->queue[attr->post_qid]; 167 168#if defined(XGE_HAL_TX_MULTI_RESERVE) 169 xge_os_spin_lock_init(&fifo->channel.reserve_lock, hldev->pdev); 170#elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ) 171 xge_os_spin_lock_init_irq(&fifo->channel.reserve_lock, hldev->irqh); 172#endif 173#if defined(XGE_HAL_TX_MULTI_POST) 174 if 
(xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) { 175 fifo->post_lock_ptr = &hldev->xena_post_lock; 176 } else { 177 xge_os_spin_lock_init(&fifo->channel.post_lock, hldev->pdev); 178 fifo->post_lock_ptr = &fifo->channel.post_lock; 179 } 180#elif defined(XGE_HAL_TX_MULTI_POST_IRQ) 181 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) { 182 fifo->post_lock_ptr = &hldev->xena_post_lock; 183 } else { 184 xge_os_spin_lock_init_irq(&fifo->channel.post_lock, 185 hldev->irqh); 186 fifo->post_lock_ptr = &fifo->channel.post_lock; 187 } 188#endif 189 190 fifo->align_size = 191 fifo->config->alignment_size * fifo->config->max_aligned_frags; 192 193 /* Initializing the BAR1 address as the start of 194 * the FIFO queue pointer and as a location of FIFO control 195 * word. */ 196 fifo->hw_pair = 197 (xge_hal_fifo_hw_pair_t *) (void *)(hldev->bar1 + 198 (attr->post_qid * XGE_HAL_FIFO_HW_PAIR_OFFSET)); 199 200 /* apply "interrupts per txdl" attribute */ 201 fifo->interrupt_type = XGE_HAL_TXD_INT_TYPE_UTILZ; 202 if (queue->intr) { 203 fifo->interrupt_type = XGE_HAL_TXD_INT_TYPE_PER_LIST; 204 } 205 fifo->no_snoop_bits = 206 (int)(XGE_HAL_TX_FIFO_NO_SNOOP(queue->no_snoop_bits)); 207 208 /* 209 * FIFO memory management strategy: 210 * 211 * TxDL splitted into three independent parts: 212 * - set of TxD's 213 * - TxD HAL private part 214 * - upper layer private part 215 * 216 * Adaptative memory allocation used. i.e. Memory allocated on 217 * demand with the size which will fit into one memory block. 218 * One memory block may contain more than one TxDL. In simple case 219 * memory block size can be equal to CPU page size. On more 220 * sophisticated OS's memory block can be contigious across 221 * several pages. 222 * 223 * During "reserve" operations more memory can be allocated on demand 224 * for example due to FIFO full condition. 
225 * 226 * Pool of memory memblocks never shrinks except __hal_fifo_close 227 * routine which will essentially stop channel and free the resources. 228 */ 229 230 /* TxDL common private size == TxDL private + ULD private */ 231 fifo->priv_size = sizeof(xge_hal_fifo_txdl_priv_t) + 232 attr->per_dtr_space; 233 fifo->priv_size = ((fifo->priv_size + __xge_os_cacheline_size -1) / 234 __xge_os_cacheline_size) * 235 __xge_os_cacheline_size; 236 237 /* recompute txdl size to be cacheline aligned */ 238 fifo->txdl_size = fifo->config->max_frags * sizeof(xge_hal_fifo_txd_t); 239 txdl_size = ((fifo->txdl_size + __xge_os_cacheline_size - 1) / 240 __xge_os_cacheline_size) * __xge_os_cacheline_size; 241 242 if (fifo->txdl_size != txdl_size) 243 xge_debug_fifo(XGE_ERR, "cacheline > 128 ( ?? ): %d, %d, %d, %d", 244 fifo->config->max_frags, fifo->txdl_size, txdl_size, 245 __xge_os_cacheline_size); 246 247 fifo->txdl_size = txdl_size; 248 249 /* since dtr_init() callback will be called from item_alloc(), 250 * the same way channels userdata might be used prior to 251 * channel_initialize() */ 252 fifo->channel.dtr_init = attr->dtr_init; 253 fifo->channel.userdata = attr->userdata; 254 fifo->txdl_per_memblock = fifo->config->memblock_size / 255 fifo->txdl_size; 256 257 fifo->mempool = __hal_mempool_create(hldev->pdev, 258 fifo->config->memblock_size, 259 fifo->txdl_size, 260 fifo->priv_size, 261 queue->initial, 262 queue->max, 263 __hal_fifo_mempool_item_alloc, 264 __hal_fifo_mempool_item_free, 265 fifo); 266 if (fifo->mempool == NULL) { 267 return XGE_HAL_ERR_OUT_OF_MEMORY; 268 } 269 270 status = __hal_channel_initialize(channelh, attr, 271 (void **) __hal_mempool_items_arr(fifo->mempool), 272 queue->initial, queue->max, 273 fifo->config->reserve_threshold); 274 if (status != XGE_HAL_OK) { 275 __hal_fifo_close(channelh); 276 return status; 277 } 278 xge_debug_fifo(XGE_TRACE, 279 "DTR reserve_length:%d reserve_top:%d\n" 280 "max_frags:%d reserve_threshold:%d\n" 281 "memblock_size:%d 
alignment_size:%d max_aligned_frags:%d", 282 fifo->channel.reserve_length, fifo->channel.reserve_top, 283 fifo->config->max_frags, fifo->config->reserve_threshold, 284 fifo->config->memblock_size, fifo->config->alignment_size, 285 fifo->config->max_aligned_frags); 286 287#ifdef XGE_DEBUG_ASSERT 288 for ( i = 0; i < fifo->channel.reserve_length; i++) { 289 xge_debug_fifo(XGE_TRACE, "DTR before reversing index:%d" 290 " handle:%p", i, fifo->channel.reserve_arr[i]); 291 } 292#endif 293 294 xge_assert(fifo->channel.reserve_length); 295 /* reverse the FIFO dtr array */ 296 max_arr_index = fifo->channel.reserve_length - 1; 297 max_arr_index -=fifo->channel.reserve_top; 298 xge_assert(max_arr_index); 299 mid_point = (fifo->channel.reserve_length - fifo->channel.reserve_top)/2; 300 for (i = 0; i < mid_point; i++) { 301 dtrh = fifo->channel.reserve_arr[i]; 302 fifo->channel.reserve_arr[i] = 303 fifo->channel.reserve_arr[max_arr_index - i]; 304 fifo->channel.reserve_arr[max_arr_index - i] = dtrh; 305 } 306 307#ifdef XGE_DEBUG_ASSERT 308 for ( i = 0; i < fifo->channel.reserve_length; i++) { 309 xge_debug_fifo(XGE_TRACE, "DTR after reversing index:%d" 310 " handle:%p", i, fifo->channel.reserve_arr[i]); 311 } 312#endif 313 314 return XGE_HAL_OK; 315} 316 317void 318__hal_fifo_close(xge_hal_channel_h channelh) 319{ 320 xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh; 321 xge_hal_device_t *hldev = (xge_hal_device_t *)fifo->channel.devh; 322 323 if (fifo->mempool) { 324 __hal_mempool_destroy(fifo->mempool); 325 } 326 327 __hal_channel_terminate(channelh); 328 329#if defined(XGE_HAL_TX_MULTI_RESERVE) 330 xge_os_spin_lock_destroy(&fifo->channel.reserve_lock, hldev->pdev); 331#elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ) 332 xge_os_spin_lock_destroy_irq(&fifo->channel.reserve_lock, hldev->pdev); 333#endif 334 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { 335#if defined(XGE_HAL_TX_MULTI_POST) 336 xge_os_spin_lock_destroy(&fifo->channel.post_lock, hldev->pdev); 337#elif 
defined(XGE_HAL_TX_MULTI_POST_IRQ) 338 xge_os_spin_lock_destroy_irq(&fifo->channel.post_lock, 339 hldev->pdev); 340#endif 341 } 342} 343 344void 345__hal_fifo_hw_initialize(xge_hal_device_h devh) 346{ 347 xge_hal_device_t *hldev = (xge_hal_device_t *)devh; 348 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 349 u64* tx_fifo_partitions[4]; 350 u64* tx_fifo_wrr[5]; 351 u64 tx_fifo_wrr_value[5]; 352 u64 val64, part0; 353 int i; 354 355 /* Tx DMA Initialization */ 356 357 tx_fifo_partitions[0] = &bar0->tx_fifo_partition_0; 358 tx_fifo_partitions[1] = &bar0->tx_fifo_partition_1; 359 tx_fifo_partitions[2] = &bar0->tx_fifo_partition_2; 360 tx_fifo_partitions[3] = &bar0->tx_fifo_partition_3; 361 362 tx_fifo_wrr[0] = &bar0->tx_w_round_robin_0; 363 tx_fifo_wrr[1] = &bar0->tx_w_round_robin_1; 364 tx_fifo_wrr[2] = &bar0->tx_w_round_robin_2; 365 tx_fifo_wrr[3] = &bar0->tx_w_round_robin_3; 366 tx_fifo_wrr[4] = &bar0->tx_w_round_robin_4; 367 368 tx_fifo_wrr_value[0] = XGE_HAL_FIFO_WRR_0; 369 tx_fifo_wrr_value[1] = XGE_HAL_FIFO_WRR_1; 370 tx_fifo_wrr_value[2] = XGE_HAL_FIFO_WRR_2; 371 tx_fifo_wrr_value[3] = XGE_HAL_FIFO_WRR_3; 372 tx_fifo_wrr_value[4] = XGE_HAL_FIFO_WRR_4; 373 374 /* Note: WRR calendar must be configured before the transmit 375 * FIFOs are enabled! 
page 6-77 user guide */ 376 377 if (!hldev->config.rts_qos_en) { 378 /* all zeroes for Round-Robin */ 379 for (i = 0; i < XGE_HAL_FIFO_MAX_WRR; i++) { 380 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0, 381 tx_fifo_wrr[i]); 382 } 383 384 /* reset all of them but '0' */ 385 for (i=1; i < XGE_HAL_FIFO_MAX_PARTITION; i++) { 386 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL, 387 tx_fifo_partitions[i]); 388 } 389 } else { /* Change the default settings */ 390 391 for (i = 0; i < XGE_HAL_FIFO_MAX_WRR; i++) { 392 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 393 tx_fifo_wrr_value[i], tx_fifo_wrr[i]); 394 } 395 } 396 397 /* configure only configured FIFOs */ 398 val64 = 0; part0 = 0; 399 for (i = 0; i < XGE_HAL_MAX_FIFO_NUM; i++) { 400 int reg_half = i % 2; 401 int reg_num = i / 2; 402 403 if (hldev->config.fifo.queue[i].configured) { 404 int priority = hldev->config.fifo.queue[i].priority; 405 val64 |= 406 vBIT((hldev->config.fifo.queue[i].max-1), 407 (((reg_half) * 32) + 19), 408 13) | vBIT(priority, (((reg_half)*32) + 5), 3); 409 } 410 411 /* NOTE: do write operation for each second u64 half 412 * or force for first one if configured number 413 * is even */ 414 if (reg_half) { 415 if (reg_num == 0) { 416 /* skip partition '0', must write it once at 417 * the end */ 418 part0 = val64; 419 } else { 420 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 421 val64, tx_fifo_partitions[reg_num]); 422 xge_debug_fifo(XGE_TRACE, 423 "fifo partition_%d at: " 424 "0x"XGE_OS_LLXFMT" is: 0x"XGE_OS_LLXFMT, 425 reg_num, (unsigned long long)(ulong_t) 426 tx_fifo_partitions[reg_num], 427 (unsigned long long)val64); 428 } 429 val64 = 0; 430 } 431 } 432 433 part0 |= BIT(0); /* to enable the FIFO partition. 
*/ 434 __hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0, (u32)part0, 435 tx_fifo_partitions[0]); 436 xge_os_wmb(); 437 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(part0>>32), 438 tx_fifo_partitions[0]); 439 xge_debug_fifo(XGE_TRACE, "fifo partition_0 at: " 440 "0x"XGE_OS_LLXFMT" is: 0x"XGE_OS_LLXFMT, 441 (unsigned long long)(ulong_t) 442 tx_fifo_partitions[0], 443 (unsigned long long) part0); 444 445 /* 446 * Initialization of Tx_PA_CONFIG register to ignore packet 447 * integrity checking. 448 */ 449 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 450 &bar0->tx_pa_cfg); 451 val64 |= XGE_HAL_TX_PA_CFG_IGNORE_FRM_ERR | 452 XGE_HAL_TX_PA_CFG_IGNORE_SNAP_OUI | 453 XGE_HAL_TX_PA_CFG_IGNORE_LLC_CTRL | 454 XGE_HAL_TX_PA_CFG_IGNORE_L2_ERR; 455 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 456 &bar0->tx_pa_cfg); 457 458 /* 459 * Assign MSI-X vectors 460 */ 461 for (i = 0; i < XGE_HAL_MAX_FIFO_NUM; i++) { 462 xge_list_t *item; 463 xge_hal_channel_t *channel = NULL; 464 465 if (!hldev->config.fifo.queue[i].configured || 466 !hldev->config.fifo.queue[i].intr_vector || 467 !hldev->config.intr_mode != XGE_HAL_INTR_MODE_MSIX) 468 continue; 469 470 /* find channel */ 471 xge_list_for_each(item, &hldev->free_channels) { 472 xge_hal_channel_t *tmp; 473 tmp = xge_container_of(item, xge_hal_channel_t, 474 item); 475 if (tmp->type == XGE_HAL_CHANNEL_TYPE_FIFO && 476 tmp->post_qid == i) { 477 channel = tmp; 478 break; 479 } 480 } 481 482 if (channel) { 483 xge_hal_channel_msix_set(channel, 484 hldev->config.fifo.queue[i].intr_vector); 485 } 486 } 487 488 xge_debug_fifo(XGE_TRACE, "%s", "fifo channels initialized"); 489} 490 491#ifdef XGE_HAL_ALIGN_XMIT 492void 493__hal_fifo_dtr_align_free_unmap(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh) 494{ 495 xge_hal_fifo_txdl_priv_t *txdl_priv; 496 xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh; 497 xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh; 498 499 txdl_priv = 
__hal_fifo_txdl_priv(txdp); 500 501 if (txdl_priv->align_dma_addr != 0) { 502 xge_os_dma_unmap(fifo->channel.pdev, 503 txdl_priv->align_dma_handle, 504 txdl_priv->align_dma_addr, 505 fifo->align_size, 506 XGE_OS_DMA_DIR_TODEVICE); 507 508 txdl_priv->align_dma_addr = 0; 509 } 510 511 if (txdl_priv->align_vaddr != NULL) { 512 xge_os_dma_free(fifo->channel.pdev, 513 txdl_priv->align_vaddr, 514 fifo->align_size, 515 &txdl_priv->align_dma_acch, 516 &txdl_priv->align_dma_handle); 517 518 519 txdl_priv->align_vaddr = NULL; 520 } 521 } 522 523xge_hal_status_e 524__hal_fifo_dtr_align_alloc_map(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh) 525{ 526 xge_hal_fifo_txdl_priv_t *txdl_priv; 527 xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh; 528 xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh; 529 530 xge_assert(txdp); 531 532 txdl_priv = __hal_fifo_txdl_priv(txdp); 533 534 /* allocate alignment DMA-buffer */ 535 txdl_priv->align_vaddr = (char *)xge_os_dma_malloc(fifo->channel.pdev, 536 fifo->align_size, 537 XGE_OS_DMA_CACHELINE_ALIGNED | 538 XGE_OS_DMA_STREAMING, 539 &txdl_priv->align_dma_handle, 540 &txdl_priv->align_dma_acch); 541 if (txdl_priv->align_vaddr == NULL) { 542 return XGE_HAL_ERR_OUT_OF_MEMORY; 543 } 544 545 /* map it */ 546 txdl_priv->align_dma_addr = xge_os_dma_map(fifo->channel.pdev, 547 txdl_priv->align_dma_handle, txdl_priv->align_vaddr, 548 fifo->align_size, 549 XGE_OS_DMA_DIR_TODEVICE, XGE_OS_DMA_STREAMING); 550 551 if (txdl_priv->align_dma_addr == XGE_OS_INVALID_DMA_ADDR) { 552 __hal_fifo_dtr_align_free_unmap(channelh, dtrh); 553 return XGE_HAL_ERR_OUT_OF_MAPPING; 554 } 555 556 return XGE_HAL_OK; 557} 558#endif 559 560 561