cvmx-cmd-queue.h revision 232812
/***********************license start***************
 * Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
 * reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *
 *   * Neither the name of Cavium Inc. nor the names of
 *     its contributors may be used to endorse or promote products
 *     derived from this software without specific prior written
 *     permission.
 *
 * This Software, including technical data, may be subject to U.S. export control
 * laws, including the U.S. Export Administration Act and its associated
 * regulations, and may be subject to export or import regulations in other
 * countries.
 *
 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
 ***********************license end**************************************/


/**
 * @file
 *
 * Support functions for managing command queues used for
 * various hardware blocks.
 *
 * The common command queue infrastructure abstracts out the
 * software necessary for adding to Octeon's chained queue
 * structures. These structures are used for commands to the
 * PKO, ZIP, DFA, RAID, and DMA engine blocks. Although each
 * hardware unit takes commands and CSRs of different types,
 * they all use basic linked command buffers to store the
 * pending requests. In general, users of the CVMX API don't
 * call cvmx-cmd-queue functions directly. Instead the hardware
 * unit specific wrapper should be used. The wrappers perform
 * unit specific validation and CSR writes to submit the
 * commands.
 *
 * Even though most software will never directly interact with
 * cvmx-cmd-queue, knowledge of its internal workings can help
 * in diagnosing performance problems and help with debugging.
 *
 * Command queue pointers are stored in a global named block
 * called "cvmx_cmd_queues". Except for the PKO queues, each
 * hardware queue is stored in its own cache line to reduce SMP
 * contention on spin locks. The PKO queues are stored such that
 * every 16th queue is next to each other in memory. This scheme
 * allows queues to be in separate cache lines when there is a
 * low number of queues per port. With 16 queues per port, the
 * first queue for each port is in the same cache area. The
 * second queues for each port are in another area, etc.
 * This allows software to implement very efficient lockless PKO with
 * 16 queues per port using a minimum of cache lines per core.
 * All queues for a given core will be isolated in the same
 * cache area.
 *
 * In addition to the memory pointer layout, cvmx-cmd-queue
 * provides an optimized fair ll/sc locking mechanism for the
 * queues. The lock uses a "ticket / now serving" model to
 * maintain fair order on contended locks. In addition, it uses
 * predicted locking time to limit cache contention. When a core
 * knows it must wait in line for a lock, it spins on the
 * internal cycle counter to completely eliminate any causes of
 * bus traffic.
 *
 * <hr> $Revision: 70030 $ <hr>
 */

#ifndef __CVMX_CMD_QUEUE_H__
#define __CVMX_CMD_QUEUE_H__

#if !defined(CVMX_BUILD_FOR_LINUX_KERNEL) && !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)
#include "executive-config.h"
#include "cvmx-config.h"
#endif

#include "cvmx-fpa.h"

#ifdef __cplusplus
extern "C" {
#endif

/**
 * By default we disable the max depth support. Most programs
 * don't use it and it slows down the command queue processing
 * significantly.
 */
#ifndef CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH
#define CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH 0
#endif

/**
 * Enumeration representing all hardware blocks that use command
 * queues. Each hardware block has up to 65536 sub identifiers for
 * multiple command queues. Not all chips support all hardware
 * units.
 */
typedef enum
{
    CVMX_CMD_QUEUE_PKO_BASE = 0x00000,
#define CVMX_CMD_QUEUE_PKO(queue) ((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_PKO_BASE + (0xffff&(queue))))
    CVMX_CMD_QUEUE_ZIP      = 0x10000,
#define CVMX_CMD_QUEUE_ZIP_QUE(queue) ((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_ZIP + (0xffff&(queue))))
    CVMX_CMD_QUEUE_DFA      = 0x20000,
    CVMX_CMD_QUEUE_RAID     = 0x30000,
    CVMX_CMD_QUEUE_DMA_BASE = 0x40000,
#define CVMX_CMD_QUEUE_DMA(queue) ((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_DMA_BASE + (0xffff&(queue))))
    CVMX_CMD_QUEUE_END      = 0x50000,
} cvmx_cmd_queue_id_t;
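/*
 * For illustration: the upper bits of a queue id select the hardware block
 * and the low 16 bits select a queue within that block, so the helper macros
 * above simply add the queue number to the block's base identifier. The
 * values below follow directly from the definitions above:
 *
 *   CVMX_CMD_QUEUE_PKO(5)     == (cvmx_cmd_queue_id_t)0x00005
 *   CVMX_CMD_QUEUE_ZIP_QUE(0) == (cvmx_cmd_queue_id_t)0x10000
 *   CVMX_CMD_QUEUE_DMA(1)     == (cvmx_cmd_queue_id_t)0x40001
 */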
/**
 * Command write operations can fail if the command queue needs
 * a new buffer and the associated FPA pool is empty. They can also
 * fail if the number of queued command words reaches the maximum
 * set at initialization.
 */
typedef enum
{
    CVMX_CMD_QUEUE_SUCCESS = 0,
    CVMX_CMD_QUEUE_NO_MEMORY = -1,
    CVMX_CMD_QUEUE_FULL = -2,
    CVMX_CMD_QUEUE_INVALID_PARAM = -3,
    CVMX_CMD_QUEUE_ALREADY_SETUP = -4,
} cvmx_cmd_queue_result_t;

typedef struct
{
    uint8_t  now_serving;           /**< You have the lock when this is your ticket */
    uint64_t unused1        : 24;
    uint32_t max_depth;             /**< Maximum outstanding command words */
    uint64_t fpa_pool       : 3;    /**< FPA pool the buffers come from */
    uint64_t base_ptr_div128: 29;   /**< Top of command buffer pointer shifted 7 */
    uint64_t unused2        : 6;
    uint64_t pool_size_m1   : 13;   /**< FPA buffer size in 64bit words minus 1 */
    uint64_t index          : 13;   /**< Number of commands already used in buffer */
} __cvmx_cmd_queue_state_t;

/**
 * This structure contains the global state of all command queues.
 * It is stored in a bootmem named block and shared by all
 * applications running on Octeon. Tickets are stored in a different
 * cache line than the queue information to reduce contention on the
 * ll/sc used to get a ticket. If this were not the case, the update
 * of queue state would cause the ll/sc to fail quite often.
 */
typedef struct
{
    uint64_t ticket[(CVMX_CMD_QUEUE_END>>16) * 256];
    __cvmx_cmd_queue_state_t state[(CVMX_CMD_QUEUE_END>>16) * 256];
} __cvmx_cmd_queue_all_state_t;

extern CVMX_SHARED __cvmx_cmd_queue_all_state_t *__cvmx_cmd_queue_state_ptr;

/**
 * Initialize a command queue for use. The initial FPA buffer is
 * allocated and the hardware unit is configured to point to the
 * new command queue.
 *
 * @param queue_id  Hardware command queue to initialize.
 * @param max_depth Maximum outstanding commands that can be queued.
 * @param fpa_pool  FPA pool the command queue buffers should come from.
 * @param pool_size Size of each buffer in the FPA pool (bytes)
 *
 * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id, int max_depth, int fpa_pool, int pool_size);

/**
 * Shut down a queue and free its command buffers to the FPA. The
 * hardware connected to the queue must be stopped before this
 * function is called.
 *
 * @param queue_id  Queue to shut down
 *
 * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
cvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id);

/**
 * Return the number of command words pending in the queue. This
 * function may be relatively slow for some hardware units.
 *
 * @param queue_id  Hardware command queue to query
 *
 * @return Number of outstanding commands
 */
int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id);

/**
 * Return the command buffer to be written to. The purpose of this
 * function is to allow CVMX routines access to the low level buffer
 * for initial hardware setup. User applications should not call this
 * function directly.
 *
 * @param queue_id  Command queue to query
 *
 * @return Command buffer or NULL on failure
 */
void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id);

/**
 * @INTERNAL
 * Get the index into the state arrays for the supplied queue id.
 *
 * @param queue_id  Queue ID to get an index for
 *
 * @return Index into the state arrays
 */
static inline int __cvmx_cmd_queue_get_index(cvmx_cmd_queue_id_t queue_id)
{
    /* Warning: This code currently only works with devices that have 256 queues
       or less. Devices with more than 16 queues are laid out in memory to allow
       cores quick access to every 16th queue. This reduces cache thrashing
       when you are running 16 queues per port to support lockless operation */
    int unit = queue_id >> 16;
    int q = (queue_id >> 4) & 0xf;
    int core = queue_id & 0xf;
    return unit * 256 + core * 16 + q;
}
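/*
 * To make the interleaved layout concrete, here is a small worked example of
 * the index math above (queue numbers chosen purely for illustration). PKO
 * queues whose numbers differ by 16 land in adjacent state entries, while
 * numerically consecutive queues end up 16 entries apart:
 *
 *   __cvmx_cmd_queue_get_index(CVMX_CMD_QUEUE_PKO(0x02)) == 0*256 + 2*16 + 0 == 32
 *   __cvmx_cmd_queue_get_index(CVMX_CMD_QUEUE_PKO(0x12)) == 0*256 + 2*16 + 1 == 33
 *   __cvmx_cmd_queue_get_index(CVMX_CMD_QUEUE_PKO(0x03)) == 0*256 + 3*16 + 0 == 48
 */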
/**
 * @INTERNAL
 * Lock the supplied queue so nobody else is updating it at the same
 * time as us.
 *
 * @param queue_id  Queue ID to lock
 * @param qptr      Pointer to the queue's global state
 */
static inline void __cvmx_cmd_queue_lock(cvmx_cmd_queue_id_t queue_id, __cvmx_cmd_queue_state_t *qptr)
{
    int tmp;
    int my_ticket;
    CVMX_PREFETCH(qptr, 0);
    asm volatile (
        ".set push\n"
        ".set noreorder\n"
        "1:\n"
        "ll %[my_ticket], %[ticket_ptr]\n"              /* Atomic add one to ticket_ptr */
        "li %[ticket], 1\n"                             /*   and store the original value */
        "baddu %[ticket], %[my_ticket]\n"               /*   in my_ticket */
        "sc %[ticket], %[ticket_ptr]\n"
        "beqz %[ticket], 1b\n"
        " nop\n"
        "lbu %[ticket], %[now_serving]\n"               /* Load the current now_serving ticket */
        "2:\n"
        "beq %[ticket], %[my_ticket], 4f\n"             /* Jump out if now_serving == my_ticket */
        " subu %[ticket], %[my_ticket], %[ticket]\n"    /* Find out how many tickets are in front of me */
        "subu %[ticket], 1\n"                           /* Use tickets in front of me minus one to delay */
        "cins %[ticket], %[ticket], 5, 7\n"             /* Delay will be ((tickets in front)-1)*32 loops */
        "3:\n"
        "bnez %[ticket], 3b\n"                          /* Loop here until our ticket might be up */
        " subu %[ticket], 1\n"
        "b 2b\n"                                        /* Jump back up to check our ticket again */
        " lbu %[ticket], %[now_serving]\n"              /* Load the current now_serving ticket */
        "4:\n"
        ".set pop\n"
        : [ticket_ptr] "=m" (__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]),
          [now_serving] "=m" (qptr->now_serving),
          [ticket] "=r" (tmp),
          [my_ticket] "=r" (my_ticket)
    );
}
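/*
 * The inline asm above is easier to follow as pseudo-C. This is an
 * illustrative sketch only; the real implementation needs the atomic ll/sc
 * fetch-and-increment and the branch-delay-slot scheduling shown above, and
 * atomic_fetch_and_byte_add() is a hypothetical helper that returns the old
 * ticket value:
 *
 *   uint64_t *tp = &__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)];
 *   uint8_t my_ticket = atomic_fetch_and_byte_add(tp, 1);
 *   uint8_t serving = qptr->now_serving;
 *   while (serving != my_ticket)
 *   {
 *       // Back off roughly ((tickets ahead of us) - 1) * 32 loop iterations
 *       // before reading now_serving again, so a waiting core generates no
 *       // bus traffic while it cannot possibly acquire the lock.
 *       uint32_t delay = (uint8_t)(my_ticket - serving - 1) << 5;
 *       while (delay--)
 *           ;
 *       serving = qptr->now_serving;
 *   }
 */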
/**
 * @INTERNAL
 * Unlock the queue, flushing all writes.
 *
 * @param qptr  Queue to unlock
 */
static inline void __cvmx_cmd_queue_unlock(__cvmx_cmd_queue_state_t *qptr)
{
    uint8_t ns;

    ns = qptr->now_serving + 1;
    CVMX_SYNCWS; /* Order queue manipulation with respect to the unlock. */
    qptr->now_serving = ns;
    CVMX_SYNCWS; /* Nudge out the unlock. */
}


/**
 * @INTERNAL
 * Get the queue state structure for the given queue id
 *
 * @param queue_id  Queue id to get
 *
 * @return Queue structure or NULL on failure
 */
static inline __cvmx_cmd_queue_state_t *__cvmx_cmd_queue_get_state(cvmx_cmd_queue_id_t queue_id)
{
    if (CVMX_ENABLE_PARAMETER_CHECKING)
    {
        if (cvmx_unlikely(queue_id >= CVMX_CMD_QUEUE_END))
            return NULL;
        if (cvmx_unlikely((queue_id & 0xffff) >= 256))
            return NULL;
    }
    return &__cvmx_cmd_queue_state_ptr->state[__cvmx_cmd_queue_get_index(queue_id)];
}


/**
 * Write an arbitrary number of command words to a command queue.
 * This is a generic function; the fixed number of command word
 * functions yield higher performance.
 *
 * @param queue_id  Hardware command queue to write to
 * @param use_locking
 *                  Use internal locking to ensure exclusive access for queue
 *                  updates. If you don't use this locking you must ensure
 *                  exclusivity some other way. Locking is strongly recommended.
 * @param cmd_count Number of command words to write
 * @param cmds      Array of commands to write
 *
 * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write(cvmx_cmd_queue_id_t queue_id, int use_locking, int cmd_count, uint64_t *cmds)
{
    __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);

    if (CVMX_ENABLE_PARAMETER_CHECKING)
    {
        if (cvmx_unlikely(qptr == NULL))
            return CVMX_CMD_QUEUE_INVALID_PARAM;
        if (cvmx_unlikely((cmd_count < 1) || (cmd_count > 32)))
            return CVMX_CMD_QUEUE_INVALID_PARAM;
        if (cvmx_unlikely(cmds == NULL))
            return CVMX_CMD_QUEUE_INVALID_PARAM;
    }

    /* Make sure nobody else is updating the same queue */
    if (cvmx_likely(use_locking))
        __cvmx_cmd_queue_lock(queue_id, qptr);

    /* If a max queue length was specified then make sure we don't
       exceed it. If any part of the command would be below the limit
       we allow it */
    if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && cvmx_unlikely(qptr->max_depth))
    {
        if (cvmx_unlikely(cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth))
        {
            if (cvmx_likely(use_locking))
                __cvmx_cmd_queue_unlock(qptr);
            return CVMX_CMD_QUEUE_FULL;
        }
    }

    /* Normally there is plenty of room in the current buffer for the command */
    if (cvmx_likely(qptr->index + cmd_count < qptr->pool_size_m1))
    {
        uint64_t *ptr = (uint64_t *)cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128 << 7);
        ptr += qptr->index;
        qptr->index += cmd_count;
        while (cmd_count--)
            *ptr++ = *cmds++;
    }
    else
    {
        uint64_t *ptr;
        int count;
        /* We need a new command buffer. Fail if there isn't one available */
        uint64_t *new_buffer = (uint64_t *)cvmx_fpa_alloc(qptr->fpa_pool);
        if (cvmx_unlikely(new_buffer == NULL))
        {
            if (cvmx_likely(use_locking))
                __cvmx_cmd_queue_unlock(qptr);
            return CVMX_CMD_QUEUE_NO_MEMORY;
        }
        ptr = (uint64_t *)cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128 << 7);
        /* Figure out how many command words will fit in this buffer. One
           location will be needed for the next buffer pointer */
        count = qptr->pool_size_m1 - qptr->index;
        ptr += qptr->index;
        cmd_count -= count;
        while (count--)
            *ptr++ = *cmds++;
        *ptr = cvmx_ptr_to_phys(new_buffer);
        /* The current buffer is full and has a link to the next buffer. Time
           to write the rest of the commands into the new buffer */
        qptr->base_ptr_div128 = *ptr >> 7;
        qptr->index = cmd_count;
        ptr = new_buffer;
        while (cmd_count--)
            *ptr++ = *cmds++;
    }

    /* All updates are complete. Release the lock and return */
    if (cvmx_likely(use_locking))
        __cvmx_cmd_queue_unlock(qptr);
    return CVMX_CMD_QUEUE_SUCCESS;
}
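/*
 * Typical use of the API above, sketched for illustration only. Real code
 * normally goes through the hardware-unit wrappers instead of calling these
 * functions directly, and EXAMPLE_FPA_POOL, EXAMPLE_POOL_SIZE and the command
 * words are placeholders that depend on the application's configuration:
 *
 *   cvmx_cmd_queue_id_t q = CVMX_CMD_QUEUE_DMA(0);
 *   if (cvmx_cmd_queue_initialize(q, 256, EXAMPLE_FPA_POOL, EXAMPLE_POOL_SIZE) != CVMX_CMD_QUEUE_SUCCESS)
 *       return -1;
 *   uint64_t cmd[2] = {first_word, second_word};   // unit specific command words
 *   if (cvmx_cmd_queue_write(q, 1, 2, cmd) != CVMX_CMD_QUEUE_SUCCESS)
 *       return -1;
 *   // ... unit specific CSR write to submit the command (normally done by the wrapper) ...
 *   cvmx_cmd_queue_shutdown(q);
 */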
/**
 * Simple function to write two command words to a command
 * queue.
 *
 * @param queue_id  Hardware command queue to write to
 * @param use_locking
 *                  Use internal locking to ensure exclusive access for queue
 *                  updates. If you don't use this locking you must ensure
 *                  exclusivity some other way. Locking is strongly recommended.
 * @param cmd1      Command
 * @param cmd2      Command
 *
 * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write2(cvmx_cmd_queue_id_t queue_id, int use_locking, uint64_t cmd1, uint64_t cmd2)
{
    __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);

    if (CVMX_ENABLE_PARAMETER_CHECKING)
    {
        if (cvmx_unlikely(qptr == NULL))
            return CVMX_CMD_QUEUE_INVALID_PARAM;
    }

    /* Make sure nobody else is updating the same queue */
    if (cvmx_likely(use_locking))
        __cvmx_cmd_queue_lock(queue_id, qptr);

    /* If a max queue length was specified then make sure we don't
       exceed it. If any part of the command would be below the limit
       we allow it */
    if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && cvmx_unlikely(qptr->max_depth))
    {
        if (cvmx_unlikely(cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth))
        {
            if (cvmx_likely(use_locking))
                __cvmx_cmd_queue_unlock(qptr);
            return CVMX_CMD_QUEUE_FULL;
        }
    }

    /* Normally there is plenty of room in the current buffer for the command */
    if (cvmx_likely(qptr->index + 2 < qptr->pool_size_m1))
    {
        uint64_t *ptr = (uint64_t *)cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128 << 7);
        ptr += qptr->index;
        qptr->index += 2;
        ptr[0] = cmd1;
        ptr[1] = cmd2;
    }
    else
    {
        uint64_t *ptr;
        /* Figure out how many command words will fit in this buffer. One
           location will be needed for the next buffer pointer */
        int count = qptr->pool_size_m1 - qptr->index;
        /* We need a new command buffer. Fail if there isn't one available */
        uint64_t *new_buffer = (uint64_t *)cvmx_fpa_alloc(qptr->fpa_pool);
        if (cvmx_unlikely(new_buffer == NULL))
        {
            if (cvmx_likely(use_locking))
                __cvmx_cmd_queue_unlock(qptr);
            return CVMX_CMD_QUEUE_NO_MEMORY;
        }
        count--;
        ptr = (uint64_t *)cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128 << 7);
        ptr += qptr->index;
        *ptr++ = cmd1;
        if (cvmx_likely(count))
            *ptr++ = cmd2;
        *ptr = cvmx_ptr_to_phys(new_buffer);
        /* The current buffer is full and has a link to the next buffer. Time
           to write the rest of the commands into the new buffer */
        qptr->base_ptr_div128 = *ptr >> 7;
        qptr->index = 0;
        if (cvmx_unlikely(count == 0))
        {
            qptr->index = 1;
            new_buffer[0] = cmd2;
        }
    }

    /* All updates are complete. Release the lock and return */
    if (cvmx_likely(use_locking))
        __cvmx_cmd_queue_unlock(qptr);
    return CVMX_CMD_QUEUE_SUCCESS;
}
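/*
 * Error handling sketch (illustrative only): a caller that enables
 * CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH, or that can see FPA pool exhaustion, will
 * typically retry the transient CVMX_CMD_QUEUE_FULL result rather than drop
 * work. The retry policy below is an example, not part of this API:
 *
 *   cvmx_cmd_queue_result_t r;
 *   do
 *   {
 *       r = cvmx_cmd_queue_write2(queue, 1, cmd1, cmd2);
 *   } while (r == CVMX_CMD_QUEUE_FULL);
 *   if (r != CVMX_CMD_QUEUE_SUCCESS)
 *       return r;   // CVMX_CMD_QUEUE_NO_MEMORY or CVMX_CMD_QUEUE_INVALID_PARAM
 */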
/**
 * Simple function to write three command words to a command
 * queue.
 *
 * @param queue_id  Hardware command queue to write to
 * @param use_locking
 *                  Use internal locking to ensure exclusive access for queue
 *                  updates. If you don't use this locking you must ensure
 *                  exclusivity some other way. Locking is strongly recommended.
 * @param cmd1      Command
 * @param cmd2      Command
 * @param cmd3      Command
 *
 * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write3(cvmx_cmd_queue_id_t queue_id, int use_locking, uint64_t cmd1, uint64_t cmd2, uint64_t cmd3)
{
    __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);

    if (CVMX_ENABLE_PARAMETER_CHECKING)
    {
        if (cvmx_unlikely(qptr == NULL))
            return CVMX_CMD_QUEUE_INVALID_PARAM;
    }

    /* Make sure nobody else is updating the same queue */
    if (cvmx_likely(use_locking))
        __cvmx_cmd_queue_lock(queue_id, qptr);

    /* If a max queue length was specified then make sure we don't
       exceed it. If any part of the command would be below the limit
       we allow it */
    if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && cvmx_unlikely(qptr->max_depth))
    {
        if (cvmx_unlikely(cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth))
        {
            if (cvmx_likely(use_locking))
                __cvmx_cmd_queue_unlock(qptr);
            return CVMX_CMD_QUEUE_FULL;
        }
    }

    /* Normally there is plenty of room in the current buffer for the command */
    if (cvmx_likely(qptr->index + 3 < qptr->pool_size_m1))
    {
        uint64_t *ptr = (uint64_t *)cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128 << 7);
        ptr += qptr->index;
        qptr->index += 3;
        ptr[0] = cmd1;
        ptr[1] = cmd2;
        ptr[2] = cmd3;
    }
    else
    {
        uint64_t *ptr;
        /* Figure out how many command words will fit in this buffer. One
           location will be needed for the next buffer pointer */
        int count = qptr->pool_size_m1 - qptr->index;
        /* We need a new command buffer. Fail if there isn't one available */
        uint64_t *new_buffer = (uint64_t *)cvmx_fpa_alloc(qptr->fpa_pool);
        if (cvmx_unlikely(new_buffer == NULL))
        {
            if (cvmx_likely(use_locking))
                __cvmx_cmd_queue_unlock(qptr);
            return CVMX_CMD_QUEUE_NO_MEMORY;
        }
        count--;
        ptr = (uint64_t *)cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128 << 7);
        ptr += qptr->index;
        *ptr++ = cmd1;
        if (count)
        {
            *ptr++ = cmd2;
            if (count > 1)
                *ptr++ = cmd3;
        }
        *ptr = cvmx_ptr_to_phys(new_buffer);
        /* The current buffer is full and has a link to the next buffer. Time
           to write the rest of the commands into the new buffer */
        qptr->base_ptr_div128 = *ptr >> 7;
        qptr->index = 0;
        ptr = new_buffer;
        if (count == 0)
        {
            *ptr++ = cmd2;
            qptr->index++;
        }
        if (count < 2)
        {
            *ptr++ = cmd3;
            qptr->index++;
        }
    }

    /* All updates are complete. Release the lock and return */
    if (cvmx_likely(use_locking))
        __cvmx_cmd_queue_unlock(qptr);
    return CVMX_CMD_QUEUE_SUCCESS;
}

#ifdef __cplusplus
}
#endif

#endif /* __CVMX_CMD_QUEUE_H__ */