/***********************license start***************
 * Copyright (c) 2003-2010  Cavium Inc. (support@cavium.com). All rights
 * reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *
 *   * Neither the name of Cavium Inc. nor the names of
 *     its contributors may be used to endorse or promote products
 *     derived from this software without specific prior written
 *     permission.
 *
 * This Software, including technical data, may be subject to U.S. export
 * control laws, including the U.S. Export Administration Act and its
 * associated regulations, and may be subject to export or import
 * regulations in other countries.
 *
 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
 ***********************license end**************************************/

/**
 * @file
 *
 * Support functions for managing command queues used for
 * various hardware blocks.
 *
 * The common command queue infrastructure abstracts out the
 * software necessary for adding to Octeon's chained queue
 * structures. These structures are used for commands to the
 * PKO, ZIP, DFA, RAID, and DMA engine blocks. Although each
 * hardware unit takes commands and CSRs of different types,
 * they all use basic linked command buffers to store the
 * pending requests. In general, users of the CVMX API don't
 * call cvmx-cmd-queue functions directly. Instead the hardware
 * unit specific wrapper should be used. The wrappers perform
 * unit specific validation and CSR writes to submit the
 * commands.
 *
 * Even though most software will never directly interact with
 * cvmx-cmd-queue, knowledge of its internal workings can help
 * in diagnosing performance problems and help with debugging.
 *
 * Command queue pointers are stored in a global named block
 * called "cvmx_cmd_queues". Except for the PKO queues, each
 * hardware queue is stored in its own cache line to reduce SMP
 * contention on spin locks. The PKO queues are stored such that
 * every 16th queue is next to each other in memory. This scheme
 * allows the queues to be in separate cache lines when there
 * is a low number of queues per port. With 16 queues per port,
 * the first queue for each port is in the same cache area. The
 * second queues for each port are in another area, etc. This
 * allows software to implement very efficient lockless PKO with
 * 16 queues per port using a minimum of cache lines per core.
 * All queues for a given core will be isolated in the same
 * cache area.
 *
 * In addition to the memory pointer layout, cvmx-cmd-queue
 * provides an optimized fair ll/sc locking mechanism for the
 * queues. The lock uses a "ticket / now serving" model to
 * maintain fair order on contended locks. In addition, it uses
 * predicted locking time to limit cache contention. When a core
 * knows it must wait in line for a lock, it spins on the
 * internal cycle counter to completely eliminate any causes of
 * bus traffic.
 *
 * <hr> $Revision: 70030 $ <hr>
 */

#ifndef __CVMX_CMD_QUEUE_H__
#define __CVMX_CMD_QUEUE_H__

#if !defined(CVMX_BUILD_FOR_LINUX_KERNEL) && !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)
#include "executive-config.h"
#include "cvmx-config.h"
#endif

#include "cvmx-fpa.h"

#ifdef	__cplusplus
extern "C" {
#endif

/**
 * By default we disable the max depth support. Most programs
 * don't use it and it slows down the command queue processing
 * significantly.
 */
#ifndef CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH
#define CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH 0
#endif
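
/*
 * A build that needs depth checking can turn it on at compile time, e.g.
 * (illustrative compiler invocation, not mandated by this header):
 *
 *   cc -DCVMX_CMD_QUEUE_ENABLE_MAX_DEPTH=1 ...
 *
 * and then pass a non-zero max_depth to cvmx_cmd_queue_initialize().
 */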

/**
 * Enumeration representing all hardware blocks that use command
 * queues. Each hardware block has up to 65536 sub identifiers for
 * multiple command queues. Not all chips support all hardware
 * units.
 */
typedef enum
{
    CVMX_CMD_QUEUE_PKO_BASE = 0x00000,
#define CVMX_CMD_QUEUE_PKO(queue) ((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_PKO_BASE + (0xffff&(queue))))
    CVMX_CMD_QUEUE_ZIP      = 0x10000,
#define CVMX_CMD_QUEUE_ZIP_QUE(queue) ((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_ZIP + (0xffff&(queue))))
    CVMX_CMD_QUEUE_DFA      = 0x20000,
    CVMX_CMD_QUEUE_RAID     = 0x30000,
    CVMX_CMD_QUEUE_DMA_BASE = 0x40000,
#define CVMX_CMD_QUEUE_DMA(queue) ((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_DMA_BASE + (0xffff&(queue))))
    CVMX_CMD_QUEUE_END      = 0x50000,
} cvmx_cmd_queue_id_t;
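
/*
 * Example (illustrative): the helper macros above combine a hardware block
 * base with a 16 bit sub identifier to form a queue id.
 *
 *   cvmx_cmd_queue_id_t pko_q = CVMX_CMD_QUEUE_PKO(3);  // PKO output queue 3
 *   cvmx_cmd_queue_id_t dma_q = CVMX_CMD_QUEUE_DMA(0);  // DMA engine queue 0
 */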

/**
 * Command write operations can fail if the command queue needs
 * a new buffer and the associated FPA pool is empty. It can also
 * fail if the number of queued command words reaches the maximum
 * set at initialization.
 */
typedef enum
{
    CVMX_CMD_QUEUE_SUCCESS = 0,
    CVMX_CMD_QUEUE_NO_MEMORY = -1,
    CVMX_CMD_QUEUE_FULL = -2,
    CVMX_CMD_QUEUE_INVALID_PARAM = -3,
    CVMX_CMD_QUEUE_ALREADY_SETUP = -4,
} cvmx_cmd_queue_result_t;

typedef struct
{
    uint8_t  now_serving;           /**< You have the lock when this is your ticket */
    uint64_t unused1        : 24;
    uint32_t max_depth;             /**< Maximum outstanding command words */
    uint64_t fpa_pool       : 3;    /**< FPA pool the command buffers come from */
    uint64_t base_ptr_div128: 29;   /**< Top of command buffer pointer shifted 7 */
    uint64_t unused2        : 6;
    uint64_t pool_size_m1   : 13;   /**< FPA buffer size in 64bit words minus 1 */
    uint64_t index          : 13;   /**< Number of commands already used in buffer */
} __cvmx_cmd_queue_state_t;

/**
 * This structure contains the global state of all command queues.
 * It is stored in a bootmem named block and shared by all
 * applications running on Octeon. Tickets are stored in a different
 * cache line than the queue information to reduce contention on the
 * ll/sc used to get a ticket. If this were not the case, the update
 * of queue state would cause the ll/sc to fail quite often.
 */
typedef struct
{
    uint64_t                 ticket[(CVMX_CMD_QUEUE_END>>16) * 256];
    __cvmx_cmd_queue_state_t state[(CVMX_CMD_QUEUE_END>>16) * 256];
} __cvmx_cmd_queue_all_state_t;

extern CVMX_SHARED __cvmx_cmd_queue_all_state_t *__cvmx_cmd_queue_state_ptr;

/**
 * Initialize a command queue for use. The initial FPA buffer is
 * allocated and the hardware unit is configured to point to the
 * new command queue.
 *
 * @param queue_id  Hardware command queue to initialize.
 * @param max_depth Maximum outstanding commands that can be queued.
 * @param fpa_pool  FPA pool the command buffers should come from.
 * @param pool_size Size of each buffer in the FPA pool (bytes)
 *
 * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id, int max_depth, int fpa_pool, int pool_size);
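
/*
 * A minimal lifecycle sketch, assuming the application has already set up an
 * FPA pool for command buffers (the pool number and buffer size below are
 * illustrative, not mandated by the API):
 *
 *   cvmx_cmd_queue_result_t r;
 *   r = cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_DMA(0),
 *                                 0,      // 0 = no maximum depth limit
 *                                 3,      // FPA pool holding the buffers
 *                                 1024);  // bytes per FPA buffer
 *   if (r == CVMX_CMD_QUEUE_SUCCESS)
 *   {
 *       // ... submit commands with cvmx_cmd_queue_write*() ...
 *       cvmx_cmd_queue_shutdown(CVMX_CMD_QUEUE_DMA(0));
 *   }
 */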

/**
 * Shut down a queue and free its command buffers to the FPA. The
 * hardware connected to the queue must be stopped before this
 * function is called.
 *
 * @param queue_id Queue to shut down
 *
 * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
cvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id);

/**
 * Return the number of command words pending in the queue. This
 * function may be relatively slow for some hardware units.
 *
 * @param queue_id Hardware command queue to query
 *
 * @return Number of outstanding commands
 */
int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id);

/**
 * Return the command buffer to be written to. The purpose of this
 * function is to allow CVMX routines access to the low level buffer
 * for initial hardware setup. User applications should not call this
 * function directly.
 *
 * @param queue_id Command queue to query
 *
 * @return Command buffer or NULL on failure
 */
void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id);

/**
 * @INTERNAL
 * Get the index into the state arrays for the supplied queue id.
 *
 * @param queue_id Queue ID to get an index for
 *
 * @return Index into the state arrays
 */
static inline int __cvmx_cmd_queue_get_index(cvmx_cmd_queue_id_t queue_id)
{
    /* Warning: This code currently only works with devices that have 256 queues
        or less. Devices with more than 16 queues are laid out in memory to allow
        cores quick access to every 16th queue. This reduces cache thrashing
        when you are running 16 queues per port to support lockless operation */
    int unit = queue_id >> 16;
    int q = (queue_id >> 4) & 0xf;
    int core = queue_id & 0xf;
    return unit*256 + core*16 + q;
}
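
/*
 * Worked example of the index math above (values assume PKO, unit 0):
 *
 *   queue_id 0x02: q = 0, core = 2 -> index 0*256 + 2*16 + 0 = 32
 *   queue_id 0x12: q = 1, core = 2 -> index 0*256 + 2*16 + 1 = 33
 *
 * Queues 2, 18, 34, ... therefore occupy adjacent state entries, which is
 * the "every 16th queue is next to each other" layout described in the
 * file comment.
 */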

/**
 * @INTERNAL
 * Lock the supplied queue so nobody else is updating it at the same
 * time as us.
 *
 * @param queue_id Queue ID to lock
 * @param qptr     Pointer to the queue's global state
 */
static inline void __cvmx_cmd_queue_lock(cvmx_cmd_queue_id_t queue_id, __cvmx_cmd_queue_state_t *qptr)
{
    int tmp;
    int my_ticket;
    CVMX_PREFETCH(qptr, 0);
    asm volatile (
        ".set push\n"
        ".set noreorder\n"
        "1:\n"
        "ll     %[my_ticket], %[ticket_ptr]\n"          /* Atomic add one to ticket_ptr */
        "li     %[ticket], 1\n"                         /*    and store the original value */
        "baddu  %[ticket], %[my_ticket]\n"              /*    in my_ticket */
        "sc     %[ticket], %[ticket_ptr]\n"
        "beqz   %[ticket], 1b\n"
        " nop\n"
        "lbu    %[ticket], %[now_serving]\n"            /* Load the current now_serving ticket */
        "2:\n"
        "beq    %[ticket], %[my_ticket], 4f\n"          /* Jump out if now_serving == my_ticket */
        " subu   %[ticket], %[my_ticket], %[ticket]\n"  /* Find out how many tickets are in front of me */
        "subu  %[ticket], 1\n"                          /* Use tickets in front of me minus one to delay */
        "cins   %[ticket], %[ticket], 5, 7\n"           /* Delay will be ((tickets in front)-1)*32 loops */
        "3:\n"
        "bnez   %[ticket], 3b\n"                        /* Loop here until our ticket might be up */
        " subu  %[ticket], 1\n"
        "b      2b\n"                                   /* Jump back up to check our ticket again */
        " lbu   %[ticket], %[now_serving]\n"            /* Load the current now_serving ticket */
        "4:\n"
        ".set pop\n"
        : [ticket_ptr] "=m" (__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]),
          [now_serving] "=m" (qptr->now_serving),
          [ticket] "=r" (tmp),
          [my_ticket] "=r" (my_ticket)
    );
}
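
/*
 * In spirit, the assembly above implements the following plain C sketch
 * (illustrative only: it omits the ll/sc retry loop that makes the ticket
 * increment atomic and the cycle-counter spin that keeps waiters off the
 * bus; atomic_fetch_add_u8() and delay_loops() are hypothetical helpers,
 * not part of this API):
 *
 *   uint8_t my_ticket = atomic_fetch_add_u8(&ticket, 1);  // take a ticket
 *   while (qptr->now_serving != my_ticket)                // wait for our turn
 *       delay_loops(32 * (uint8_t)(my_ticket - qptr->now_serving - 1));
 */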

/**
 * @INTERNAL
 * Unlock the queue, flushing all writes.
 *
 * @param qptr   Queue to unlock
 */
static inline void __cvmx_cmd_queue_unlock(__cvmx_cmd_queue_state_t *qptr)
{
    uint8_t ns;

    ns = qptr->now_serving + 1;
    CVMX_SYNCWS; /* Order queue manipulation with respect to the unlock. */
    qptr->now_serving = ns;
    CVMX_SYNCWS; /* Nudge out the unlock. */
}


/**
 * @INTERNAL
 * Get the queue state structure for the given queue id
 *
 * @param queue_id Queue id to get
 *
 * @return Queue structure or NULL on failure
 */
static inline __cvmx_cmd_queue_state_t *__cvmx_cmd_queue_get_state(cvmx_cmd_queue_id_t queue_id)
{
    if (CVMX_ENABLE_PARAMETER_CHECKING)
    {
        if (cvmx_unlikely(queue_id >= CVMX_CMD_QUEUE_END))
            return NULL;
        if (cvmx_unlikely((queue_id & 0xffff) >= 256))
            return NULL;
    }
    return &__cvmx_cmd_queue_state_ptr->state[__cvmx_cmd_queue_get_index(queue_id)];
}


/**
 * Write an arbitrary number of command words to a command queue.
 * This is a generic function; the fixed number of command word
 * functions yield higher performance.
 *
 * @param queue_id  Hardware command queue to write to
 * @param use_locking
 *                  Use internal locking to ensure exclusive access for queue
 *                  updates. If you don't use this locking you must ensure
 *                  exclusivity some other way. Locking is strongly recommended.
 * @param cmd_count Number of command words to write
 * @param cmds      Array of commands to write
 *
 * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write(cvmx_cmd_queue_id_t queue_id, int use_locking, int cmd_count, uint64_t *cmds)
{
    __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);

    if (CVMX_ENABLE_PARAMETER_CHECKING)
    {
        if (cvmx_unlikely(qptr == NULL))
            return CVMX_CMD_QUEUE_INVALID_PARAM;
        if (cvmx_unlikely((cmd_count < 1) || (cmd_count > 32)))
            return CVMX_CMD_QUEUE_INVALID_PARAM;
        if (cvmx_unlikely(cmds == NULL))
            return CVMX_CMD_QUEUE_INVALID_PARAM;
    }

    /* Make sure nobody else is updating the same queue */
    if (cvmx_likely(use_locking))
        __cvmx_cmd_queue_lock(queue_id, qptr);

    /* If a max queue length was specified then make sure we don't
        exceed it. If any part of the command would be below the limit
        we allow it */
    if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && cvmx_unlikely(qptr->max_depth))
    {
        if (cvmx_unlikely(cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth))
        {
            if (cvmx_likely(use_locking))
                __cvmx_cmd_queue_unlock(qptr);
            return CVMX_CMD_QUEUE_FULL;
        }
    }

    /* Normally there is plenty of room in the current buffer for the command */
    if (cvmx_likely(qptr->index + cmd_count < qptr->pool_size_m1))
    {
        uint64_t *ptr = (uint64_t *)cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128<<7);
        ptr += qptr->index;
        qptr->index += cmd_count;
        while (cmd_count--)
            *ptr++ = *cmds++;
    }
    else
    {
        uint64_t *ptr;
        int count;
        /* We need a new command buffer. Fail if there isn't one available */
        uint64_t *new_buffer = (uint64_t *)cvmx_fpa_alloc(qptr->fpa_pool);
        if (cvmx_unlikely(new_buffer == NULL))
        {
            if (cvmx_likely(use_locking))
                __cvmx_cmd_queue_unlock(qptr);
            return CVMX_CMD_QUEUE_NO_MEMORY;
        }
        ptr = (uint64_t *)cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128<<7);
        /* Figure out how many command words will fit in this buffer. One
            location will be needed for the next buffer pointer */
        count = qptr->pool_size_m1 - qptr->index;
        ptr += qptr->index;
        cmd_count -= count;
        while (count--)
            *ptr++ = *cmds++;
        *ptr = cvmx_ptr_to_phys(new_buffer);
        /* The current buffer is full and has a link to the next buffer. Time
            to write the rest of the commands into the new buffer */
        qptr->base_ptr_div128 = *ptr >> 7;
        qptr->index = cmd_count;
        ptr = new_buffer;
        while (cmd_count--)
            *ptr++ = *cmds++;
    }

    /* All updates are complete. Release the lock and return */
    if (cvmx_likely(use_locking))
        __cvmx_cmd_queue_unlock(qptr);
    return CVMX_CMD_QUEUE_SUCCESS;
}
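
/*
 * Example use (illustrative; the queue id and command words are placeholders):
 *
 *   uint64_t cmds[3] = { word0, word1, word2 };
 *   if (cvmx_cmd_queue_write(CVMX_CMD_QUEUE_DMA(0), 1, 3, cmds) !=
 *       CVMX_CMD_QUEUE_SUCCESS)
 *   {
 *       // Queue is over its max depth or the FPA pool is empty
 *   }
 */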

/**
 * Simple function to write two command words to a command
 * queue.
 *
 * @param queue_id Hardware command queue to write to
 * @param use_locking
 *                 Use internal locking to ensure exclusive access for queue
 *                 updates. If you don't use this locking you must ensure
 *                 exclusivity some other way. Locking is strongly recommended.
 * @param cmd1     Command
 * @param cmd2     Command
 *
 * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write2(cvmx_cmd_queue_id_t queue_id, int use_locking, uint64_t cmd1, uint64_t cmd2)
{
    __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);

    if (CVMX_ENABLE_PARAMETER_CHECKING)
    {
        if (cvmx_unlikely(qptr == NULL))
            return CVMX_CMD_QUEUE_INVALID_PARAM;
    }

    /* Make sure nobody else is updating the same queue */
    if (cvmx_likely(use_locking))
        __cvmx_cmd_queue_lock(queue_id, qptr);

    /* If a max queue length was specified then make sure we don't
        exceed it. If any part of the command would be below the limit
        we allow it */
    if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && cvmx_unlikely(qptr->max_depth))
    {
        if (cvmx_unlikely(cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth))
        {
            if (cvmx_likely(use_locking))
                __cvmx_cmd_queue_unlock(qptr);
            return CVMX_CMD_QUEUE_FULL;
        }
    }

    /* Normally there is plenty of room in the current buffer for the command */
    if (cvmx_likely(qptr->index + 2 < qptr->pool_size_m1))
    {
        uint64_t *ptr = (uint64_t *)cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128<<7);
        ptr += qptr->index;
        qptr->index += 2;
        ptr[0] = cmd1;
        ptr[1] = cmd2;
    }
    else
    {
        uint64_t *ptr;
        /* Figure out how many command words will fit in this buffer. One
            location will be needed for the next buffer pointer */
        int count = qptr->pool_size_m1 - qptr->index;
        /* We need a new command buffer. Fail if there isn't one available */
        uint64_t *new_buffer = (uint64_t *)cvmx_fpa_alloc(qptr->fpa_pool);
        if (cvmx_unlikely(new_buffer == NULL))
        {
            if (cvmx_likely(use_locking))
                __cvmx_cmd_queue_unlock(qptr);
            return CVMX_CMD_QUEUE_NO_MEMORY;
        }
        count--;
        ptr = (uint64_t *)cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128<<7);
        ptr += qptr->index;
        *ptr++ = cmd1;
        if (cvmx_likely(count))
            *ptr++ = cmd2;
        *ptr = cvmx_ptr_to_phys(new_buffer);
        /* The current buffer is full and has a link to the next buffer. Time
            to write the rest of the commands into the new buffer */
        qptr->base_ptr_div128 = *ptr >> 7;
        qptr->index = 0;
        if (cvmx_unlikely(count == 0))
        {
            qptr->index = 1;
            new_buffer[0] = cmd2;
        }
    }

    /* All updates are complete. Release the lock and return */
    if (cvmx_likely(use_locking))
        __cvmx_cmd_queue_unlock(qptr);
    return CVMX_CMD_QUEUE_SUCCESS;
}


/**
 * Simple function to write three command words to a command
 * queue.
 *
 * @param queue_id Hardware command queue to write to
 * @param use_locking
 *                 Use internal locking to ensure exclusive access for queue
 *                 updates. If you don't use this locking you must ensure
 *                 exclusivity some other way. Locking is strongly recommended.
 * @param cmd1     Command
 * @param cmd2     Command
 * @param cmd3     Command
 *
 * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write3(cvmx_cmd_queue_id_t queue_id, int use_locking, uint64_t cmd1, uint64_t cmd2, uint64_t cmd3)
{
    __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);

    if (CVMX_ENABLE_PARAMETER_CHECKING)
    {
        if (cvmx_unlikely(qptr == NULL))
            return CVMX_CMD_QUEUE_INVALID_PARAM;
    }

    /* Make sure nobody else is updating the same queue */
    if (cvmx_likely(use_locking))
        __cvmx_cmd_queue_lock(queue_id, qptr);

    /* If a max queue length was specified then make sure we don't
        exceed it. If any part of the command would be below the limit
        we allow it */
    if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && cvmx_unlikely(qptr->max_depth))
    {
        if (cvmx_unlikely(cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth))
        {
            if (cvmx_likely(use_locking))
                __cvmx_cmd_queue_unlock(qptr);
            return CVMX_CMD_QUEUE_FULL;
        }
    }

    /* Normally there is plenty of room in the current buffer for the command */
    if (cvmx_likely(qptr->index + 3 < qptr->pool_size_m1))
    {
        uint64_t *ptr = (uint64_t *)cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128<<7);
        ptr += qptr->index;
        qptr->index += 3;
        ptr[0] = cmd1;
        ptr[1] = cmd2;
        ptr[2] = cmd3;
    }
    else
    {
        uint64_t *ptr;
        /* Figure out how many command words will fit in this buffer. One
            location will be needed for the next buffer pointer */
        int count = qptr->pool_size_m1 - qptr->index;
        /* We need a new command buffer. Fail if there isn't one available */
        uint64_t *new_buffer = (uint64_t *)cvmx_fpa_alloc(qptr->fpa_pool);
        if (cvmx_unlikely(new_buffer == NULL))
        {
            if (cvmx_likely(use_locking))
                __cvmx_cmd_queue_unlock(qptr);
            return CVMX_CMD_QUEUE_NO_MEMORY;
        }
        count--;
        ptr = (uint64_t *)cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128<<7);
        ptr += qptr->index;
        *ptr++ = cmd1;
        if (count)
        {
            *ptr++ = cmd2;
            if (count > 1)
                *ptr++ = cmd3;
        }
        *ptr = cvmx_ptr_to_phys(new_buffer);
        /* The current buffer is full and has a link to the next buffer. Time
            to write the rest of the commands into the new buffer */
        qptr->base_ptr_div128 = *ptr >> 7;
        qptr->index = 0;
        ptr = new_buffer;
        if (count == 0)
        {
            *ptr++ = cmd2;
            qptr->index++;
        }
        if (count < 2)
        {
            *ptr++ = cmd3;
            qptr->index++;
        }
    }

    /* All updates are complete. Release the lock and return */
    if (cvmx_likely(use_locking))
        __cvmx_cmd_queue_unlock(qptr);
    return CVMX_CMD_QUEUE_SUCCESS;
}

#ifdef	__cplusplus
}
#endif

#endif /* __CVMX_CMD_QUEUE_H__ */