/***********************license start***************
 * Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
 * reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *
 *   * Neither the name of Cavium Inc. nor the names of
 *     its contributors may be used to endorse or promote products
 *     derived from this software without specific prior written
 *     permission.
 *
 * This Software, including technical data, may be subject to U.S. export control
 * laws, including the U.S. Export Administration Act and its associated
 * regulations, and may be subject to export or import regulations in other
 * countries.
 *
 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
 ***********************license end**************************************/


/**
 * @file
 *
 * Support functions for managing command queues used for
 * various hardware blocks.
51210284Sjmallett * 52232812Sjmallett * <hr>$Revision: 70030 $<hr> 53210284Sjmallett */ 54215990Sjmallett#ifdef CVMX_BUILD_FOR_LINUX_KERNEL 55215990Sjmallett#include <linux/module.h> 56215990Sjmallett#include <asm/octeon/cvmx.h> 57215990Sjmallett#include <asm/octeon/cvmx-bootmem.h> 58215990Sjmallett#include <asm/octeon/cvmx-npei-defs.h> 59215990Sjmallett#include <asm/octeon/cvmx-pexp-defs.h> 60215990Sjmallett#include <asm/octeon/cvmx-dpi-defs.h> 61215990Sjmallett#include <asm/octeon/cvmx-pko-defs.h> 62215990Sjmallett#include <asm/octeon/cvmx-config.h> 63215990Sjmallett#include <asm/octeon/cvmx-fpa.h> 64215990Sjmallett#include <asm/octeon/cvmx-cmd-queue.h> 65215990Sjmallett#else 66210284Sjmallett#include "cvmx.h" 67215990Sjmallett#include "cvmx-bootmem.h" 68215990Sjmallett#if !defined(__FreeBSD__) || !defined(_KERNEL) 69215990Sjmallett#include "cvmx-config.h" 70215990Sjmallett#endif 71210284Sjmallett#include "cvmx-fpa.h" 72210284Sjmallett#include "cvmx-cmd-queue.h" 73215990Sjmallett#endif 74210284Sjmallett 75215990Sjmallett 76210284Sjmallett/** 77210284Sjmallett * This application uses this pointer to access the global queue 78210284Sjmallett * state. It points to a bootmem named block. 79210284Sjmallett */ 80215990SjmallettCVMX_SHARED __cvmx_cmd_queue_all_state_t *__cvmx_cmd_queue_state_ptr; 81215990Sjmallett#ifdef CVMX_BUILD_FOR_LINUX_KERNEL 82215990SjmallettEXPORT_SYMBOL(__cvmx_cmd_queue_state_ptr); 83215990Sjmallett#endif 84210284Sjmallett 85210284Sjmallett/** 86210284Sjmallett * @INTERNAL 87210284Sjmallett * Initialize the Global queue state pointer. 
88210284Sjmallett * 89210284Sjmallett * @return CVMX_CMD_QUEUE_SUCCESS or a failure code 90210284Sjmallett */ 91210284Sjmallettstatic cvmx_cmd_queue_result_t __cvmx_cmd_queue_init_state_ptr(void) 92210284Sjmallett{ 93210284Sjmallett char *alloc_name = "cvmx_cmd_queues"; 94210284Sjmallett#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32 95210284Sjmallett extern uint64_t octeon_reserve32_memory; 96210284Sjmallett#endif 97210284Sjmallett 98210284Sjmallett if (cvmx_likely(__cvmx_cmd_queue_state_ptr)) 99210284Sjmallett return CVMX_CMD_QUEUE_SUCCESS; 100210284Sjmallett 101210284Sjmallett#ifdef CVMX_BUILD_FOR_LINUX_KERNEL 102215990Sjmallett#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32 103210284Sjmallett if (octeon_reserve32_memory) 104210284Sjmallett __cvmx_cmd_queue_state_ptr = cvmx_bootmem_alloc_named_range(sizeof(*__cvmx_cmd_queue_state_ptr), 105210284Sjmallett octeon_reserve32_memory, 106210284Sjmallett octeon_reserve32_memory + (CONFIG_CAVIUM_RESERVE32<<20) - 1, 107210284Sjmallett 128, alloc_name); 108210284Sjmallett else 109210284Sjmallett#endif 110210284Sjmallett __cvmx_cmd_queue_state_ptr = cvmx_bootmem_alloc_named(sizeof(*__cvmx_cmd_queue_state_ptr), 128, alloc_name); 111210284Sjmallett#else 112210284Sjmallett __cvmx_cmd_queue_state_ptr = cvmx_bootmem_alloc_named(sizeof(*__cvmx_cmd_queue_state_ptr), 128, alloc_name); 113210284Sjmallett#endif 114210284Sjmallett if (__cvmx_cmd_queue_state_ptr) 115210284Sjmallett memset(__cvmx_cmd_queue_state_ptr, 0, sizeof(*__cvmx_cmd_queue_state_ptr)); 116210284Sjmallett else 117210284Sjmallett { 118215990Sjmallett const cvmx_bootmem_named_block_desc_t *block_desc = cvmx_bootmem_find_named_block(alloc_name); 119210284Sjmallett if (block_desc) 120210284Sjmallett __cvmx_cmd_queue_state_ptr = cvmx_phys_to_ptr(block_desc->base_addr); 121210284Sjmallett else 122210284Sjmallett { 123210284Sjmallett cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: Unable to get named block %s.\n", alloc_name); 
124210284Sjmallett return CVMX_CMD_QUEUE_NO_MEMORY; 125210284Sjmallett } 126210284Sjmallett } 127210284Sjmallett return CVMX_CMD_QUEUE_SUCCESS; 128210284Sjmallett} 129210284Sjmallett 130210284Sjmallett 131210284Sjmallett/** 132210284Sjmallett * Initialize a command queue for use. The initial FPA buffer is 133210284Sjmallett * allocated and the hardware unit is configured to point to the 134210284Sjmallett * new command queue. 135210284Sjmallett * 136210284Sjmallett * @param queue_id Hardware command queue to initialize. 137210284Sjmallett * @param max_depth Maximum outstanding commands that can be queued. 138210284Sjmallett * @param fpa_pool FPA pool the command queues should come from. 139210284Sjmallett * @param pool_size Size of each buffer in the FPA pool (bytes) 140210284Sjmallett * 141210284Sjmallett * @return CVMX_CMD_QUEUE_SUCCESS or a failure code 142210284Sjmallett */ 143210284Sjmallettcvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id, int max_depth, int fpa_pool, int pool_size) 144210284Sjmallett{ 145210284Sjmallett __cvmx_cmd_queue_state_t *qstate; 146210284Sjmallett cvmx_cmd_queue_result_t result = __cvmx_cmd_queue_init_state_ptr(); 147210284Sjmallett if (result != CVMX_CMD_QUEUE_SUCCESS) 148210284Sjmallett return result; 149210284Sjmallett 150210284Sjmallett qstate = __cvmx_cmd_queue_get_state(queue_id); 151210284Sjmallett if (qstate == NULL) 152210284Sjmallett return CVMX_CMD_QUEUE_INVALID_PARAM; 153210284Sjmallett 154210284Sjmallett /* We artificially limit max_depth to 1<<20 words. 
It is an arbitrary limit */ 155210284Sjmallett if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH) 156210284Sjmallett { 157210284Sjmallett if ((max_depth < 0) || (max_depth > 1<<20)) 158210284Sjmallett return CVMX_CMD_QUEUE_INVALID_PARAM; 159210284Sjmallett } 160210284Sjmallett else if (max_depth != 0) 161210284Sjmallett return CVMX_CMD_QUEUE_INVALID_PARAM; 162210284Sjmallett 163210284Sjmallett if ((fpa_pool < 0) || (fpa_pool > 7)) 164210284Sjmallett return CVMX_CMD_QUEUE_INVALID_PARAM; 165210284Sjmallett if ((pool_size < 128) || (pool_size > 65536)) 166210284Sjmallett return CVMX_CMD_QUEUE_INVALID_PARAM; 167210284Sjmallett 168210284Sjmallett /* See if someone else has already initialized the queue */ 169210284Sjmallett if (qstate->base_ptr_div128) 170210284Sjmallett { 171210284Sjmallett if (max_depth != (int)qstate->max_depth) 172210284Sjmallett { 173215990Sjmallett cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: Queue already initialized with different max_depth (%d).\n", (int)qstate->max_depth); 174210284Sjmallett return CVMX_CMD_QUEUE_INVALID_PARAM; 175210284Sjmallett } 176210284Sjmallett if (fpa_pool != qstate->fpa_pool) 177210284Sjmallett { 178215990Sjmallett cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: Queue already initialized with different FPA pool (%u).\n", qstate->fpa_pool); 179210284Sjmallett return CVMX_CMD_QUEUE_INVALID_PARAM; 180210284Sjmallett } 181210284Sjmallett if ((pool_size>>3)-1 != qstate->pool_size_m1) 182210284Sjmallett { 183215990Sjmallett cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: Queue already initialized with different FPA pool size (%u).\n", (qstate->pool_size_m1+1)<<3); 184210284Sjmallett return CVMX_CMD_QUEUE_INVALID_PARAM; 185210284Sjmallett } 186210284Sjmallett CVMX_SYNCWS; 187210284Sjmallett return CVMX_CMD_QUEUE_ALREADY_SETUP; 188210284Sjmallett } 189210284Sjmallett else 190210284Sjmallett { 191210284Sjmallett cvmx_fpa_ctl_status_t status; 192210284Sjmallett void *buffer; 193210284Sjmallett 194210284Sjmallett status.u64 = 
cvmx_read_csr(CVMX_FPA_CTL_STATUS); 195210284Sjmallett if (!status.s.enb) 196210284Sjmallett { 197210284Sjmallett cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: FPA is not enabled.\n"); 198210284Sjmallett return CVMX_CMD_QUEUE_NO_MEMORY; 199210284Sjmallett } 200210284Sjmallett buffer = cvmx_fpa_alloc(fpa_pool); 201210284Sjmallett if (buffer == NULL) 202210284Sjmallett { 203210284Sjmallett cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: Unable to allocate initial buffer.\n"); 204210284Sjmallett return CVMX_CMD_QUEUE_NO_MEMORY; 205210284Sjmallett } 206210284Sjmallett 207210284Sjmallett memset(qstate, 0, sizeof(*qstate)); 208210284Sjmallett qstate->max_depth = max_depth; 209210284Sjmallett qstate->fpa_pool = fpa_pool; 210210284Sjmallett qstate->pool_size_m1 = (pool_size>>3)-1; 211210284Sjmallett qstate->base_ptr_div128 = cvmx_ptr_to_phys(buffer) / 128; 212210284Sjmallett /* We zeroed the now serving field so we need to also zero the ticket */ 213210284Sjmallett __cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)] = 0; 214210284Sjmallett CVMX_SYNCWS; 215210284Sjmallett return CVMX_CMD_QUEUE_SUCCESS; 216210284Sjmallett } 217210284Sjmallett} 218210284Sjmallett 219210284Sjmallett 220210284Sjmallett/** 221210284Sjmallett * Shutdown a queue a free it's command buffers to the FPA. The 222210284Sjmallett * hardware connected to the queue must be stopped before this 223210284Sjmallett * function is called. 
224210284Sjmallett * 225210284Sjmallett * @param queue_id Queue to shutdown 226210284Sjmallett * 227210284Sjmallett * @return CVMX_CMD_QUEUE_SUCCESS or a failure code 228210284Sjmallett */ 229210284Sjmallettcvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id) 230210284Sjmallett{ 231210284Sjmallett __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id); 232210284Sjmallett if (qptr == NULL) 233210284Sjmallett { 234210284Sjmallett cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Unable to get queue information.\n"); 235210284Sjmallett return CVMX_CMD_QUEUE_INVALID_PARAM; 236210284Sjmallett } 237210284Sjmallett 238210284Sjmallett if (cvmx_cmd_queue_length(queue_id) > 0) 239210284Sjmallett { 240210284Sjmallett cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Queue still has data in it.\n"); 241210284Sjmallett return CVMX_CMD_QUEUE_FULL; 242210284Sjmallett } 243210284Sjmallett 244210284Sjmallett __cvmx_cmd_queue_lock(queue_id, qptr); 245210284Sjmallett if (qptr->base_ptr_div128) 246210284Sjmallett { 247210284Sjmallett cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128<<7), qptr->fpa_pool, 0); 248210284Sjmallett qptr->base_ptr_div128 = 0; 249210284Sjmallett } 250210284Sjmallett __cvmx_cmd_queue_unlock(qptr); 251210284Sjmallett 252210284Sjmallett return CVMX_CMD_QUEUE_SUCCESS; 253210284Sjmallett} 254210284Sjmallett 255210284Sjmallett 256210284Sjmallett/** 257210284Sjmallett * Return the number of command words pending in the queue. This 258210284Sjmallett * function may be relatively slow for some hardware units. 
259210284Sjmallett * 260210284Sjmallett * @param queue_id Hardware command queue to query 261210284Sjmallett * 262210284Sjmallett * @return Number of outstanding commands 263210284Sjmallett */ 264210284Sjmallettint cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id) 265210284Sjmallett{ 266210284Sjmallett if (CVMX_ENABLE_PARAMETER_CHECKING) 267210284Sjmallett { 268210284Sjmallett if (__cvmx_cmd_queue_get_state(queue_id) == NULL) 269210284Sjmallett return CVMX_CMD_QUEUE_INVALID_PARAM; 270210284Sjmallett } 271210284Sjmallett 272210284Sjmallett /* The cast is here so gcc with check that all values in the 273210284Sjmallett cvmx_cmd_queue_id_t enumeration are here */ 274210284Sjmallett switch ((cvmx_cmd_queue_id_t)(queue_id & 0xff0000)) 275210284Sjmallett { 276210284Sjmallett case CVMX_CMD_QUEUE_PKO_BASE: 277210284Sjmallett /* FIXME: Need atomic lock on CVMX_PKO_REG_READ_IDX. Right now we 278210284Sjmallett are normally called with the queue lock, so that is a SLIGHT 279210284Sjmallett amount of protection */ 280210284Sjmallett cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue_id & 0xffff); 281210284Sjmallett if (OCTEON_IS_MODEL(OCTEON_CN3XXX)) 282210284Sjmallett { 283210284Sjmallett cvmx_pko_mem_debug9_t debug9; 284210284Sjmallett debug9.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG9); 285210284Sjmallett return debug9.cn38xx.doorbell; 286210284Sjmallett } 287210284Sjmallett else 288210284Sjmallett { 289210284Sjmallett cvmx_pko_mem_debug8_t debug8; 290210284Sjmallett debug8.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG8); 291232812Sjmallett if (octeon_has_feature(OCTEON_FEATURE_PKND)) 292232812Sjmallett return debug8.cn68xx.doorbell; 293232812Sjmallett else 294232812Sjmallett return debug8.cn58xx.doorbell; 295210284Sjmallett } 296210284Sjmallett case CVMX_CMD_QUEUE_ZIP: 297210284Sjmallett case CVMX_CMD_QUEUE_DFA: 298210284Sjmallett case CVMX_CMD_QUEUE_RAID: 299210284Sjmallett // FIXME: Implement other lengths 300210284Sjmallett return 0; 301210284Sjmallett case CVMX_CMD_QUEUE_DMA_BASE: 
302215990Sjmallett if (octeon_has_feature(OCTEON_FEATURE_NPEI)) 303210284Sjmallett { 304210284Sjmallett cvmx_npei_dmax_counts_t dmax_counts; 305210284Sjmallett dmax_counts.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DMAX_COUNTS(queue_id & 0x7)); 306210284Sjmallett return dmax_counts.s.dbell; 307210284Sjmallett } 308215990Sjmallett else 309215990Sjmallett { 310215990Sjmallett cvmx_dpi_dmax_counts_t dmax_counts; 311215990Sjmallett dmax_counts.u64 = cvmx_read_csr(CVMX_DPI_DMAX_COUNTS(queue_id & 0x7)); 312215990Sjmallett return dmax_counts.s.dbell; 313215990Sjmallett } 314210284Sjmallett case CVMX_CMD_QUEUE_END: 315210284Sjmallett return CVMX_CMD_QUEUE_INVALID_PARAM; 316210284Sjmallett } 317210284Sjmallett return CVMX_CMD_QUEUE_INVALID_PARAM; 318210284Sjmallett} 319210284Sjmallett 320210284Sjmallett 321210284Sjmallett/** 322210284Sjmallett * Return the command buffer to be written to. The purpose of this 323215990Sjmallett * function is to allow CVMX routine access to the low level buffer 324210284Sjmallett * for initial hardware setup. User applications should not call this 325210284Sjmallett * function directly. 326210284Sjmallett * 327210284Sjmallett * @param queue_id Command queue to query 328210284Sjmallett * 329210284Sjmallett * @return Command buffer or NULL on failure 330210284Sjmallett */ 331210284Sjmallettvoid *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id) 332210284Sjmallett{ 333210284Sjmallett __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id); 334210284Sjmallett if (qptr && qptr->base_ptr_div128) 335210284Sjmallett return cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128<<7); 336210284Sjmallett else 337210284Sjmallett return NULL; 338210284Sjmallett} 339210284Sjmallett 340