/* cvmx-dma-engine.c — SVN revision 210284 */
/***********************license start***************
 * Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
 * reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *
 *     * Redistributions in binary form must reproduce the above
 *       copyright notice, this list of conditions and the following
 *       disclaimer in the documentation and/or other materials provided
 *       with the distribution.
 *
 *     * Neither the name of Cavium Networks nor the names of
 *       its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written
 *       permission.
 *
 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
 * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
 * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
 * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
 * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
 * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
 * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
 * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
 * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
 *
 *
 * For any questions regarding licensing please contact marketing@caviumnetworks.com
 *
 ***********************license end**************************************/




/**
 * @file
 *
 * Interface to the PCI / PCIe DMA engines. These are only available
 * on chips with PCI / PCIe.
49210284Sjmallett * 50210284Sjmallett * <hr>$Revision: 41586 $<hr> 51210284Sjmallett */ 52210284Sjmallett#include "executive-config.h" 53210284Sjmallett#include "cvmx-config.h" 54210284Sjmallett#include "cvmx.h" 55210284Sjmallett#include "cvmx-cmd-queue.h" 56210284Sjmallett#include "cvmx-dma-engine.h" 57210284Sjmallett 58210284Sjmallett#ifdef CVMX_ENABLE_PKO_FUNCTIONS 59210284Sjmallett 60210284Sjmallett/** 61210284Sjmallett * Return the number of DMA engimes supported by this chip 62210284Sjmallett * 63210284Sjmallett * @return Number of DMA engines 64210284Sjmallett */ 65210284Sjmallettint cvmx_dma_engine_get_num(void) 66210284Sjmallett{ 67210284Sjmallett if (octeon_has_feature(OCTEON_FEATURE_PCIE)) 68210284Sjmallett { 69210284Sjmallett if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) 70210284Sjmallett return 4; 71210284Sjmallett else 72210284Sjmallett return 5; 73210284Sjmallett } 74210284Sjmallett else 75210284Sjmallett return 2; 76210284Sjmallett} 77210284Sjmallett 78210284Sjmallett/** 79210284Sjmallett * Initialize the DMA engines for use 80210284Sjmallett * 81210284Sjmallett * @return Zero on success, negative on failure 82210284Sjmallett */ 83210284Sjmallettint cvmx_dma_engine_initialize(void) 84210284Sjmallett{ 85210284Sjmallett cvmx_npei_dmax_ibuff_saddr_t dmax_ibuff_saddr; 86210284Sjmallett int engine; 87210284Sjmallett 88210284Sjmallett for (engine=0; engine < cvmx_dma_engine_get_num(); engine++) 89210284Sjmallett { 90210284Sjmallett cvmx_cmd_queue_result_t result; 91210284Sjmallett result = cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_DMA(engine), 92210284Sjmallett 0, CVMX_FPA_OUTPUT_BUFFER_POOL, 93210284Sjmallett CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE); 94210284Sjmallett if (result != CVMX_CMD_QUEUE_SUCCESS) 95210284Sjmallett return -1; 96210284Sjmallett dmax_ibuff_saddr.u64 = 0; 97210284Sjmallett dmax_ibuff_saddr.s.saddr = cvmx_ptr_to_phys(cvmx_cmd_queue_buffer(CVMX_CMD_QUEUE_DMA(engine))) >> 7; 98210284Sjmallett if (octeon_has_feature(OCTEON_FEATURE_PCIE)) 
99210284Sjmallett cvmx_write_csr(CVMX_PEXP_NPEI_DMAX_IBUFF_SADDR(engine), dmax_ibuff_saddr.u64); 100210284Sjmallett else 101210284Sjmallett { 102210284Sjmallett if (engine) 103210284Sjmallett cvmx_write_csr(CVMX_NPI_HIGHP_IBUFF_SADDR, dmax_ibuff_saddr.u64); 104210284Sjmallett else 105210284Sjmallett cvmx_write_csr(CVMX_NPI_LOWP_IBUFF_SADDR, dmax_ibuff_saddr.u64); 106210284Sjmallett } 107210284Sjmallett } 108210284Sjmallett 109210284Sjmallett if (octeon_has_feature(OCTEON_FEATURE_PCIE)) 110210284Sjmallett { 111210284Sjmallett cvmx_npei_dma_control_t dma_control; 112210284Sjmallett dma_control.u64 = 0; 113210284Sjmallett if (cvmx_dma_engine_get_num() >= 5) 114210284Sjmallett dma_control.s.dma4_enb = 1; 115210284Sjmallett dma_control.s.dma3_enb = 1; 116210284Sjmallett dma_control.s.dma2_enb = 1; 117210284Sjmallett dma_control.s.dma1_enb = 1; 118210284Sjmallett dma_control.s.dma0_enb = 1; 119210284Sjmallett dma_control.s.o_mode = 1; /* Pull NS and RO from this register, not the pointers */ 120210284Sjmallett //dma_control.s.dwb_denb = 1; 121210284Sjmallett //dma_control.s.dwb_ichk = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE/128; 122210284Sjmallett dma_control.s.fpa_que = CVMX_FPA_OUTPUT_BUFFER_POOL; 123210284Sjmallett dma_control.s.csize = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE/8; 124210284Sjmallett cvmx_write_csr(CVMX_PEXP_NPEI_DMA_CONTROL, dma_control.u64); 125210284Sjmallett /* As a workaround for errata PCIE-811 we only allow a single 126210284Sjmallett outstanding DMA read over PCIe at a time. This limits performance, 127210284Sjmallett but works in all cases. If you need higher performance, remove 128210284Sjmallett this code and implement the more complicated workaround documented 129210284Sjmallett in the errata. 
This only affects CN56XX pass 2.0 chips */ 130210284Sjmallett if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_0)) 131210284Sjmallett { 132210284Sjmallett cvmx_npei_dma_pcie_req_num_t pcie_req_num; 133210284Sjmallett pcie_req_num.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DMA_PCIE_REQ_NUM); 134210284Sjmallett pcie_req_num.s.dma_cnt = 1; 135210284Sjmallett cvmx_write_csr(CVMX_PEXP_NPEI_DMA_PCIE_REQ_NUM, pcie_req_num.u64); 136210284Sjmallett } 137210284Sjmallett } 138210284Sjmallett else 139210284Sjmallett { 140210284Sjmallett cvmx_npi_dma_control_t dma_control; 141210284Sjmallett dma_control.u64 = 0; 142210284Sjmallett //dma_control.s.dwb_denb = 1; 143210284Sjmallett //dma_control.s.dwb_ichk = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE/128; 144210284Sjmallett dma_control.s.o_add1 = 1; 145210284Sjmallett dma_control.s.fpa_que = CVMX_FPA_OUTPUT_BUFFER_POOL; 146210284Sjmallett dma_control.s.hp_enb = 1; 147210284Sjmallett dma_control.s.lp_enb = 1; 148210284Sjmallett dma_control.s.csize = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE/8; 149210284Sjmallett cvmx_write_csr(CVMX_NPI_DMA_CONTROL, dma_control.u64); 150210284Sjmallett } 151210284Sjmallett 152210284Sjmallett return 0; 153210284Sjmallett} 154210284Sjmallett 155210284Sjmallett 156210284Sjmallett/** 157210284Sjmallett * Shutdown all DMA engines. The engeines must be idle when this 158210284Sjmallett * function is called. 
159210284Sjmallett * 160210284Sjmallett * @return Zero on success, negative on failure 161210284Sjmallett */ 162210284Sjmallettint cvmx_dma_engine_shutdown(void) 163210284Sjmallett{ 164210284Sjmallett int engine; 165210284Sjmallett 166210284Sjmallett for (engine=0; engine < cvmx_dma_engine_get_num(); engine++) 167210284Sjmallett { 168210284Sjmallett if (cvmx_cmd_queue_length(CVMX_CMD_QUEUE_DMA(engine))) 169210284Sjmallett { 170210284Sjmallett cvmx_dprintf("ERROR: cvmx_dma_engine_shutdown: Engine not idle.\n"); 171210284Sjmallett return -1; 172210284Sjmallett } 173210284Sjmallett } 174210284Sjmallett 175210284Sjmallett if (octeon_has_feature(OCTEON_FEATURE_PCIE)) 176210284Sjmallett { 177210284Sjmallett cvmx_npei_dma_control_t dma_control; 178210284Sjmallett dma_control.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DMA_CONTROL); 179210284Sjmallett if (cvmx_dma_engine_get_num() >= 5) 180210284Sjmallett dma_control.s.dma4_enb = 0; 181210284Sjmallett dma_control.s.dma3_enb = 0; 182210284Sjmallett dma_control.s.dma2_enb = 0; 183210284Sjmallett dma_control.s.dma1_enb = 0; 184210284Sjmallett dma_control.s.dma0_enb = 0; 185210284Sjmallett cvmx_write_csr(CVMX_PEXP_NPEI_DMA_CONTROL, dma_control.u64); 186210284Sjmallett /* Make sure the disable completes */ 187210284Sjmallett cvmx_read_csr(CVMX_PEXP_NPEI_DMA_CONTROL); 188210284Sjmallett } 189210284Sjmallett else 190210284Sjmallett { 191210284Sjmallett cvmx_npi_dma_control_t dma_control; 192210284Sjmallett dma_control.u64 = cvmx_read_csr(CVMX_NPI_DMA_CONTROL); 193210284Sjmallett dma_control.s.hp_enb = 0; 194210284Sjmallett dma_control.s.lp_enb = 0; 195210284Sjmallett cvmx_write_csr(CVMX_NPI_DMA_CONTROL, dma_control.u64); 196210284Sjmallett /* Make sure the disable completes */ 197210284Sjmallett cvmx_read_csr(CVMX_NPI_DMA_CONTROL); 198210284Sjmallett } 199210284Sjmallett 200210284Sjmallett for (engine=0; engine < cvmx_dma_engine_get_num(); engine++) 201210284Sjmallett { 202210284Sjmallett 
cvmx_cmd_queue_shutdown(CVMX_CMD_QUEUE_DMA(engine)); 203210284Sjmallett if (octeon_has_feature(OCTEON_FEATURE_PCIE)) 204210284Sjmallett cvmx_write_csr(CVMX_PEXP_NPEI_DMAX_IBUFF_SADDR(engine), 0); 205210284Sjmallett else 206210284Sjmallett { 207210284Sjmallett if (engine) 208210284Sjmallett cvmx_write_csr(CVMX_NPI_HIGHP_IBUFF_SADDR, 0); 209210284Sjmallett else 210210284Sjmallett cvmx_write_csr(CVMX_NPI_LOWP_IBUFF_SADDR, 0); 211210284Sjmallett } 212210284Sjmallett } 213210284Sjmallett 214210284Sjmallett return 0; 215210284Sjmallett} 216210284Sjmallett 217210284Sjmallett 218210284Sjmallett/** 219210284Sjmallett * Submit a series of DMA comamnd to the DMA engines. 220210284Sjmallett * 221210284Sjmallett * @param engine Engine to submit to (0-4) 222210284Sjmallett * @param header Command header 223210284Sjmallett * @param num_buffers 224210284Sjmallett * The number of data pointers 225210284Sjmallett * @param buffers Comamnd data pointers 226210284Sjmallett * 227210284Sjmallett * @return Zero on success, negative on failure 228210284Sjmallett */ 229210284Sjmallettint cvmx_dma_engine_submit(int engine, cvmx_dma_engine_header_t header, int num_buffers, cvmx_dma_engine_buffer_t buffers[]) 230210284Sjmallett{ 231210284Sjmallett cvmx_cmd_queue_result_t result; 232210284Sjmallett int cmd_count = 1; 233210284Sjmallett uint64_t cmds[num_buffers + 1]; 234210284Sjmallett 235210284Sjmallett if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)) 236210284Sjmallett { 237210284Sjmallett /* Check for Errata PCIe-604 */ 238210284Sjmallett if ((header.s.nfst > 11) || (header.s.nlst > 11) || (header.s.nfst + header.s.nlst > 15)) 239210284Sjmallett { 240210284Sjmallett cvmx_dprintf("DMA engine submit too large\n"); 241210284Sjmallett return -1; 242210284Sjmallett } 243210284Sjmallett } 244210284Sjmallett 245210284Sjmallett cmds[0] = header.u64; 246210284Sjmallett while (num_buffers--) 247210284Sjmallett { 248210284Sjmallett cmds[cmd_count++] = buffers->u64; 249210284Sjmallett buffers++; 
250210284Sjmallett } 251210284Sjmallett 252210284Sjmallett /* Due to errata PCIE-13315, it is necessary to have the queue lock while we 253210284Sjmallett ring the doorbell for the DMA engines. This prevents doorbells from 254210284Sjmallett possibly arriving out of order with respect to the command queue 255210284Sjmallett entries */ 256210284Sjmallett __cvmx_cmd_queue_lock(CVMX_CMD_QUEUE_DMA(engine), __cvmx_cmd_queue_get_state(CVMX_CMD_QUEUE_DMA(engine))); 257210284Sjmallett result = cvmx_cmd_queue_write(CVMX_CMD_QUEUE_DMA(engine), 0, cmd_count, cmds); 258210284Sjmallett /* This SYNCWS is needed since the command queue didn't do locking, which 259210284Sjmallett normally implies the SYNCWS. This one makes sure the command queue 260210284Sjmallett updates make it to L2 before we ring the doorbell */ 261210284Sjmallett CVMX_SYNCWS; 262210284Sjmallett /* A syncw isn't needed here since the command queue did one as part of the queue unlock */ 263210284Sjmallett if (cvmx_likely(result == CVMX_CMD_QUEUE_SUCCESS)) 264210284Sjmallett { 265210284Sjmallett if (octeon_has_feature(OCTEON_FEATURE_PCIE)) 266210284Sjmallett { 267210284Sjmallett /* DMA doorbells are 32bit writes in little endian space. 
This means we need to xor the address with 4 */ 268210284Sjmallett cvmx_write64_uint32(CVMX_PEXP_NPEI_DMAX_DBELL(engine)^4, cmd_count); 269210284Sjmallett } 270210284Sjmallett else 271210284Sjmallett { 272210284Sjmallett if (engine) 273210284Sjmallett cvmx_write_csr(CVMX_NPI_HIGHP_DBELL, cmd_count); 274210284Sjmallett else 275210284Sjmallett cvmx_write_csr(CVMX_NPI_LOWP_DBELL, cmd_count); 276210284Sjmallett } 277210284Sjmallett } 278210284Sjmallett /* Here is the unlock for the above errata workaround */ 279210284Sjmallett __cvmx_cmd_queue_unlock(__cvmx_cmd_queue_get_state(CVMX_CMD_QUEUE_DMA(engine))); 280210284Sjmallett return result; 281210284Sjmallett} 282210284Sjmallett 283210284Sjmallett 284210284Sjmallett/** 285210284Sjmallett * @INTERNAL 286210284Sjmallett * Function used by cvmx_dma_engine_transfer() to build the 287210284Sjmallett * internal address list. 288210284Sjmallett * 289210284Sjmallett * @param buffers Location to store the list 290210284Sjmallett * @param address Address to build list for 291210284Sjmallett * @param size Length of the memory pointed to by address 292210284Sjmallett * 293210284Sjmallett * @return Number of internal pointer chunks created 294210284Sjmallett */ 295210284Sjmallettstatic inline int __cvmx_dma_engine_build_internal_pointers(cvmx_dma_engine_buffer_t *buffers, uint64_t address, int size) 296210284Sjmallett{ 297210284Sjmallett int segments = 0; 298210284Sjmallett while (size) 299210284Sjmallett { 300210284Sjmallett /* Each internal chunk can contain a maximum of 8191 bytes */ 301210284Sjmallett int chunk = size; 302210284Sjmallett if (chunk > 8191) 303210284Sjmallett chunk = 8191; 304210284Sjmallett buffers[segments].u64 = 0; 305210284Sjmallett buffers[segments].internal.size = chunk; 306210284Sjmallett buffers[segments].internal.addr = address; 307210284Sjmallett address += chunk; 308210284Sjmallett size -= chunk; 309210284Sjmallett segments++; 310210284Sjmallett } 311210284Sjmallett return segments; 312210284Sjmallett} 
313210284Sjmallett 314210284Sjmallett 315210284Sjmallett/** 316210284Sjmallett * @INTERNAL 317210284Sjmallett * Function used by cvmx_dma_engine_transfer() to build the PCI / PCIe address 318210284Sjmallett * list. 319210284Sjmallett * @param buffers Location to store the list 320210284Sjmallett * @param address Address to build list for 321210284Sjmallett * @param size Length of the memory pointed to by address 322210284Sjmallett * 323210284Sjmallett * @return Number of PCI / PCIe address chunks created. The number of words used 324210284Sjmallett * will be segments + (segments-1)/4 + 1. 325210284Sjmallett */ 326210284Sjmallettstatic inline int __cvmx_dma_engine_build_external_pointers(cvmx_dma_engine_buffer_t *buffers, uint64_t address, int size) 327210284Sjmallett{ 328210284Sjmallett const int MAX_SIZE = 65535; 329210284Sjmallett int segments = 0; 330210284Sjmallett while (size) 331210284Sjmallett { 332210284Sjmallett /* Each block of 4 PCI / PCIe pointers uses one dword for lengths followed by 333210284Sjmallett up to 4 addresses. 
This then repeats if more data is needed */ 334210284Sjmallett buffers[0].u64 = 0; 335210284Sjmallett if (size <= MAX_SIZE) 336210284Sjmallett { 337210284Sjmallett /* Only one more segment needed */ 338210284Sjmallett buffers[0].pcie_length.len0 = size; 339210284Sjmallett buffers[1].u64 = address; 340210284Sjmallett segments++; 341210284Sjmallett break; 342210284Sjmallett } 343210284Sjmallett else if (size <= MAX_SIZE * 2) 344210284Sjmallett { 345210284Sjmallett /* Two more segments needed */ 346210284Sjmallett buffers[0].pcie_length.len0 = MAX_SIZE; 347210284Sjmallett buffers[0].pcie_length.len1 = size - MAX_SIZE; 348210284Sjmallett buffers[1].u64 = address; 349210284Sjmallett address += MAX_SIZE; 350210284Sjmallett buffers[2].u64 = address; 351210284Sjmallett segments+=2; 352210284Sjmallett break; 353210284Sjmallett } 354210284Sjmallett else if (size <= MAX_SIZE * 3) 355210284Sjmallett { 356210284Sjmallett /* Three more segments needed */ 357210284Sjmallett buffers[0].pcie_length.len0 = MAX_SIZE; 358210284Sjmallett buffers[0].pcie_length.len1 = MAX_SIZE; 359210284Sjmallett buffers[0].pcie_length.len2 = size - MAX_SIZE * 2; 360210284Sjmallett buffers[1].u64 = address; 361210284Sjmallett address += MAX_SIZE; 362210284Sjmallett buffers[2].u64 = address; 363210284Sjmallett address += MAX_SIZE; 364210284Sjmallett buffers[3].u64 = address; 365210284Sjmallett segments+=3; 366210284Sjmallett break; 367210284Sjmallett } 368210284Sjmallett else if (size <= MAX_SIZE * 4) 369210284Sjmallett { 370210284Sjmallett /* Four more segments needed */ 371210284Sjmallett buffers[0].pcie_length.len0 = MAX_SIZE; 372210284Sjmallett buffers[0].pcie_length.len1 = MAX_SIZE; 373210284Sjmallett buffers[0].pcie_length.len2 = MAX_SIZE; 374210284Sjmallett buffers[0].pcie_length.len3 = size - MAX_SIZE * 3; 375210284Sjmallett buffers[1].u64 = address; 376210284Sjmallett address += MAX_SIZE; 377210284Sjmallett buffers[2].u64 = address; 378210284Sjmallett address += MAX_SIZE; 379210284Sjmallett 
buffers[3].u64 = address; 380210284Sjmallett address += MAX_SIZE; 381210284Sjmallett buffers[4].u64 = address; 382210284Sjmallett segments+=4; 383210284Sjmallett break; 384210284Sjmallett } 385210284Sjmallett else 386210284Sjmallett { 387210284Sjmallett /* Five or more segments are needed */ 388210284Sjmallett buffers[0].pcie_length.len0 = MAX_SIZE; 389210284Sjmallett buffers[0].pcie_length.len1 = MAX_SIZE; 390210284Sjmallett buffers[0].pcie_length.len2 = MAX_SIZE; 391210284Sjmallett buffers[0].pcie_length.len3 = MAX_SIZE; 392210284Sjmallett buffers[1].u64 = address; 393210284Sjmallett address += MAX_SIZE; 394210284Sjmallett buffers[2].u64 = address; 395210284Sjmallett address += MAX_SIZE; 396210284Sjmallett buffers[3].u64 = address; 397210284Sjmallett address += MAX_SIZE; 398210284Sjmallett buffers[4].u64 = address; 399210284Sjmallett address += MAX_SIZE; 400210284Sjmallett size -= MAX_SIZE*4; 401210284Sjmallett buffers += 5; 402210284Sjmallett segments+=4; 403210284Sjmallett } 404210284Sjmallett } 405210284Sjmallett return segments; 406210284Sjmallett} 407210284Sjmallett 408210284Sjmallett 409210284Sjmallett/** 410210284Sjmallett * Build the first and last pointers based on a DMA engine header 411210284Sjmallett * and submit them to the engine. The purpose of this function is 412210284Sjmallett * to simplify the building of DMA engine commands by automatically 413210284Sjmallett * converting a simple address and size into the apropriate internal 414210284Sjmallett * or PCI / PCIe address list. This function does not support gather lists, 415210284Sjmallett * so you will need to build your own lists in that case. 416210284Sjmallett * 417210284Sjmallett * @param engine Engine to submit to (0-4) 418210284Sjmallett * @param header DMA Command header. Note that the nfst and nlst fields do not 419210284Sjmallett * need to be filled in. All other fields must be set properly. 
420210284Sjmallett * @param first_address 421210284Sjmallett * Address to use for the first pointers. In the case of INTERNAL, 422210284Sjmallett * INBOUND, and OUTBOUND this is an Octeon memory address. In the 423210284Sjmallett * case of EXTERNAL, this is the source PCI / PCIe address. 424210284Sjmallett * @param last_address 425210284Sjmallett * Address to use for the last pointers. In the case of EXTERNAL, 426210284Sjmallett * INBOUND, and OUTBOUND this is a PCI / PCIe address. In the 427210284Sjmallett * case of INTERNAL, this is the Octeon memory destination address. 428210284Sjmallett * @param size Size of the transfer to perform. 429210284Sjmallett * 430210284Sjmallett * @return Zero on success, negative on failure 431210284Sjmallett */ 432210284Sjmallettint cvmx_dma_engine_transfer(int engine, cvmx_dma_engine_header_t header, 433210284Sjmallett uint64_t first_address, uint64_t last_address, 434210284Sjmallett int size) 435210284Sjmallett{ 436210284Sjmallett cvmx_dma_engine_buffer_t buffers[32]; 437210284Sjmallett int words = 0; 438210284Sjmallett 439210284Sjmallett switch (header.s.type) 440210284Sjmallett { 441210284Sjmallett case CVMX_DMA_ENGINE_TRANSFER_INTERNAL: 442210284Sjmallett header.s.nfst = __cvmx_dma_engine_build_internal_pointers(buffers, first_address, size); 443210284Sjmallett words += header.s.nfst; 444210284Sjmallett header.s.nlst = __cvmx_dma_engine_build_internal_pointers(buffers + words, last_address, size); 445210284Sjmallett words += header.s.nlst; 446210284Sjmallett break; 447210284Sjmallett case CVMX_DMA_ENGINE_TRANSFER_INBOUND: 448210284Sjmallett case CVMX_DMA_ENGINE_TRANSFER_OUTBOUND: 449210284Sjmallett header.s.nfst = __cvmx_dma_engine_build_internal_pointers(buffers, first_address, size); 450210284Sjmallett words += header.s.nfst; 451210284Sjmallett header.s.nlst = __cvmx_dma_engine_build_external_pointers(buffers + words, last_address, size); 452210284Sjmallett words += header.s.nlst + ((header.s.nlst-1) >> 2) + 1; 
453210284Sjmallett break; 454210284Sjmallett case CVMX_DMA_ENGINE_TRANSFER_EXTERNAL: 455210284Sjmallett header.s.nfst = __cvmx_dma_engine_build_external_pointers(buffers, first_address, size); 456210284Sjmallett words += header.s.nfst + ((header.s.nfst-1) >> 2) + 1; 457210284Sjmallett header.s.nlst = __cvmx_dma_engine_build_external_pointers(buffers + words, last_address, size); 458210284Sjmallett words += header.s.nlst + ((header.s.nlst-1) >> 2) + 1; 459210284Sjmallett break; 460210284Sjmallett } 461210284Sjmallett return cvmx_dma_engine_submit(engine, header, words, buffers); 462210284Sjmallett} 463210284Sjmallett 464210284Sjmallett#endif 465