/*************************************************************************
SPDX-License-Identifier: BSD-3-Clause

Copyright (c) 2003-2007 Cavium Networks (support@cavium.com). All rights
reserved.


Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above
      copyright notice, this list of conditions and the following
      disclaimer in the documentation and/or other materials provided
      with the distribution.

    * Neither the name of Cavium Networks nor the names of
      its contributors may be used to endorse or promote products
      derived from this software without specific prior written
      permission.

This Software, including technical data, may be subject to U.S. export
control laws, including the U.S. Export Administration Act and its
associated regulations, and may be subject to export or import
regulations in other countries.

TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES,
REPRESENTATIONS OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR
OTHERWISE, WITH RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS
CONFORMITY TO ANY REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY
LATENT OR PATENT DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED
(IF ANY) WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS
FOR A PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS,
QUIET ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH
YOU.

*************************************************************************/

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/socket.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>

#include "wrapper-cvmx-includes.h"
#include "ethernet-headers.h"

/* You can define GET_MBUF_QOS() to override how the mbuf output function
   determines which output queue is used. The default implementation
   always uses the base queue for the port. If, for example, you wanted
   to use the m->priority field, define GET_MBUF_QOS as:
   #define GET_MBUF_QOS(m) ((m)->priority) */
#ifndef GET_MBUF_QOS
    #define GET_MBUF_QOS(m) 0
#endif
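
/*
 * Illustrative only, not compiled in: one possible GET_MBUF_QOS
 * override. FreeBSD mbufs have no "priority" field (the comment above
 * appears to be a holdover from the Linux version of this driver), so a
 * port-local definition would be needed; this sketch maps the 802.1p
 * priority bits of a VLAN-tagged frame to a queue. Out-of-range values
 * are clamped back to queue 0 by cvm_oct_xmit() below.
 */
#if 0
#define GET_MBUF_QOS(m)						\
	(((m)->m_flags & M_VLANTAG) ?				\
	    (int)EVL_PRIOFTAG((m)->m_pkthdr.ether_vtag) : 0)
#endif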


/**
 * Packet transmit.
 *
 * @param m	Packet to send
 * @param ifp	Interface to transmit it on
 * @return Zero on success; 1 if the packet was dropped
 */
int cvm_oct_xmit(struct mbuf *m, struct ifnet *ifp)
{
	cvmx_pko_command_word0_t pko_command;
	cvmx_buf_ptr_t hw_buffer;
	int dropped;
	int qos;
	cvm_oct_private_t *priv = (cvm_oct_private_t *)ifp->if_softc;
	int32_t in_use;
	int32_t buffers_to_free;
	cvmx_wqe_t *work;

	/* Prefetch the private data structure.
	   It is larger than one cache line. */
	CVMX_PREFETCH(priv, 0);

	/* Start off assuming no drop. */
	dropped = 0;

	/* The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to completely
	   remove "qos" in the event neither interface supports multiple
	   queues per port. */
	if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) ||
	    (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) {
		qos = GET_MBUF_QOS(m);
		if (qos <= 0)
			qos = 0;
		else if (qos >= cvmx_pko_get_num_queues(priv->port))
			qos = 0;
	} else
		qos = 0;

	/* The CN3XXX series of parts has an erratum (GMX-401) which causes
	   the GMX block to hang if a collision occurs towards the end of a
	   <68 byte packet. As a workaround for this, we pad short packets
	   whenever we are in half duplex mode. We don't handle the case of
	   having a small packet but no room to add the padding. The kernel
	   should always give us at least a cache line. */
	if (__predict_false(m->m_pkthdr.len < 64) && OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
		cvmx_gmxx_prtx_cfg_t gmx_prt_cfg;
		int interface = INTERFACE(priv->port);
		int index = INDEX(priv->port);

		if (interface < 2) {
			/* We only need to pad packets in half duplex mode. */
			gmx_prt_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
			if (gmx_prt_cfg.s.duplex == 0) {
				static uint8_t pad[64];

				if (!m_append(m, sizeof pad - m->m_pkthdr.len, pad))
					printf("%s: unable to pad small packet.\n", __func__);
			}
		}
	}

#ifdef OCTEON_VENDOR_RADISYS
	/*
	 * The RSYS4GBE will hang if asked to transmit a packet shorter
	 * than 60 bytes.
	 */
	if (__predict_false(m->m_pkthdr.len < 60) &&
	    cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CUST_RADISYS_RSYS4GBE) {
		static uint8_t pad[60];

		if (!m_append(m, sizeof pad - m->m_pkthdr.len, pad))
			printf("%s: unable to pad small packet.\n", __func__);
	}
#endif
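
#if 0
	/*
	 * Illustrative only, not compiled in: the gather path below panics
	 * if an mbuf chain carries more segments than one WQE can
	 * describe. A defensive version of this function could instead
	 * defragment long chains with m_collapse() before building the
	 * gather list, along these lines:
	 */
	if (m->m_pkthdr.len != m->m_len) {
		struct mbuf *m2;

		m2 = m_collapse(m, M_NOWAIT,
		    CVMX_FPA_WQE_POOL_SIZE / sizeof (uint64_t));
		if (m2 == NULL) {
			/* On failure the chain is still owned by the caller. */
			m_freem(m);
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			return 1;
		}
		m = m2;
	}
#endif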

	/*
	 * If the packet is not fragmented.
	 */
	if (m->m_pkthdr.len == m->m_len) {
		/* Build the PKO buffer pointer. */
		hw_buffer.u64 = 0;
		hw_buffer.s.addr = cvmx_ptr_to_phys(m->m_data);
		hw_buffer.s.pool = 0;
		hw_buffer.s.size = m->m_len;

		/* Build the PKO command. */
		pko_command.u64 = 0;
		pko_command.s.segs = 1;
		pko_command.s.dontfree = 1; /* Do not put this buffer into the FPA. */

		work = NULL;
	} else {
		struct mbuf *n;
		unsigned segs;
		uint64_t *gp;

		/*
		 * The packet is fragmented; we need to send a list of
		 * segments in memory we borrow from the WQE pool.
		 */
		work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
		if (work == NULL) {
			m_freem(m);
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			return 1;
		}

		segs = 0;
		gp = (uint64_t *)work;
		for (n = m; n != NULL; n = n->m_next) {
			if (segs == CVMX_FPA_WQE_POOL_SIZE / sizeof (uint64_t))
				panic("%s: too many segments in packet; call m_collapse().", __func__);

			/* Build the PKO buffer pointer. */
			hw_buffer.u64 = 0;
			hw_buffer.s.i = 1; /* Do not put this buffer into the FPA. */
			hw_buffer.s.addr = cvmx_ptr_to_phys(n->m_data);
			hw_buffer.s.pool = 0;
			hw_buffer.s.size = n->m_len;

			*gp++ = hw_buffer.u64;
			segs++;
		}

		/* Build the PKO buffer gather list pointer. */
		hw_buffer.u64 = 0;
		hw_buffer.s.addr = cvmx_ptr_to_phys(work);
		hw_buffer.s.pool = CVMX_FPA_WQE_POOL;
		hw_buffer.s.size = segs;

		/* Build the PKO command. */
		pko_command.u64 = 0;
		pko_command.s.segs = segs;
		pko_command.s.gather = 1;
		pko_command.s.dontfree = 0; /* Put the WQE above back into the FPA. */
	}

	/* Finish building the PKO command. */
	pko_command.s.n2 = 1; /* Don't pollute L2 with the outgoing packet. */
	pko_command.s.reg0 = priv->fau + qos * 4;
	pko_command.s.total_bytes = m->m_pkthdr.len;
	pko_command.s.size0 = CVMX_FAU_OP_SIZE_32;
	pko_command.s.subone0 = 1;

	/* Check if we can use hardware checksumming. */
	if ((m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) != 0) {
		/* Use hardware checksum calculation. */
		pko_command.s.ipoffp1 = ETHER_HDR_LEN + 1; /* IP header offset, plus one. */
	}

	/*
	 * XXX
	 * Could use a different free queue (and different FAU address) per
	 * core instead of per QoS, to reduce contention here.
	 */
	IF_LOCK(&priv->tx_free_queue[qos]);
	/* Get the number of mbufs in use by the hardware. */
	in_use = cvmx_fau_fetch_and_add32(priv->fau + qos * 4, 1);
	buffers_to_free = cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos, CVMX_PKO_LOCK_CMD_QUEUE);

	/* Drop this packet if we have too many already queued to the HW. */
	if (_IF_QFULL(&priv->tx_free_queue[qos])) {
		dropped = 1;
	/* Otherwise, send the packet to the output queue. */
	} else if (__predict_false(cvmx_pko_send_packet_finish(priv->port,
	    priv->queue + qos, pko_command, hw_buffer, CVMX_PKO_LOCK_CMD_QUEUE))) {
		DEBUGPRINT("%s: Failed to send the packet\n", if_name(ifp));
		dropped = 1;
	}

	if (__predict_false(dropped)) {
		m_freem(m);
		cvmx_fau_atomic_add32(priv->fau + qos * 4, -1);
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	} else {
		/* Put this packet on the queue to be freed later. */
		_IF_ENQUEUE(&priv->tx_free_queue[qos], m);

		/* Pass it to any BPF listeners. */
		ETHER_BPF_MTAP(ifp, m);

		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		if_inc_counter(ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);
	}

	/* Free mbufs no longer in use by the hardware. */
	while (_IF_QLEN(&priv->tx_free_queue[qos]) > in_use) {
		_IF_DEQUEUE(&priv->tx_free_queue[qos], m);
		m_freem(m);
	}
	IF_UNLOCK(&priv->tx_free_queue[qos]);

	return dropped;
}


/**
 * Free all mbufs that are currently queued for TX.
 *
 * @param ifp	Interface being shut down
 */
void cvm_oct_tx_shutdown(struct ifnet *ifp)
{
	cvm_oct_private_t *priv = (cvm_oct_private_t *)ifp->if_softc;
	int qos;

	for (qos = 0; qos < 16; qos++) {
		IF_DRAIN(&priv->tx_free_queue[qos]);
	}
}
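
#if 0
/*
 * Illustrative only, not compiled in: a minimal if_start-style wrapper
 * showing how cvm_oct_xmit() might be driven from an interface send
 * queue. The function name is hypothetical and this is not the driver's
 * actual attachment code; it only demonstrates that cvm_oct_xmit()
 * consumes the mbuf on both success and drop, so the caller never frees
 * it.
 */
static void
example_if_start(struct ifnet *ifp)
{
	struct mbuf *m;

	for (;;) {
		IF_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;
		(void)cvm_oct_xmit(m, ifp);
	}
}
#endif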