1/*************************************************************************
2Copyright (c) 2003-2007  Cavium Networks (support@cavium.com). All rights
3reserved.
4
5
6Redistribution and use in source and binary forms, with or without
7modification, are permitted provided that the following conditions are
8met:
9
10    * Redistributions of source code must retain the above copyright
11      notice, this list of conditions and the following disclaimer.
12
13    * Redistributions in binary form must reproduce the above
14      copyright notice, this list of conditions and the following
15      disclaimer in the documentation and/or other materials provided
16      with the distribution.
17
18    * Neither the name of Cavium Networks nor the names of
19      its contributors may be used to endorse or promote products
20      derived from this software without specific prior written
21      permission.
22
23This Software, including technical data, may be subject to U.S. export  control laws, including the U.S. Export Administration Act and its  associated regulations, and may be subject to export or import  regulations in other countries.
24
25TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
26AND WITH ALL FAULTS AND CAVIUM  NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE  RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
27
28*************************************************************************/
29
30#include <sys/cdefs.h>
31__FBSDID("$FreeBSD$");
32
33#include <sys/param.h>
34#include <sys/systm.h>
35#include <sys/bus.h>
36#include <sys/endian.h>
37#include <sys/kernel.h>
38#include <sys/mbuf.h>
39#include <sys/socket.h>
40
41#include <net/bpf.h>
42#include <net/ethernet.h>
43#include <net/if.h>
44
45#include "wrapper-cvmx-includes.h"
46#include "ethernet-headers.h"
47
/* You can define GET_MBUF_QOS() to override how the mbuf output function
   determines which output queue is used. The default implementation
   always uses the base queue for the port. If, for example, you wanted
   to use the m->priority field, define GET_MBUF_QOS as:
   #define GET_MBUF_QOS(m) ((m)->priority) */
53#ifndef GET_MBUF_QOS
54    #define GET_MBUF_QOS(m) 0
55#endif
56
57
58/**
59 * Packet transmit
60 *
61 * @param m    Packet to send
62 * @param dev    Device info structure
63 * @return Always returns zero
64 */
65int cvm_oct_xmit(struct mbuf *m, struct ifnet *ifp)
66{
67	cvmx_pko_command_word0_t    pko_command;
68	cvmx_buf_ptr_t              hw_buffer;
69	int                         dropped;
70	int                         qos;
71	cvm_oct_private_t          *priv = (cvm_oct_private_t *)ifp->if_softc;
72	int32_t in_use;
73	int32_t buffers_to_free;
74	cvmx_wqe_t *work;
75
76	/* Prefetch the private data structure.
77	   It is larger that one cache line */
78	CVMX_PREFETCH(priv, 0);
79
80	/* Start off assuming no drop */
81	dropped = 0;
82
83	/* The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to completely
84	   remove "qos" in the event neither interface supports multiple queues
85	   per port */
86	if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) ||
87	    (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) {
88		qos = GET_MBUF_QOS(m);
89		if (qos <= 0)
90			qos = 0;
91		else if (qos >= cvmx_pko_get_num_queues(priv->port))
92			qos = 0;
93	} else
94		qos = 0;
95
96	/* The CN3XXX series of parts has an errata (GMX-401) which causes the
97	   GMX block to hang if a collision occurs towards the end of a
98	   <68 byte packet. As a workaround for this, we pad packets to be
99	   68 bytes whenever we are in half duplex mode. We don't handle
100	   the case of having a small packet but no room to add the padding.
101	   The kernel should always give us at least a cache line */
102	if (__predict_false(m->m_pkthdr.len < 64) && OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
103		cvmx_gmxx_prtx_cfg_t gmx_prt_cfg;
104		int interface = INTERFACE(priv->port);
105		int index = INDEX(priv->port);
106
107		if (interface < 2) {
108			/* We only need to pad packet in half duplex mode */
109			gmx_prt_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
110			if (gmx_prt_cfg.s.duplex == 0) {
111				static uint8_t pad[64];
112
113				if (!m_append(m, sizeof pad - m->m_pkthdr.len, pad))
114					printf("%s: unable to padd small packet.", __func__);
115			}
116		}
117	}
118
119#ifdef OCTEON_VENDOR_RADISYS
120	/*
121	 * The RSYS4GBE will hang if asked to transmit a packet less than 60 bytes.
122	 */
123	if (__predict_false(m->m_pkthdr.len < 60) &&
124	    cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CUST_RADISYS_RSYS4GBE) {
125		static uint8_t pad[60];
126
127		if (!m_append(m, sizeof pad - m->m_pkthdr.len, pad))
128			printf("%s: unable to pad small packet.", __func__);
129	}
130#endif
131
132	/*
133	 * If the packet is not fragmented.
134	 */
135	if (m->m_pkthdr.len == m->m_len) {
136		/* Build the PKO buffer pointer */
137		hw_buffer.u64 = 0;
138		hw_buffer.s.addr = cvmx_ptr_to_phys(m->m_data);
139		hw_buffer.s.pool = 0;
140		hw_buffer.s.size = m->m_len;
141
142		/* Build the PKO command */
143		pko_command.u64 = 0;
144		pko_command.s.segs = 1;
145		pko_command.s.dontfree = 1; /* Do not put this buffer into the FPA.  */
146
147		work = NULL;
148	} else {
149		struct mbuf *n;
150		unsigned segs;
151		uint64_t *gp;
152
153		/*
154		 * The packet is fragmented, we need to send a list of segments
155		 * in memory we borrow from the WQE pool.
156		 */
157		work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
158		if (work == NULL) {
159			m_freem(m);
160			ifp->if_oerrors++;
161			return 1;
162		}
163
164		segs = 0;
165		gp = (uint64_t *)work;
166		for (n = m; n != NULL; n = n->m_next) {
167			if (segs == CVMX_FPA_WQE_POOL_SIZE / sizeof (uint64_t))
168				panic("%s: too many segments in packet; call m_collapse().", __func__);
169
170			/* Build the PKO buffer pointer */
171			hw_buffer.u64 = 0;
172			hw_buffer.s.i = 1; /* Do not put this buffer into the FPA.  */
173			hw_buffer.s.addr = cvmx_ptr_to_phys(n->m_data);
174			hw_buffer.s.pool = 0;
175			hw_buffer.s.size = n->m_len;
176
177			*gp++ = hw_buffer.u64;
178			segs++;
179		}
180
181		/* Build the PKO buffer gather list pointer */
182		hw_buffer.u64 = 0;
183		hw_buffer.s.addr = cvmx_ptr_to_phys(work);
184		hw_buffer.s.pool = CVMX_FPA_WQE_POOL;
185		hw_buffer.s.size = segs;
186
187		/* Build the PKO command */
188		pko_command.u64 = 0;
189		pko_command.s.segs = segs;
190		pko_command.s.gather = 1;
191		pko_command.s.dontfree = 0; /* Put the WQE above back into the FPA.  */
192	}
193
194	/* Finish building the PKO command */
195	pko_command.s.n2 = 1; /* Don't pollute L2 with the outgoing packet */
196	pko_command.s.reg0 = priv->fau+qos*4;
197	pko_command.s.total_bytes = m->m_pkthdr.len;
198	pko_command.s.size0 = CVMX_FAU_OP_SIZE_32;
199	pko_command.s.subone0 = 1;
200
201	/* Check if we can use the hardware checksumming */
202	if ((m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) != 0) {
203		/* Use hardware checksum calc */
204		pko_command.s.ipoffp1 = ETHER_HDR_LEN + 1;
205	}
206
207	/*
208	 * XXX
209	 * Could use a different free queue (and different FAU address) per
210	 * core instead of per QoS, to reduce contention here.
211	 */
212	IF_LOCK(&priv->tx_free_queue[qos]);
213	/* Get the number of mbufs in use by the hardware */
214	in_use = cvmx_fau_fetch_and_add32(priv->fau+qos*4, 1);
215	buffers_to_free = cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
216
217	cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos, CVMX_PKO_LOCK_CMD_QUEUE);
218
219	/* Drop this packet if we have too many already queued to the HW */
220	if (_IF_QFULL(&priv->tx_free_queue[qos])) {
221		dropped = 1;
222	}
223	/* Send the packet to the output queue */
224	else
225	if (__predict_false(cvmx_pko_send_packet_finish(priv->port, priv->queue + qos, pko_command, hw_buffer, CVMX_PKO_LOCK_CMD_QUEUE))) {
226		DEBUGPRINT("%s: Failed to send the packet\n", if_name(ifp));
227		dropped = 1;
228	}
229
230	if (__predict_false(dropped)) {
231		m_freem(m);
232		cvmx_fau_atomic_add32(priv->fau+qos*4, -1);
233		ifp->if_oerrors++;
234	} else {
235		/* Put this packet on the queue to be freed later */
236		_IF_ENQUEUE(&priv->tx_free_queue[qos], m);
237
238		/* Pass it to any BPF listeners.  */
239		ETHER_BPF_MTAP(ifp, m);
240
241		ifp->if_opackets++;
242		ifp->if_obytes += m->m_pkthdr.len;
243	}
244
245	/* Free mbufs not in use by the hardware */
246	if (_IF_QLEN(&priv->tx_free_queue[qos]) > in_use) {
247		while (_IF_QLEN(&priv->tx_free_queue[qos]) > in_use) {
248			_IF_DEQUEUE(&priv->tx_free_queue[qos], m);
249			m_freem(m);
250		}
251	}
252	IF_UNLOCK(&priv->tx_free_queue[qos]);
253
254	return dropped;
255}
256
257
258/**
259 * This function frees all mbufs that are currenty queued for TX.
260 *
261 * @param dev    Device being shutdown
262 */
263void cvm_oct_tx_shutdown(struct ifnet *ifp)
264{
265	cvm_oct_private_t *priv = (cvm_oct_private_t *)ifp->if_softc;
266	int qos;
267
268	for (qos = 0; qos < 16; qos++) {
269		IF_DRAIN(&priv->tx_free_queue[qos]);
270	}
271}
272