/*
 * Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#define	LINUXKPI_PARAM_PREFIX mlx4_

#include <linux/page.h>
#include <dev/mlx4/cq.h>
#include <linux/slab.h>
#include <dev/mlx4/qp.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>
#include <netinet/udp.h>

#include "en.h"

int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_tx_ring **pring, u32 size,
			   u16 stride, int node, int queue_idx)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_ring *ring;
	uint32_t x;
	int tmp;
	int err;

	ring = kzalloc_node(sizeof(struct mlx4_en_tx_ring), GFP_KERNEL, node);
	if (!ring) {
		ring = kzalloc(sizeof(struct mlx4_en_tx_ring), GFP_KERNEL);
		if (!ring) {
			en_err(priv, "Failed allocating TX ring\n");
			return -ENOMEM;
		}
	}

	/* Create DMA descriptor TAG */
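	/*
	 * NOTE: bus_dma_tag_create() returns a FreeBSD errno value,
	 * which is negated here so that this function keeps the
	 * Linux-style negative error return convention used by the
	 * rest of the mlx4 code.
	 */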
	if ((err = -bus_dma_tag_create(
	    bus_get_dma_tag(mdev->pdev->dev.bsddev),
	    1,					/* any alignment */
	    0,					/* no boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    MLX4_EN_TX_MAX_PAYLOAD_SIZE,	/* maxsize */
	    MLX4_EN_TX_MAX_MBUF_FRAGS,		/* nsegments */
	    MLX4_EN_TX_MAX_MBUF_SIZE,		/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &ring->dma_tag)))
		goto done;

	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = stride;
	ring->inline_thold = MAX(MIN_PKT_LEN, MIN(priv->prof->inline_thold, MAX_INLINE));
	mtx_init(&ring->tx_lock.m, "mlx4 tx", NULL, MTX_DEF);
	mtx_init(&ring->comp_lock.m, "mlx4 comp", NULL, MTX_DEF);

	tmp = size * sizeof(struct mlx4_en_tx_info);
	ring->tx_info = kzalloc_node(tmp, GFP_KERNEL, node);
	if (!ring->tx_info) {
		ring->tx_info = kzalloc(tmp, GFP_KERNEL);
		if (!ring->tx_info) {
			err = -ENOMEM;
			goto err_ring;
		}
	}

	/* Create DMA descriptor MAPs */
	for (x = 0; x != size; x++) {
		err = -bus_dmamap_create(ring->dma_tag, 0,
		    &ring->tx_info[x].dma_map);
		if (err != 0) {
			while (x--) {
				bus_dmamap_destroy(ring->dma_tag,
				    ring->tx_info[x].dma_map);
			}
			goto err_info;
		}
	}

	en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
		 ring->tx_info, tmp);

	ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);

	/* Allocate HW buffers on provided NUMA node */
	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
				 2 * PAGE_SIZE);
	if (err) {
		en_err(priv, "Failed allocating hwq resources\n");
		goto err_dma_map;
	}

	err = mlx4_en_map_buffer(&ring->wqres.buf);
	if (err) {
		en_err(priv, "Failed to map TX buffer\n");
		goto err_hwq_res;
	}

	ring->buf = ring->wqres.buf.direct.buf;

	en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
	       "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
	       ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);

	err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn,
				    MLX4_RESERVE_ETH_BF_QP);
	if (err) {
		en_err(priv, "failed reserving qp for TX ring\n");
		goto err_map;
	}

	err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, GFP_KERNEL);
	if (err) {
		en_err(priv, "Failed allocating qp %d\n", ring->qpn);
		goto err_reserve;
	}
	ring->qp.event = mlx4_en_sqp_event;

	err = mlx4_bf_alloc(mdev->dev, &ring->bf, node);
	if (err) {
		en_dbg(DRV, priv, "working without blueflame (%d)", err);
		ring->bf.uar = &mdev->priv_uar;
		ring->bf.uar->map = mdev->uar_map;
		ring->bf_enabled = false;
	} else
		ring->bf_enabled = true;
	ring->queue_index = queue_idx;

	*pring = ring;
	return 0;

err_reserve:
	mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
err_map:
	mlx4_en_unmap_buffer(&ring->wqres.buf);
err_hwq_res:
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_dma_map:
	for (x = 0; x != size; x++)
		bus_dmamap_destroy(ring->dma_tag, ring->tx_info[x].dma_map);
err_info:
	vfree(ring->tx_info);
err_ring:
	bus_dma_tag_destroy(ring->dma_tag);
done:
	kfree(ring);
	return err;
}
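
/*
 * A minimal usage sketch of the ring lifecycle, assuming the calling
 * conventions of the netdevice glue in this driver (the size, stride,
 * CQ number and priority values below are placeholders, not verified
 * against the callers):
 *
 *	struct mlx4_en_tx_ring *ring;
 *
 *	err = mlx4_en_create_tx_ring(priv, &ring, size, stride, node, idx);
 *	if (err == 0)
 *		err = mlx4_en_activate_tx_ring(priv, ring, cqn, user_prio);
 *	...
 *	mlx4_en_deactivate_tx_ring(priv, ring);
 *	mlx4_en_destroy_tx_ring(priv, &ring);
 */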

void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring **pring)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_ring *ring = *pring;
	uint32_t x;

	en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);

	if (ring->bf_enabled)
		mlx4_bf_free(mdev->dev, &ring->bf);
	mlx4_qp_remove(mdev->dev, &ring->qp);
	mlx4_qp_free(mdev->dev, &ring->qp);
	mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1);
	mlx4_en_unmap_buffer(&ring->wqres.buf);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
	for (x = 0; x != ring->size; x++)
		bus_dmamap_destroy(ring->dma_tag, ring->tx_info[x].dma_map);
	vfree(ring->tx_info);
	mtx_destroy(&ring->tx_lock.m);
	mtx_destroy(&ring->comp_lock.m);
	bus_dma_tag_destroy(ring->dma_tag);
	kfree(ring);
	*pring = NULL;
}

int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring *ring,
			     int cq, int user_prio)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	ring->cqn = cq;
	ring->prod = 0;
	ring->cons = 0xffffffff;
	ring->last_nr_txbb = 1;
	ring->poll_cnt = 0;
	memset(ring->buf, 0, ring->buf_size);
	ring->watchdog_time = 0;

	ring->qp_state = MLX4_QP_STATE_RST;
	ring->doorbell_qpn = ring->qp.qpn << 8;

	mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
				ring->cqn, user_prio, &ring->context);
	if (ring->bf_enabled)
		ring->context.usr_page = cpu_to_be32(ring->bf.uar->index);

	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
			       &ring->qp, &ring->qp_state);
	return err;
}

void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_dev *mdev = priv->mdev;

	mlx4_qp_modify(mdev->dev, NULL, ring->qp_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
}

static volatile struct mlx4_wqe_data_seg *
mlx4_en_store_inline_lso_data(volatile struct mlx4_wqe_data_seg *dseg,
    struct mbuf *mb, int len, __be32 owner_bit)
{
	uint8_t *inl = __DEVOLATILE(uint8_t *, dseg);

	/* copy data into place */
	m_copydata(mb, 0, len, inl + 4);
	dseg += DIV_ROUND_UP(4 + len, DS_SIZE_ALIGNMENT);
	return (dseg);
}

static void
mlx4_en_store_inline_lso_header(volatile struct mlx4_wqe_data_seg *dseg,
    int len, __be32 owner_bit)
{
	/*
	 * Nothing to do here; for LSO the header length is carried in
	 * the LSO segment's "mss_hdr_size" field rather than in an
	 * inline segment byte count.
	 */
}

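/*
 * Overwrite a completed descriptor with the stamp pattern. The owner
 * bit encoded in the stamp alternates on every ring wraparound, which
 * lets the completion logic tell freed TXBBs apart from ones still
 * owned by the hardware.
 */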
static void
mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
    struct mlx4_en_tx_ring *ring, u32 index, u8 owner)
{
	struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
	struct mlx4_en_tx_desc *tx_desc = (struct mlx4_en_tx_desc *)
	    (ring->buf + (index * TXBB_SIZE));
	volatile __be32 *ptr = (__be32 *)tx_desc;
	const __be32 stamp = cpu_to_be32(STAMP_VAL |
	    ((u32)owner << STAMP_SHIFT));
	u32 i;

	/* Stamp the freed descriptor */
	for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
		*ptr = stamp;
		ptr += STAMP_DWORDS;
	}
}

static u32
mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
    struct mlx4_en_tx_ring *ring, u32 index)
{
	struct mlx4_en_tx_info *tx_info;
	struct mbuf *mb;

	tx_info = &ring->tx_info[index];
	mb = tx_info->mb;

	if (mb == NULL)
		goto done;

	bus_dmamap_sync(ring->dma_tag, tx_info->dma_map,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ring->dma_tag, tx_info->dma_map);

	m_freem(mb);
done:
	return (tx_info->nr_txbb);
}

int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int cnt = 0;

	/* Skip last polled descriptor */
	ring->cons += ring->last_nr_txbb;
	en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
		 ring->cons, ring->prod);

	if ((u32) (ring->prod - ring->cons) > ring->size) {
		en_warn(priv, "Tx consumer passed producer!\n");
		return 0;
	}

	while (ring->cons != ring->prod) {
		ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
		    ring->cons & ring->size_mask);
		ring->cons += ring->last_nr_txbb;
		cnt++;
	}

	if (cnt)
		en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);

	return cnt;
}

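/*
 * The ring is treated as full when fewer work queue slots remain than
 * the headroom plus two maximum-sized WQEs, which guarantees that the
 * next request always fits, including any wraparound padding.
 */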
static bool
mlx4_en_tx_ring_is_full(struct mlx4_en_tx_ring *ring)
{
	int wqs;

	wqs = ring->size - (ring->prod - ring->cons);
	return (wqs < (HEADROOM + (2 * MLX4_EN_TX_WQE_MAX_WQEBBS)));
}

static int mlx4_en_process_tx_cq(struct net_device *dev,
				 struct mlx4_en_cq *cq)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cq *mcq = &cq->mcq;
	struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];
	struct mlx4_cqe *cqe;
	u16 index;
	u16 new_index, ring_index, stamp_index;
	u32 txbbs_skipped = 0;
	u32 txbbs_stamp = 0;
	u32 cons_index = mcq->cons_index;
	int size = cq->size;
	u32 size_mask = ring->size_mask;
	struct mlx4_cqe *buf = cq->buf;
	int factor = priv->cqe_factor;

	if (!priv->port_up)
		return 0;

	index = cons_index & size_mask;
	cqe = &buf[(index << factor) + factor];
	ring_index = ring->cons & size_mask;
	stamp_index = ring_index;

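	/*
	 * A CQE belongs to software when its ownership bit matches the
	 * current pass parity of the consumer index, hence the XNOR of
	 * the owner bit with "cons_index & size" below.
	 */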
	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
			cons_index & size)) {
		/*
		 * make sure we read the CQE after we read the
		 * ownership bit
		 */
		rmb();

		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
			     MLX4_CQE_OPCODE_ERROR)) {
			en_err(priv, "CQE completed in error - vendor syndrome: 0x%x syndrome: 0x%x\n",
			       ((struct mlx4_err_cqe *)cqe)->
				       vendor_err_syndrome,
			       ((struct mlx4_err_cqe *)cqe)->syndrome);
		}

		/* Skip over last polled CQE */
		new_index = be16_to_cpu(cqe->wqe_index) & size_mask;

		do {
			txbbs_skipped += ring->last_nr_txbb;
			ring_index = (ring_index + ring->last_nr_txbb) & size_mask;
			/* free next descriptor */
			ring->last_nr_txbb = mlx4_en_free_tx_desc(
			    priv, ring, ring_index);
			mlx4_en_stamp_wqe(priv, ring, stamp_index,
					  !!((ring->cons + txbbs_stamp) &
						ring->size));
			stamp_index = ring_index;
			txbbs_stamp = txbbs_skipped;
		} while (ring_index != new_index);

		++cons_index;
		index = cons_index & size_mask;
		cqe = &buf[(index << factor) + factor];
	}

	/*
	 * To prevent CQ overflow we first update CQ consumer and only then
	 * the ring consumer.
	 */
	mcq->cons_index = cons_index;
	mlx4_cq_set_ci(mcq);
	wmb();
	ring->cons += txbbs_skipped;

	return (0);
}

void mlx4_en_tx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
	struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];

	if (priv->port_up == 0 || !spin_trylock(&ring->comp_lock))
		return;
	mlx4_en_process_tx_cq(cq->dev, cq);
	mod_timer(&cq->timer, jiffies + 1);
	spin_unlock(&ring->comp_lock);
}

void mlx4_en_poll_tx_cq(unsigned long data)
{
	struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data;
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
	struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];
	u32 inflight;

	INC_PERF_COUNTER(priv->pstats.tx_poll);

	if (priv->port_up == 0)
		return;
	if (!spin_trylock(&ring->comp_lock)) {
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
		return;
	}
	mlx4_en_process_tx_cq(cq->dev, cq);
	inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);

	/*
	 * If there are still packets in flight and the timer has not already
	 * been scheduled by the Tx routine then schedule it here to guarantee
	 * completion processing of these packets:
	 */
	if (inflight && priv->port_up)
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);

	spin_unlock(&ring->comp_lock);
}

static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
{
	struct mlx4_en_cq *cq = priv->tx_cq[tx_ind];
	struct mlx4_en_tx_ring *ring = priv->tx_ring[tx_ind];

	if (priv->port_up == 0)
		return;

	/*
	 * If we don't have a pending timer, set one up to catch our recent
	 * post in case the interface becomes idle:
	 */
	if (!timer_pending(&cq->timer))
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);

	/* Poll the CQ every MLX4_EN_TX_POLL_MODER packets */
	if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
		if (spin_trylock(&ring->comp_lock)) {
			mlx4_en_process_tx_cq(priv->dev, cq);
			spin_unlock(&ring->comp_lock);
		}
}

static u16
mlx4_en_get_inline_hdr_size(struct mlx4_en_tx_ring *ring, struct mbuf *mb)
{
	u16 retval;

	/* only copy from first fragment, if possible */
	retval = MIN(ring->inline_thold, mb->m_len);

	/* check for too little data */
	if (unlikely(retval < MIN_PKT_LEN))
		retval = MIN(ring->inline_thold, mb->m_pkthdr.len);
	return (retval);
}

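/*
 * Parse the Ethernet, IP(v4/v6) and TCP headers of an LSO mbuf and
 * return their combined length in bytes, or zero when the headers are
 * not contiguous in the first mbuf or the packet is not TCP. The
 * returned length is the part of the packet that must be sent inline.
 */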
static int
mlx4_en_get_header_size(struct mbuf *mb)
{
	struct ether_vlan_header *eh;
	struct tcphdr *th;
	struct ip *ip;
	int ip_hlen, tcp_hlen;
	struct ip6_hdr *ip6;
	uint16_t eth_type;
	int eth_hdr_len;

	eh = mtod(mb, struct ether_vlan_header *);
	if (mb->m_len < ETHER_HDR_LEN)
		return (0);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		eth_type = ntohs(eh->evl_proto);
		eth_hdr_len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		eth_type = ntohs(eh->evl_encap_proto);
		eth_hdr_len = ETHER_HDR_LEN;
	}
	if (mb->m_len < eth_hdr_len)
		return (0);
	switch (eth_type) {
	case ETHERTYPE_IP:
		ip = (struct ip *)(mb->m_data + eth_hdr_len);
		if (mb->m_len < eth_hdr_len + sizeof(*ip))
			return (0);
		if (ip->ip_p != IPPROTO_TCP)
			return (0);
		ip_hlen = ip->ip_hl << 2;
		eth_hdr_len += ip_hlen;
		break;
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mb->m_data + eth_hdr_len);
		if (mb->m_len < eth_hdr_len + sizeof(*ip6))
			return (0);
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return (0);
		eth_hdr_len += sizeof(*ip6);
		break;
	default:
		return (0);
	}
	if (mb->m_len < eth_hdr_len + sizeof(*th))
		return (0);
	th = (struct tcphdr *)(mb->m_data + eth_hdr_len);
	tcp_hlen = th->th_off << 2;
	eth_hdr_len += tcp_hlen;
	if (mb->m_len < eth_hdr_len)
		return (0);
	return (eth_hdr_len);
}

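/*
 * Copy "len" bytes of packet data inline into the work queue. An
 * inline segment starts with a 4-byte byte-count word; when the data
 * would cross the MLX4_INLINE_ALIGN boundary it is split into two
 * inline segments, which is why the second part is copied with an
 * extra 4-byte offset ("inl + 8 + spc"). Runt packets are zero-padded
 * up to MIN_PKT_LEN.
 */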
static volatile struct mlx4_wqe_data_seg *
mlx4_en_store_inline_data(volatile struct mlx4_wqe_data_seg *dseg,
    struct mbuf *mb, int len, __be32 owner_bit)
{
	uint8_t *inl = __DEVOLATILE(uint8_t *, dseg);
	const int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - 4;

	if (unlikely(len < MIN_PKT_LEN)) {
		m_copydata(mb, 0, len, inl + 4);
		memset(inl + 4 + len, 0, MIN_PKT_LEN - len);
		dseg += DIV_ROUND_UP(4 + MIN_PKT_LEN, DS_SIZE_ALIGNMENT);
	} else if (len <= spc) {
		m_copydata(mb, 0, len, inl + 4);
		dseg += DIV_ROUND_UP(4 + len, DS_SIZE_ALIGNMENT);
	} else {
		m_copydata(mb, 0, spc, inl + 4);
		m_copydata(mb, spc, len - spc, inl + 8 + spc);
		dseg += DIV_ROUND_UP(8 + len, DS_SIZE_ALIGNMENT);
	}
	return (dseg);
}

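/*
 * Fill in the byte-count word(s) of a previously stored inline
 * segment. Bit 31 marks a segment as inline. For split segments the
 * second byte count is written first, followed by a write barrier, so
 * that the hardware never parses a half-valid inline segment.
 */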
static void
mlx4_en_store_inline_header(volatile struct mlx4_wqe_data_seg *dseg,
    int len, __be32 owner_bit)
{
	uint8_t *inl = __DEVOLATILE(uint8_t *, dseg);
	const int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - 4;

	if (unlikely(len < MIN_PKT_LEN)) {
		*(volatile uint32_t *)inl =
		    SET_BYTE_COUNT((1 << 31) | MIN_PKT_LEN);
	} else if (len <= spc) {
		*(volatile uint32_t *)inl =
		    SET_BYTE_COUNT((1 << 31) | len);
	} else {
		*(volatile uint32_t *)(inl + 4 + spc) =
		    SET_BYTE_COUNT((1 << 31) | (len - spc));
		wmb();
		*(volatile uint32_t *)inl =
		    SET_BYTE_COUNT((1 << 31) | spc);
	}
}

static uint32_t hashrandom;
static void hashrandom_init(void *arg)
{
	/*
	 * It is assumed that the random subsystem has been
	 * initialized when this function is called:
	 */
	hashrandom = m_ether_tcpip_hash_init();
}
SYSINIT(hashrandom_init, SI_SUB_RANDOM, SI_ORDER_ANY, &hashrandom_init, NULL);
606272027Shselasky
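/*
 * Select a transmit ring for an mbuf by hashing its L3/L4 headers;
 * when multiple user priorities are compiled in, the VLAN PCP bits
 * (the top three bits of the tag) pick the priority group and the
 * hash picks the ring within that group.
 */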
u16 mlx4_en_select_queue(struct net_device *dev, struct mbuf *mb)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	u32 rings_p_up = priv->num_tx_rings_p_up;
	u32 up = 0;
	u32 queue_index;

#if (MLX4_EN_NUM_UP > 1)
	/* Obtain VLAN information if present */
	if (mb->m_flags & M_VLANTAG) {
		u32 vlan_tag = mb->m_pkthdr.ether_vtag;
		up = (vlan_tag >> 13) % MLX4_EN_NUM_UP;
	}
#endif
	queue_index = m_ether_tcpip_hash(MBUF_HASHFLAG_L3 | MBUF_HASHFLAG_L4, mb, hashrandom);

	return ((queue_index % rings_p_up) + (up * rings_p_up));
}
625219820Sjeff
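/*
 * BlueFlame: copy the whole WQE through the write-combining doorbell
 * page so the hardware receives the descriptor directly, avoiding the
 * regular doorbell write followed by a DMA read of the descriptor.
 */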
static void mlx4_bf_copy(void __iomem *dst, volatile unsigned long *src, unsigned bytecnt)
{
	__iowrite64_copy(dst, __DEVOLATILE(void *, src), bytecnt / 8);
}

static int mlx4_en_xmit(struct mlx4_en_priv *priv, int tx_ind, struct mbuf **mbp)
{
	enum {
		DS_FACT = TXBB_SIZE / DS_SIZE_ALIGNMENT,
		CTRL_FLAGS = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
		    MLX4_WQE_CTRL_SOLICITED),
	};
	bus_dma_segment_t segs[MLX4_EN_TX_MAX_MBUF_FRAGS];
	volatile struct mlx4_wqe_data_seg *dseg;
	volatile struct mlx4_wqe_data_seg *dseg_inline;
	volatile struct mlx4_en_tx_desc *tx_desc;
	struct mlx4_en_tx_ring *ring = priv->tx_ring[tx_ind];
	struct ifnet *ifp = priv->dev;
	struct mlx4_en_tx_info *tx_info;
	struct mbuf *mb = *mbp;
	struct mbuf *m;
	__be32 owner_bit;
	int nr_segs;
	int pad;
	int err;
	u32 bf_size;
	u32 bf_prod;
	u32 opcode;
	u16 index;
	u16 ds_cnt;
	u16 ihs;

	if (unlikely(!priv->port_up)) {
		err = EINVAL;
		goto tx_drop;
	}

	/* check if TX ring is full */
	if (unlikely(mlx4_en_tx_ring_is_full(ring))) {
		/* Use interrupts to find out when queue opened */
		mlx4_en_arm_cq(priv, priv->tx_cq[tx_ind]);
		return (ENOBUFS);
	}

	/* sanity check we are not wrapping around */
	KASSERT(((~ring->prod) & ring->size_mask) >=
	    (MLX4_EN_TX_WQE_MAX_WQEBBS - 1), ("Wrapping around TX ring"));

	/* Track current inflight packets for performance analysis */
	AVG_PERF_COUNTER(priv->pstats.inflight_avg,
			 (u32) (ring->prod - ring->cons - 1));

	/* Track current mbuf packet header length */
	AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, mb->m_pkthdr.len);

	/* Grab an index and try to transmit packet */
	owner_bit = (ring->prod & ring->size) ?
		cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0;
	index = ring->prod & ring->size_mask;
	tx_desc = (volatile struct mlx4_en_tx_desc *)
	    (ring->buf + index * TXBB_SIZE);
	tx_info = &ring->tx_info[index];
	dseg = &tx_desc->data;

	/* send a copy of the frame to the BPF listener, if any */
	if (ifp != NULL && ifp->if_bpf != NULL)
		ETHER_BPF_MTAP(ifp, mb);

	/* get default flags */
	tx_desc->ctrl.srcrb_flags = CTRL_FLAGS;

	if (mb->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO))
		tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM);

	if (mb->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP |
	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO))
		tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_TCP_UDP_CSUM);

	/* do statistics */
	if (likely(tx_desc->ctrl.srcrb_flags != CTRL_FLAGS)) {
		priv->port_stats.tx_chksum_offload++;
		ring->tx_csum++;
	}

	/* check for VLAN tag */
	if (mb->m_flags & M_VLANTAG) {
		tx_desc->ctrl.vlan_tag = cpu_to_be16(mb->m_pkthdr.ether_vtag);
		tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_CVLAN;
	} else {
		tx_desc->ctrl.vlan_tag = 0;
		tx_desc->ctrl.ins_vlan = 0;
	}

	if (unlikely(mlx4_is_mfunc(priv->mdev->dev) || priv->validate_loopback)) {
		/*
		 * Copy destination MAC address to WQE. This allows
		 * loopback in eSwitch, so that VFs and PF can
		 * communicate with each other:
		 */
		m_copydata(mb, 0, 2, __DEVOLATILE(void *, &tx_desc->ctrl.srcrb_flags16[0]));
		m_copydata(mb, 2, 4, __DEVOLATILE(void *, &tx_desc->ctrl.imm));
	} else {
		/* clear immediate field */
		tx_desc->ctrl.imm = 0;
	}

	/* Handle LSO (TSO) packets */
	if (mb->m_pkthdr.csum_flags & CSUM_TSO) {
		u32 payload_len;
		u32 mss = mb->m_pkthdr.tso_segsz;
		u32 num_pkts;

		opcode = cpu_to_be32(MLX4_OPCODE_LSO | MLX4_WQE_CTRL_RR) |
		    owner_bit;
		ihs = mlx4_en_get_header_size(mb);
		if (unlikely(ihs > MAX_INLINE)) {
			ring->oversized_packets++;
			err = EINVAL;
			goto tx_drop;
		}
		tx_desc->lso.mss_hdr_size = cpu_to_be32((mss << 16) | ihs);
		payload_len = mb->m_pkthdr.len - ihs;
		if (unlikely(payload_len == 0))
			num_pkts = 1;
		else
			num_pkts = DIV_ROUND_UP(payload_len, mss);
		ring->bytes += payload_len + (num_pkts * ihs);
		ring->packets += num_pkts;
		ring->tso_packets++;
		/* store pointer to inline header */
		dseg_inline = dseg;
		/* copy data inline */
		dseg = mlx4_en_store_inline_lso_data(dseg,
		    mb, ihs, owner_bit);
	} else {
		opcode = cpu_to_be32(MLX4_OPCODE_SEND) |
		    owner_bit;
		ihs = mlx4_en_get_inline_hdr_size(ring, mb);
		ring->bytes += max_t(unsigned int,
		    mb->m_pkthdr.len, ETHER_MIN_LEN - ETHER_CRC_LEN);
		ring->packets++;
		/* store pointer to inline header */
		dseg_inline = dseg;
		/* copy data inline */
		dseg = mlx4_en_store_inline_data(dseg,
		    mb, ihs, owner_bit);
	}
	m_adj(mb, ihs);

	err = bus_dmamap_load_mbuf_sg(ring->dma_tag, tx_info->dma_map,
	    mb, segs, &nr_segs, BUS_DMA_NOWAIT);
	if (unlikely(err == EFBIG)) {
		/* Too many mbuf fragments */
		ring->defrag_attempts++;
		m = m_defrag(mb, M_NOWAIT);
		if (m == NULL) {
			ring->oversized_packets++;
			goto tx_drop;
		}
		mb = m;
		/* Try again */
		err = bus_dmamap_load_mbuf_sg(ring->dma_tag, tx_info->dma_map,
		    mb, segs, &nr_segs, BUS_DMA_NOWAIT);
	}
	/* catch errors */
	if (unlikely(err != 0)) {
		ring->oversized_packets++;
		goto tx_drop;
	}
	/* If there were no errors and we didn't load anything, don't sync. */
	if (nr_segs != 0) {
		/* make sure all mbuf data is written to RAM */
		bus_dmamap_sync(ring->dma_tag, tx_info->dma_map,
		    BUS_DMASYNC_PREWRITE);
	} else {
		/* All data was inlined, free the mbuf. */
		bus_dmamap_unload(ring->dma_tag, tx_info->dma_map);
		m_freem(mb);
		mb = NULL;
	}

	/* compute number of DS needed */
	ds_cnt = (dseg - ((volatile struct mlx4_wqe_data_seg *)tx_desc)) + nr_segs;

	/*
	 * Check if the next request can wrap around and fill the end
	 * of the current request with zero immediate data:
	 */
	pad = DIV_ROUND_UP(ds_cnt, DS_FACT);
	pad = (~(ring->prod + pad)) & ring->size_mask;

	if (unlikely(pad < (MLX4_EN_TX_WQE_MAX_WQEBBS - 1))) {
		/*
		 * Compute the least number of DS blocks we need to
		 * pad in order to achieve a TX ring wraparound:
		 */
		pad = (DS_FACT * (pad + 1));
	} else {
		/*
		 * The hardware will automatically jump to the next
		 * TXBB. No need for padding.
		 */
		pad = 0;
	}

	/* compute total number of DS blocks */
	ds_cnt += pad;
	/*
	 * When modifying this code, please ensure that the following
	 * computation is always less than or equal to 0x3F:
	 *
	 * ((MLX4_EN_TX_WQE_MAX_WQEBBS - 1) * DS_FACT) +
	 * (MLX4_EN_TX_WQE_MAX_WQEBBS * DS_FACT)
	 *
	 * Else the "ds_cnt" variable can become too big.
	 */
	tx_desc->ctrl.fence_size = (ds_cnt & 0x3f);

	/* store pointer to mbuf */
	tx_info->mb = mb;
	tx_info->nr_txbb = DIV_ROUND_UP(ds_cnt, DS_FACT);
	bf_size = ds_cnt * DS_SIZE_ALIGNMENT;
	bf_prod = ring->prod;

	/* compute end of "dseg" array */
	dseg += nr_segs + pad;

	/* pad using zero immediate dseg */
	while (pad--) {
		dseg--;
		dseg->addr = 0;
		dseg->lkey = 0;
		wmb();
		dseg->byte_count = SET_BYTE_COUNT((1 << 31)|0);
	}

	/* fill segment list */
	while (nr_segs--) {
		if (unlikely(segs[nr_segs].ds_len == 0)) {
			dseg--;
			dseg->addr = 0;
			dseg->lkey = 0;
			wmb();
			dseg->byte_count = SET_BYTE_COUNT((1 << 31)|0);
		} else {
			dseg--;
			dseg->addr = cpu_to_be64((uint64_t)segs[nr_segs].ds_addr);
			dseg->lkey = cpu_to_be32(priv->mdev->mr.key);
			wmb();
			dseg->byte_count = SET_BYTE_COUNT((uint32_t)segs[nr_segs].ds_len);
		}
	}

	wmb();

	/*
	 * Write the inline segment byte counts last, in reverse order
	 * of construction, so the hardware never parses a partially
	 * written inline segment:
	 */
	if ((opcode & cpu_to_be32(0x1F)) == cpu_to_be32(MLX4_OPCODE_LSO))
		mlx4_en_store_inline_lso_header(dseg_inline, ihs, owner_bit);
	else
		mlx4_en_store_inline_header(dseg_inline, ihs, owner_bit);

	/* update producer counter */
	ring->prod += tx_info->nr_txbb;

	if (ring->bf_enabled && bf_size <= MAX_BF &&
	    (tx_desc->ctrl.ins_vlan != MLX4_WQE_CTRL_INS_CVLAN)) {

		/* store doorbell number */
		*(volatile __be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn);

		/* or in producer number for this WQE */
		opcode |= cpu_to_be32((bf_prod & 0xffff) << 8);

		/*
		 * Ensure the new descriptor hits memory before
		 * setting ownership of this descriptor to HW:
		 */
		wmb();
		tx_desc->ctrl.owner_opcode = opcode;
		wmb();
		mlx4_bf_copy(((u8 *)ring->bf.reg) + ring->bf.offset,
		     (volatile unsigned long *) &tx_desc->ctrl, bf_size);
		wmb();
		ring->bf.offset ^= ring->bf.buf_size;
	} else {
		/*
		 * Ensure the new descriptor hits memory before
		 * setting ownership of this descriptor to HW:
		 */
		wmb();
		tx_desc->ctrl.owner_opcode = opcode;
		wmb();
		writel(cpu_to_be32(ring->doorbell_qpn),
		    ((u8 *)ring->bf.uar->map) + MLX4_SEND_DOORBELL);
	}

	return (0);
tx_drop:
	*mbp = NULL;
	m_freem(mb);
	return (err);
}

static int
mlx4_en_transmit_locked(struct ifnet *ifp, int tx_ind, struct mbuf *mb)
{
	struct mlx4_en_priv *priv = netdev_priv(ifp);
	struct mlx4_en_tx_ring *ring = priv->tx_ring[tx_ind];
	int err = 0;

	if (unlikely((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    READ_ONCE(priv->port_up) == 0)) {
		m_freem(mb);
		return (ENETDOWN);
	}

	if (mlx4_en_xmit(priv, tx_ind, &mb) != 0) {
		/* NOTE: m_freem() is NULL safe */
		m_freem(mb);
		err = ENOBUFS;
		if (ring->watchdog_time == 0)
			ring->watchdog_time = ticks + MLX4_EN_WATCHDOG_TIMEOUT;
	} else {
		ring->watchdog_time = 0;
	}
	return (err);
}

int
mlx4_en_transmit(struct ifnet *dev, struct mbuf *m)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_tx_ring *ring;
	int i, err = 0;

	if (priv->port_up == 0) {
		m_freem(m);
		return (ENETDOWN);
	}

	/* Compute which queue to use */
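	/*
	 * Prefer a flow identifier already carried in the mbuf (for
	 * example from hardware RSS); otherwise fall back to hashing
	 * the packet headers in mlx4_en_select_queue().
	 */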
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
		i = (m->m_pkthdr.flowid % 128) % priv->tx_ring_num;
	} else {
		i = mlx4_en_select_queue(dev, m);
	}

	ring = priv->tx_ring[i];

	spin_lock(&ring->tx_lock);

	err = mlx4_en_transmit_locked(dev, i, m);
	spin_unlock(&ring->tx_lock);

	/* Poll CQ here */
	mlx4_en_xmit_poll(priv, i);

#if __FreeBSD_version >= 1100000
	if (unlikely(err != 0))
		if_inc_counter(dev, IFCOUNTER_IQDROPS, 1);
#endif
	return (err);
}

/*
 * Flush ring buffers.
 */
void
mlx4_en_qflush(struct ifnet *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (priv->port_up == 0)
		return;

	if_qflush(dev);
}