/*
 * Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include "opt_inet.h"
#include <dev/mlx4/cq.h>
#include <linux/slab.h>
#include <dev/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <dev/mlx4/driver.h>
#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#endif

#include "en.h"

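/*
 * With a single data segment per descriptor the RX descriptors can be
 * initialized once up front: the byte count and memory key are
 * constant, and only the DMA address changes at run time. Any unused
 * segment slots are padded with a zero length and the special
 * MEMTYPE_PAD key so the HCA skips them.
 */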
#if (MLX4_EN_MAX_RX_SEGS == 1)
static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring,
				 int index)
{
	struct mlx4_en_rx_desc *rx_desc =
	    ((struct mlx4_en_rx_desc *)ring->buf) + index;
	int i;

	/* Set size and memtype fields */
	rx_desc->data[0].byte_count = cpu_to_be32(priv->rx_mb_size - MLX4_NET_IP_ALIGN);
	rx_desc->data[0].lkey = cpu_to_be32(priv->mdev->mr.key);

	/*
	 * If the number of used fragments does not fill up the ring
	 * stride, remaining (unused) fragments must be padded with
	 * null address/size and a special memory key:
	 */
	for (i = 1; i < MLX4_EN_MAX_RX_SEGS; i++) {
		rx_desc->data[i].byte_count = 0;
		rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
		rx_desc->data[i].addr = 0;
	}
}
#endif

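/*
 * Allocate the mbuf(s) backing one RX descriptor. With a single
 * segment per descriptor one jumbo cluster of ring->rx_mb_size bytes
 * is enough; otherwise a chain of MLX4_EN_MAX_RX_BYTES clusters is
 * built until the chain covers ring->rx_mb_size. On any allocation
 * failure the whole chain is freed and NULL is returned.
 */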
static inline struct mbuf *
mlx4_en_alloc_mbuf(struct mlx4_en_rx_ring *ring)
{
	struct mbuf *mb;

#if (MLX4_EN_MAX_RX_SEGS == 1)
	mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, ring->rx_mb_size);
	if (likely(mb != NULL))
		mb->m_pkthdr.len = mb->m_len = ring->rx_mb_size;
#else
	mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MLX4_EN_MAX_RX_BYTES);
	if (likely(mb != NULL)) {
		struct mbuf *mb_head = mb;
		int i;

		mb->m_len = MLX4_EN_MAX_RX_BYTES;
		mb->m_pkthdr.len = MLX4_EN_MAX_RX_BYTES;

		for (i = 1; i != MLX4_EN_MAX_RX_SEGS; i++) {
			if (mb_head->m_pkthdr.len >= ring->rx_mb_size)
				break;
			mb = (mb->m_next = m_getjcl(M_NOWAIT, MT_DATA, 0, MLX4_EN_MAX_RX_BYTES));
			if (unlikely(mb == NULL)) {
				m_freem(mb_head);
				return (NULL);
			}
			mb->m_len = MLX4_EN_MAX_RX_BYTES;
			mb_head->m_pkthdr.len += MLX4_EN_MAX_RX_BYTES;
		}
		/* rewind to first mbuf in chain */
		mb = mb_head;
	}
#endif
	return (mb);
}

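/*
 * Refill one RX descriptor slot. A preloaded "spare" mbuf is kept per
 * ring so a slot is never left empty: if the fresh allocation or its
 * DMA load fails, the spare (and its DMA map) is swapped into the
 * slot instead, and a new spare is allocated on the next call. The
 * descriptor's address (and, in the multi-segment case, per-segment
 * lengths and keys) is only rewritten once a buffer is successfully
 * loaded.
 */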
static int
mlx4_en_alloc_buf(struct mlx4_en_rx_ring *ring, struct mlx4_en_rx_desc *rx_desc,
    struct mlx4_en_rx_mbuf *mb_list)
{
	bus_dma_segment_t segs[MLX4_EN_MAX_RX_SEGS];
	bus_dmamap_t map;
	struct mbuf *mb;
	int nsegs;
	int err;
#if (MLX4_EN_MAX_RX_SEGS != 1)
	int i;
#endif

	/* try to allocate a new spare mbuf */
	if (unlikely(ring->spare.mbuf == NULL)) {
		mb = mlx4_en_alloc_mbuf(ring);
		if (unlikely(mb == NULL))
			return (-ENOMEM);

		/* make sure IP header gets aligned */
		m_adj(mb, MLX4_NET_IP_ALIGN);

		/* load spare mbuf into BUSDMA */
		err = -bus_dmamap_load_mbuf_sg(ring->dma_tag, ring->spare.dma_map,
		    mb, ring->spare.segs, &nsegs, BUS_DMA_NOWAIT);
		if (unlikely(err != 0)) {
			m_freem(mb);
			return (err);
		}

		/* store spare info */
		ring->spare.mbuf = mb;

#if (MLX4_EN_MAX_RX_SEGS != 1)
		/* zero remaining segs */
		for (i = nsegs; i != MLX4_EN_MAX_RX_SEGS; i++) {
			ring->spare.segs[i].ds_addr = 0;
			ring->spare.segs[i].ds_len = 0;
		}
#endif
		bus_dmamap_sync(ring->dma_tag, ring->spare.dma_map,
		    BUS_DMASYNC_PREREAD);
	}

	/* synchronize and unload the current mbuf, if any */
	if (likely(mb_list->mbuf != NULL)) {
		bus_dmamap_sync(ring->dma_tag, mb_list->dma_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(ring->dma_tag, mb_list->dma_map);
	}

	mb = mlx4_en_alloc_mbuf(ring);
	if (unlikely(mb == NULL))
		goto use_spare;

	/* make sure IP header gets aligned */
	m_adj(mb, MLX4_NET_IP_ALIGN);

	err = -bus_dmamap_load_mbuf_sg(ring->dma_tag, mb_list->dma_map,
	    mb, segs, &nsegs, BUS_DMA_NOWAIT);
	if (unlikely(err != 0)) {
		m_freem(mb);
		goto use_spare;
	}

#if (MLX4_EN_MAX_RX_SEGS == 1)
	rx_desc->data[0].addr = cpu_to_be64(segs[0].ds_addr);
#else
	for (i = 0; i != nsegs; i++) {
		rx_desc->data[i].byte_count = cpu_to_be32(segs[i].ds_len);
		rx_desc->data[i].lkey = ring->rx_mr_key_be;
		rx_desc->data[i].addr = cpu_to_be64(segs[i].ds_addr);
	}
	for (; i != MLX4_EN_MAX_RX_SEGS; i++) {
		rx_desc->data[i].byte_count = 0;
		rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
		rx_desc->data[i].addr = 0;
	}
#endif
	mb_list->mbuf = mb;

	bus_dmamap_sync(ring->dma_tag, mb_list->dma_map, BUS_DMASYNC_PREREAD);
	return (0);

use_spare:
	/* swap DMA maps */
	map = mb_list->dma_map;
	mb_list->dma_map = ring->spare.dma_map;
	ring->spare.dma_map = map;

	/* swap MBUFs */
	mb_list->mbuf = ring->spare.mbuf;
	ring->spare.mbuf = NULL;

	/* store physical address */
#if (MLX4_EN_MAX_RX_SEGS == 1)
	rx_desc->data[0].addr = cpu_to_be64(ring->spare.segs[0].ds_addr);
#else
	for (i = 0; i != MLX4_EN_MAX_RX_SEGS; i++) {
		if (ring->spare.segs[i].ds_len != 0) {
			rx_desc->data[i].byte_count = cpu_to_be32(ring->spare.segs[i].ds_len);
			rx_desc->data[i].lkey = ring->rx_mr_key_be;
			rx_desc->data[i].addr = cpu_to_be64(ring->spare.segs[i].ds_addr);
		} else {
			rx_desc->data[i].byte_count = 0;
			rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
			rx_desc->data[i].addr = 0;
		}
	}
#endif
	return (0);
}

static void
mlx4_en_free_buf(struct mlx4_en_rx_ring *ring, struct mlx4_en_rx_mbuf *mb_list)
{
	bus_dmamap_t map = mb_list->dma_map;

	bus_dmamap_sync(ring->dma_tag, map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(ring->dma_tag, map);
	m_freem(mb_list->mbuf);
	mb_list->mbuf = NULL;	/* safety clearing */
}

static int
mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
    struct mlx4_en_rx_ring *ring, int index)
{
	struct mlx4_en_rx_desc *rx_desc =
	    ((struct mlx4_en_rx_desc *)ring->buf) + index;
	struct mlx4_en_rx_mbuf *mb_list = ring->mbuf + index;

	mb_list->mbuf = NULL;

	if (mlx4_en_alloc_buf(ring, rx_desc, mb_list)) {
		priv->port_stats.rx_alloc_failed++;
		return (-ENOMEM);
	}
	return (0);
}

static inline void
mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
{
	*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
}

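/*
 * Populate every RX ring with receive buffers. Buffers are allocated
 * round-robin across all rings so that, under memory pressure, the
 * rings shrink evenly: if an allocation fails, every ring is trimmed
 * back to the largest power of two that could be filled.
 */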
static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int ring_ind;
	int buf_ind;
	int new_size;
	int err;

	for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
		for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
			ring = priv->rx_ring[ring_ind];

			err = mlx4_en_prepare_rx_desc(priv, ring,
						      ring->actual_size);
			if (err) {
				if (ring->actual_size == 0) {
					en_err(priv, "Failed to allocate "
						     "enough rx buffers\n");
					return -ENOMEM;
				} else {
					new_size =
						rounddown_pow_of_two(ring->actual_size);
					en_warn(priv, "Only %d buffers allocated, "
						      "reducing ring size to %d\n",
						ring->actual_size, new_size);
					goto reduce_rings;
				}
			}
			ring->actual_size++;
			ring->prod++;
		}
	}
	return 0;

reduce_rings:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];
		while (ring->actual_size > new_size) {
			ring->actual_size--;
			ring->prod--;
			mlx4_en_free_buf(ring,
			    ring->mbuf + ring->actual_size);
		}
	}

	return 0;
}

static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	int index;

	en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
	       ring->cons, ring->prod);

	/* Unmap and free Rx buffers */
	BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size);
	while (ring->cons != ring->prod) {
		index = ring->cons & ring->size_mask;
		en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
		mlx4_en_free_buf(ring, ring->mbuf + index);
		++ring->cons;
	}
}

void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
{
	int i;
	int num_of_eqs;
	int num_rx_rings;
	struct mlx4_dev *dev = mdev->dev;

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
		num_of_eqs = max_t(int, MIN_RX_RINGS,
				   min_t(int,
					 mlx4_get_eqs_per_port(mdev->dev, i),
					 DEF_RX_RINGS));

		num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS :
							   num_of_eqs;
		mdev->profile.prof[i].rx_ring_num =
			rounddown_pow_of_two(num_rx_rings);
	}
}

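/*
 * Derive the RX buffer size from the interface MTU: the effective
 * size includes the Ethernet and VLAN headers, the FCS and the IP
 * alignment slack, rounded up to the next mbuf cluster size (2K,
 * page size, 9K or 16K). MTUs beyond a 16K cluster are reported as
 * an error and clamped.
 */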
void mlx4_en_calc_rx_buf(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int eff_mtu = dev->if_mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN +
	    MLX4_NET_IP_ALIGN;

	if (eff_mtu > MJUM16BYTES) {
		en_err(priv, "MTU(%u) is too big\n", (unsigned)dev->if_mtu);
		eff_mtu = MJUM16BYTES;
	} else if (eff_mtu > MJUM9BYTES) {
		eff_mtu = MJUM16BYTES;
	} else if (eff_mtu > MJUMPAGESIZE) {
		eff_mtu = MJUM9BYTES;
	} else if (eff_mtu > MCLBYTES) {
		eff_mtu = MJUMPAGESIZE;
	} else {
		eff_mtu = MCLBYTES;
	}

	priv->rx_mb_size = eff_mtu;

	en_dbg(DRV, priv, "Effective RX MTU: %d bytes\n", eff_mtu);
}

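/*
 * Allocate one RX ring: the ring structure itself, a busdma tag plus
 * per-slot DMA maps (and one for the spare mbuf), the mbuf pointer
 * array, and the hardware work queue resources. The descriptor area
 * is sized with one extra TXBB; when a descriptor fits within a TXBB,
 * the activate path stamps that first TXBB as an unused send WQE and
 * starts the RX section right after it (see
 * mlx4_en_activate_rx_rings()).
 */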
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_rx_ring **pring,
			   u32 size, int node)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring;
	int err;
	int tmp;
	uint32_t x;

	ring = kzalloc(sizeof(struct mlx4_en_rx_ring), GFP_KERNEL);
	if (!ring) {
		en_err(priv, "Failed to allocate RX ring structure\n");
		return -ENOMEM;
	}

	/* Create DMA descriptor TAG */
	if ((err = -bus_dma_tag_create(
	    bus_get_dma_tag(mdev->pdev->dev.bsddev),
	    1,				/* any alignment */
	    0,				/* no boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MJUM16BYTES,		/* maxsize */
	    MLX4_EN_MAX_RX_SEGS,	/* nsegments */
	    MJUM16BYTES,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &ring->dma_tag))) {
		en_err(priv, "Failed to create DMA tag\n");
		goto err_ring;
	}

	ring->prod = 0;
	ring->cons = 0;
	ring->size = size;
	ring->size_mask = size - 1;

	ring->log_stride = ilog2(sizeof(struct mlx4_en_rx_desc));
	ring->buf_size = (ring->size * sizeof(struct mlx4_en_rx_desc)) + TXBB_SIZE;

	tmp = size * sizeof(struct mlx4_en_rx_mbuf);

	ring->mbuf = kzalloc(tmp, GFP_KERNEL);
	if (ring->mbuf == NULL) {
		err = -ENOMEM;
		goto err_dma_tag;
	}

	err = -bus_dmamap_create(ring->dma_tag, 0, &ring->spare.dma_map);
	if (err != 0)
		goto err_info;

	for (x = 0; x != size; x++) {
		err = -bus_dmamap_create(ring->dma_tag, 0,
		    &ring->mbuf[x].dma_map);
		if (err != 0) {
			while (x--)
				bus_dmamap_destroy(ring->dma_tag,
				    ring->mbuf[x].dma_map);
			goto err_spare;
		}
	}
	en_dbg(DRV, priv, "Allocated MBUF ring at addr:%p size:%d\n",
	       ring->mbuf, tmp);

	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
				 ring->buf_size, 2 * PAGE_SIZE);
	if (err)
		goto err_dma_map;

	err = mlx4_en_map_buffer(&ring->wqres.buf);
	if (err) {
		en_err(priv, "Failed to map RX buffer\n");
		goto err_hwq;
	}
	ring->buf = ring->wqres.buf.direct.buf;
	*pring = ring;
	return 0;

err_hwq:
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_dma_map:
	for (x = 0; x != size; x++) {
		bus_dmamap_destroy(ring->dma_tag,
		    ring->mbuf[x].dma_map);
	}
err_spare:
	bus_dmamap_destroy(ring->dma_tag, ring->spare.dma_map);
err_info:
	vfree(ring->mbuf);
err_dma_tag:
	bus_dma_tag_destroy(ring->dma_tag);
err_ring:
	kfree(ring);
	return (err);
}

int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
#if (MLX4_EN_MAX_RX_SEGS == 1)
	int i;
#endif
	int ring_ind;
	int err;

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];

		ring->prod = 0;
		ring->cons = 0;
		ring->actual_size = 0;
		ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;
		ring->rx_mb_size = priv->rx_mb_size;

		if (sizeof(struct mlx4_en_rx_desc) <= TXBB_SIZE) {
			/* Stamp first unused send wqe */
			__be32 *ptr = (__be32 *)ring->buf;
			__be32 stamp = cpu_to_be32(1 << STAMP_SHIFT);
			*ptr = stamp;
			/* Move pointer to start of rx section */
			ring->buf += TXBB_SIZE;
		}

		ring->log_stride = ilog2(sizeof(struct mlx4_en_rx_desc));
		ring->buf_size = ring->size * sizeof(struct mlx4_en_rx_desc);

		memset(ring->buf, 0, ring->buf_size);
		mlx4_en_update_rx_prod_db(ring);

#if (MLX4_EN_MAX_RX_SEGS == 1)
		/* Initialize all descriptors */
		for (i = 0; i < ring->size; i++)
			mlx4_en_init_rx_desc(priv, ring, i);
#endif
		ring->rx_mr_key_be = cpu_to_be32(priv->mdev->mr.key);

#ifdef INET
		/* Configure lro mngr */
		if (priv->dev->if_capenable & IFCAP_LRO) {
			if (tcp_lro_init(&ring->lro))
				priv->dev->if_capenable &= ~IFCAP_LRO;
			else
				ring->lro.ifp = priv->dev;
		}
#endif
	}

	err = mlx4_en_fill_rx_buffers(priv);
	if (err)
		goto err_buffers;

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];

		ring->size_mask = ring->actual_size - 1;
		mlx4_en_update_rx_prod_db(ring);
	}

	return 0;

err_buffers:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
		mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]);

	ring_ind = priv->rx_ring_num - 1;
	while (ring_ind >= 0) {
		ring = priv->rx_ring[ring_ind];
		if (sizeof(struct mlx4_en_rx_desc) <= TXBB_SIZE)
			ring->buf -= TXBB_SIZE;
		ring_ind--;
	}

	return err;
}

void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_rx_ring **pring,
			     u32 size)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring = *pring;
	uint32_t x;

	mlx4_en_unmap_buffer(&ring->wqres.buf);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres,
	    size * sizeof(struct mlx4_en_rx_desc) + TXBB_SIZE);
	for (x = 0; x != size; x++)
		bus_dmamap_destroy(ring->dma_tag, ring->mbuf[x].dma_map);
	/* free spare mbuf, if any */
	if (ring->spare.mbuf != NULL) {
		bus_dmamap_sync(ring->dma_tag, ring->spare.dma_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(ring->dma_tag, ring->spare.dma_map);
		m_freem(ring->spare.mbuf);
	}
	bus_dmamap_destroy(ring->dma_tag, ring->spare.dma_map);
	vfree(ring->mbuf);
	bus_dma_tag_destroy(ring->dma_tag);
#ifdef CONFIG_RFS_ACCEL
	/* cleanup flow steering filters before the ring memory is freed */
	mlx4_en_cleanup_filters(priv, ring);
#endif
	kfree(ring);
	*pring = NULL;
}

void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
#ifdef INET
	tcp_lro_free(&ring->lro);
#endif
	mlx4_en_free_rx_buf(priv, ring);
	if (sizeof(struct mlx4_en_rx_desc) <= TXBB_SIZE)
		ring->buf -= TXBB_SIZE;
}

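/*
 * Self-test helper: check that a received frame carries the expected
 * loopback payload pattern (incrementing bytes after the Ethernet
 * header) and flag success. The frame is consumed in either case.
 */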
static void validate_loopback(struct mlx4_en_priv *priv, struct mbuf *mb)
{
	int i;
	int offset = ETHER_HDR_LEN;

	for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++, offset++) {
		if (*(mb->m_data + offset) != (unsigned char) (i & 0xff))
			goto out_loopback;
	}
	/* Loopback found */
	priv->loopback_ok = 1;

out_loopback:
	m_freem(mb);
}

static inline int invalid_cqe(struct mlx4_en_priv *priv,
			      struct mlx4_cqe *cqe)
{
	/* Drop packet on bad receive or bad checksum */
	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
		     MLX4_CQE_OPCODE_ERROR)) {
		en_err(priv, "CQE completed in error - vendor syndrome:%d syndrome:%d\n",
		       ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
		       ((struct mlx4_err_cqe *)cqe)->syndrome);
		return 1;
	}
	if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
		en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
		return 1;
	}

	return 0;
}

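/*
 * Hand a received frame to the stack. Small frames are copied into a
 * freshly allocated header mbuf so the large receive buffer can stay
 * in the ring; larger frames take the zero-copy path: the filled mbuf
 * (chain) is detached and the descriptor is refilled via
 * mlx4_en_alloc_buf(). Returns NULL if no replacement buffer could be
 * obtained, in which case the caller counts the frame as dropped.
 */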
static struct mbuf *
mlx4_en_rx_mb(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
    struct mlx4_en_rx_desc *rx_desc, struct mlx4_en_rx_mbuf *mb_list,
    int length)
{
#if (MLX4_EN_MAX_RX_SEGS != 1)
	struct mbuf *mb_head;
#endif
	struct mbuf *mb;

	/* optimise reception of small packets */
	if (length <= (MHLEN - MLX4_NET_IP_ALIGN) &&
	    (mb = m_gethdr(M_NOWAIT, MT_DATA)) != NULL) {
		/* set packet length */
		mb->m_pkthdr.len = mb->m_len = length;

		/* make sure IP header gets aligned */
		mb->m_data += MLX4_NET_IP_ALIGN;

		bus_dmamap_sync(ring->dma_tag, mb_list->dma_map,
		    BUS_DMASYNC_POSTREAD);

		bcopy(mtod(mb_list->mbuf, caddr_t), mtod(mb, caddr_t), length);

		return (mb);
	}

	/* get mbuf */
	mb = mb_list->mbuf;

	/* collect used fragment while atomically replacing it */
	if (mlx4_en_alloc_buf(ring, rx_desc, mb_list))
		return (NULL);

	/* range check hardware computed value */
	if (unlikely(length > mb->m_pkthdr.len))
		length = mb->m_pkthdr.len;

#if (MLX4_EN_MAX_RX_SEGS == 1)
	/* update total packet length in packet header */
	mb->m_len = mb->m_pkthdr.len = length;
#else
	mb->m_pkthdr.len = length;
	for (mb_head = mb; mb != NULL; mb = mb->m_next) {
		if (mb->m_len > length)
			mb->m_len = length;
		length -= mb->m_len;
		if (likely(length == 0)) {
			if (likely(mb->m_next != NULL)) {
				/* trim off empty mbufs */
				m_freem(mb->m_next);
				mb->m_next = NULL;
			}
			break;
		}
	}
	/* rewind to first mbuf in chain */
	mb = mb_head;
#endif
	return (mb);
}

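/*
 * Translate the L3/L4 status bits reported in the CQE into the
 * matching FreeBSD M_HASHTYPE_RSS_* value, so the stack knows which
 * Toeplitz variant produced the flow id. UDP hash types are only
 * advertised when the device was configured with UDP RSS.
 */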
static __inline int
mlx4_en_rss_hash(__be16 status, int udp_rss)
{
	enum {
		status_all = cpu_to_be16(
			MLX4_CQE_STATUS_IPV4    |
			MLX4_CQE_STATUS_IPV4F   |
			MLX4_CQE_STATUS_IPV6    |
			MLX4_CQE_STATUS_TCP     |
			MLX4_CQE_STATUS_UDP),
		status_ipv4_tcp = cpu_to_be16(
			MLX4_CQE_STATUS_IPV4    |
			MLX4_CQE_STATUS_TCP),
		status_ipv6_tcp = cpu_to_be16(
			MLX4_CQE_STATUS_IPV6    |
			MLX4_CQE_STATUS_TCP),
		status_ipv4_udp = cpu_to_be16(
			MLX4_CQE_STATUS_IPV4    |
			MLX4_CQE_STATUS_UDP),
		status_ipv6_udp = cpu_to_be16(
			MLX4_CQE_STATUS_IPV6    |
			MLX4_CQE_STATUS_UDP),
		status_ipv4 = cpu_to_be16(MLX4_CQE_STATUS_IPV4),
		status_ipv6 = cpu_to_be16(MLX4_CQE_STATUS_IPV6)
	};

	status &= status_all;
	switch (status) {
	case status_ipv4_tcp:
		return (M_HASHTYPE_RSS_TCP_IPV4);
	case status_ipv6_tcp:
		return (M_HASHTYPE_RSS_TCP_IPV6);
	case status_ipv4_udp:
		return (udp_rss ? M_HASHTYPE_RSS_UDP_IPV4
		    : M_HASHTYPE_RSS_IPV4);
	case status_ipv6_udp:
		return (udp_rss ? M_HASHTYPE_RSS_UDP_IPV6
		    : M_HASHTYPE_RSS_IPV6);
	default:
		if (status & status_ipv4)
			return (M_HASHTYPE_RSS_IPV4);
		if (status & status_ipv6)
			return (M_HASHTYPE_RSS_IPV6);
		return (M_HASHTYPE_OPAQUE_HASH);
	}
}

/*
 * On CPU architectures where the cache line is 64 bytes, performance
 * is better when the CQE size is 64 bytes. To enlarge the CQE size
 * from 32 to 64 bytes, 32 bytes of garbage (i.e. 0xcccccccc) were
 * added at the beginning of each CQE (the real data is in the
 * corresponding 32 bytes). The following calculation ensures that
 * when factor == 1 we are aligned to 64 bytes and get the real CQE
 * data.
 */
#define CQE_FACTOR_INDEX(index, factor) (((index) << (factor)) + (factor))
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cqe *cqe;
	struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
	struct mlx4_en_rx_mbuf *mb_list;
	struct mlx4_en_rx_desc *rx_desc;
	struct mbuf *mb;
	struct mlx4_cq *mcq = &cq->mcq;
	struct mlx4_cqe *buf = cq->buf;
	int index;
	unsigned int length;
	int polled = 0;
	u32 cons_index = mcq->cons_index;
	u32 size_mask = ring->size_mask;
	int size = cq->size;
	int factor = priv->cqe_factor;
	const int udp_rss = priv->mdev->profile.udp_rss;

	if (!priv->port_up)
		return 0;

	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index' */
	index = cons_index & size_mask;
	cqe = &buf[CQE_FACTOR_INDEX(index, factor)];

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
		    cons_index & size)) {
		mb_list = ring->mbuf + index;
		rx_desc = ((struct mlx4_en_rx_desc *)ring->buf) + index;

		/*
		 * make sure we read the CQE after we read the ownership bit
		 */
		rmb();

		if (invalid_cqe(priv, cqe))
			goto next;

		/*
		 * Packet is OK - process it.
		 */
		length = be32_to_cpu(cqe->byte_cnt);
		length -= ring->fcs_del;

		mb = mlx4_en_rx_mb(priv, ring, rx_desc, mb_list, length);
		if (unlikely(!mb)) {
			ring->errors++;
			goto next;
		}

		ring->bytes += length;
		ring->packets++;

		if (unlikely(priv->validate_loopback)) {
			validate_loopback(priv, mb);
			goto next;
		}

		/* forward Toeplitz compatible hash value */
		mb->m_pkthdr.flowid = be32_to_cpu(cqe->immed_rss_invalid);
		M_HASHTYPE_SET(mb, mlx4_en_rss_hash(cqe->status, udp_rss));
		mb->m_pkthdr.rcvif = dev;
		if (be32_to_cpu(cqe->vlan_my_qpn) &
		    MLX4_CQE_CVLAN_PRESENT_MASK) {
			mb->m_pkthdr.ether_vtag = be16_to_cpu(cqe->sl_vid);
			mb->m_flags |= M_VLANTAG;
		}
		if (likely(dev->if_capenable &
		    (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) &&
		    (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
		    (cqe->checksum == cpu_to_be16(0xffff))) {
			priv->port_stats.rx_chksum_good++;
			mb->m_pkthdr.csum_flags =
			    CSUM_IP_CHECKED | CSUM_IP_VALID |
			    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			mb->m_pkthdr.csum_data = htons(0xffff);
			/* This packet is eligible for LRO if it is:
			 * - DIX Ethernet (type interpretation)
			 * - TCP/IP (v4)
			 * - without IP options
			 * - not an IP fragment
			 */
#ifdef INET
			if (mlx4_en_can_lro(cqe->status) &&
			    (dev->if_capenable & IFCAP_LRO)) {
				if (ring->lro.lro_cnt != 0 &&
				    tcp_lro_rx(&ring->lro, mb, 0) == 0)
					goto next;
			}
#endif
			/* LRO not possible, complete processing here */
			INC_PERF_COUNTER(priv->pstats.lro_misses);
		} else {
			mb->m_pkthdr.csum_flags = 0;
			priv->port_stats.rx_chksum_none++;
		}

		/* Push it up the stack */
		dev->if_input(dev, mb);

next:
		++cons_index;
		index = cons_index & size_mask;
		cqe = &buf[CQE_FACTOR_INDEX(index, factor)];
		if (++polled == budget)
			goto out;
	}
	/* Flush all pending IP reassembly sessions */
out:
#ifdef INET
	tcp_lro_flush_all(&ring->lro);
#endif
	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
	mcq->cons_index = cons_index;
	mlx4_cq_set_ci(mcq);
	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
	ring->cons = mcq->cons_index;
	ring->prod += polled; /* Polled descriptors were reallocated in place */
	mlx4_en_update_rx_prod_db(ring);
	return polled;
}

/* Rx CQ polling - called from the interrupt handler and the RX taskqueue */
static int mlx4_en_poll_rx_cq(struct mlx4_en_cq *cq, int budget)
{
	struct net_device *dev = cq->dev;
	int done;

	done = mlx4_en_process_rx_cq(dev, cq, budget);
	cq->tot_rx += done;

	return done;
}

void mlx4_en_rx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
	int done;

	/*
	 * Poll once within the IRQ context, because there is no NAPI
	 * on FreeBSD. If the budget was exhausted, defer further
	 * processing to the CQ taskqueue, bound to the current CPU.
	 */
	done = mlx4_en_poll_rx_cq(cq, MLX4_EN_RX_BUDGET);
	if (priv->port_up && (done == MLX4_EN_RX_BUDGET)) {
		cq->curr_poll_rx_cpu_id = curcpu;
		taskqueue_enqueue(cq->tq, &cq->cq_task);
	} else {
		mlx4_en_arm_cq(priv, cq);
	}
}

void mlx4_en_rx_que(void *context, int pending)
{
	struct mlx4_en_cq *cq;
	struct thread *td;

	cq = context;
	td = curthread;

	thread_lock(td);
	sched_bind(td, cq->curr_poll_rx_cpu_id);
	thread_unlock(td);

	while (mlx4_en_poll_rx_cq(cq, MLX4_EN_RX_BUDGET) ==
	    MLX4_EN_RX_BUDGET)
		;
	mlx4_en_arm_cq(cq->dev->if_softc, cq);
}

/* RSS related functions */

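/*
 * Create and bring up one RX QP bound to the given ring and CQ. When
 * the firmware supports FCS_KEEP, FCS removal is cancelled and
 * ring->fcs_del records how many trailing bytes the RX completion
 * path must later subtract from the reported length.
 */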
static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
				 struct mlx4_en_rx_ring *ring,
				 enum mlx4_qp_state *state,
				 struct mlx4_qp *qp)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_qp_context *context;
	int err = 0;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context) {
		en_err(priv, "Failed to allocate qp context\n");
		return -ENOMEM;
	}

	err = mlx4_qp_alloc(mdev->dev, qpn, qp, GFP_KERNEL);
	if (err) {
		en_err(priv, "Failed to allocate qp #%x\n", qpn);
		goto out;
	}
	qp->event = mlx4_en_sqp_event;

	memset(context, 0, sizeof *context);
	mlx4_en_fill_qp_context(priv, ring->actual_size, sizeof(struct mlx4_en_rx_desc), 0, 0,
				qpn, ring->cqn, -1, context);
	context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);

	/* Cancel FCS removal if FW allows */
	if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) {
		context->param3 |= cpu_to_be32(1 << 29);
		ring->fcs_del = ETH_FCS_LEN;
	} else
		ring->fcs_del = 0;

	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
	if (err) {
		mlx4_qp_remove(mdev->dev, qp);
		mlx4_qp_free(mdev->dev, qp);
	}
	mlx4_en_update_rx_prod_db(ring);
out:
	kfree(context);
	return err;
}

int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv)
{
	int err;
	u32 qpn;

	err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn, 0);
	if (err) {
		en_err(priv, "Failed reserving drop qpn\n");
		return err;
	}
	err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp, GFP_KERNEL);
	if (err) {
		en_err(priv, "Failed allocating drop qp\n");
		mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
		return err;
	}

	return 0;
}

void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv)
{
	u32 qpn;

	qpn = priv->drop_qp.qpn;
	mlx4_qp_remove(priv->mdev->dev, &priv->drop_qp);
	mlx4_qp_free(priv->mdev->dev, &priv->drop_qp);
	mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
}

const u32 *
mlx4_en_get_rss_key(struct mlx4_en_priv *priv __unused,
    u16 *keylen)
{
	static const u32 rsskey[10] = {
		cpu_to_be32(0xD181C62C),
		cpu_to_be32(0xF7F4DB5B),
		cpu_to_be32(0x1983A2FC),
		cpu_to_be32(0x943E1ADB),
		cpu_to_be32(0xD9389E6B),
		cpu_to_be32(0xD1039C2C),
		cpu_to_be32(0xA74499AD),
		cpu_to_be32(0x593D56D9),
		cpu_to_be32(0xF3253C06),
		cpu_to_be32(0x2ADC1FFC)
	};

	if (keylen != NULL)
		*keylen = sizeof(rsskey);
	return (rsskey);
}

u8 mlx4_en_get_rss_mask(struct mlx4_en_priv *priv)
{
	u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 |
			MLX4_RSS_TCP_IPV6);

	if (priv->mdev->profile.udp_rss)
		rss_mask |= MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6;
	return (rss_mask);
}

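/*
 * RSS steering setup: one RX QP per ring is created and readied, then
 * a single indirection QP spreads incoming flows across those QPs
 * using a Toeplitz hash over the fields selected by
 * mlx4_en_get_rss_mask(), keyed with the fixed key above. On failure,
 * everything set up so far is torn down in reverse order.
 */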
/* Allocate rx qp's and configure them according to rss map */
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	struct mlx4_qp_context context;
	struct mlx4_rss_context *rss_context;
	const u32 *key;
	int rss_rings;
	void *ptr;
	int i;
	int err = 0;
	int good_qps = 0;

	en_dbg(DRV, priv, "Configuring rss steering\n");
	err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
				    priv->rx_ring_num,
				    &rss_map->base_qpn, 0);
	if (err) {
		en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num);
		return err;
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_ring[i]->qpn = rss_map->base_qpn + i;
		err = mlx4_en_config_rss_qp(priv, priv->rx_ring[i]->qpn,
					    priv->rx_ring[i],
					    &rss_map->state[i],
					    &rss_map->qps[i]);
		if (err)
			goto rss_err;

		++good_qps;
	}

	/* Configure RSS indirection qp */
	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp, GFP_KERNEL);
	if (err) {
		en_err(priv, "Failed to allocate RSS indirection QP\n");
		goto rss_err;
	}
	rss_map->indir_qp.event = mlx4_en_sqp_event;
	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
				priv->rx_ring[0]->cqn, -1, &context);

	if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
		rss_rings = priv->rx_ring_num;
	else
		rss_rings = priv->prof->rss_rings;

	ptr = ((u8 *)&context) + offsetof(struct mlx4_qp_context, pri_path) +
	    MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
	rss_context = ptr;
	rss_context->base_qpn = cpu_to_be32(ilog2(rss_rings) << 24 |
					    (rss_map->base_qpn));
	rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
	if (priv->mdev->profile.udp_rss)
		rss_context->base_qpn_udp = rss_context->default_qpn;
	rss_context->flags = mlx4_en_get_rss_mask(priv);
	rss_context->hash_fn = MLX4_RSS_HASH_TOP;
	key = mlx4_en_get_rss_key(priv, NULL);
	for (i = 0; i < 10; i++)
		rss_context->rss_key[i] = key[i];

	err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
			       &rss_map->indir_qp, &rss_map->indir_state);
	if (err)
		goto indir_err;

	return 0;

indir_err:
	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
rss_err:
	for (i = 0; i < good_qps; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
	return err;
}

void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	int i;

	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);

	for (i = 0; i < priv->rx_ring_num; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
}