/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/slab.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4_en.h"
#include "en_port.h"


static void mlx4_en_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        int err;

        en_dbg(HW, priv, "Registering VLAN group:%p\n", grp);
        priv->vlgrp = grp;

        mutex_lock(&mdev->state_lock);
        if (mdev->device_up && priv->port_up) {
                err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, grp);
                if (err)
                        en_err(priv, "Failed configuring VLAN filter\n");
        }
        mutex_unlock(&mdev->state_lock);
}

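/*
 * Note: mlx4_SET_VLAN_FLTR() rewrites the port's VLAN filter from the
 * whole vlan_group table rather than updating a single VID, which is
 * why the add/kill VID handlers below simply re-push priv->vlgrp.
 */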
static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        int err;

        if (!priv->vlgrp)
                return;

        en_dbg(HW, priv, "adding VLAN:%d (vlgrp entry:%p)\n",
               vid, vlan_group_get_device(priv->vlgrp, vid));

        /* Add VID to port VLAN filter */
        mutex_lock(&mdev->state_lock);
        if (mdev->device_up && priv->port_up) {
                err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
                if (err)
                        en_err(priv, "Failed configuring VLAN filter\n");
        }
        mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        int err;

        if (!priv->vlgrp)
                return;

        en_dbg(HW, priv, "Killing VID:%d (vlgrp:%p vlgrp entry:%p)\n",
               vid, priv->vlgrp, vlan_group_get_device(priv->vlgrp, vid));
        vlan_group_set_device(priv->vlgrp, vid, NULL);

        /* Remove VID from port VLAN filter */
        mutex_lock(&mdev->state_lock);
        if (mdev->device_up && priv->port_up) {
                err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
                if (err)
                        en_err(priv, "Failed configuring VLAN filter\n");
        }
        mutex_unlock(&mdev->state_lock);
}

static u64 mlx4_en_mac_to_u64(u8 *addr)
{
        u64 mac = 0;
        int i;

        for (i = 0; i < ETH_ALEN; i++) {
                mac <<= 8;
                mac |= addr[i];
        }
        return mac;
}
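/*
 * Example (illustrative only): for the address 00:11:22:33:44:55 the
 * loop above yields 0x001122334455ULL, i.e. addr[0] lands in the most
 * significant used byte, matching the on-wire byte order.
 */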

static int mlx4_en_set_mac(struct net_device *dev, void *addr)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        struct sockaddr *saddr = addr;

        if (!is_valid_ether_addr(saddr->sa_data))
                return -EADDRNOTAVAIL;

        memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
        priv->mac = mlx4_en_mac_to_u64(dev->dev_addr);
        queue_work(mdev->workqueue, &priv->mac_task);
        return 0;
}

static void mlx4_en_do_set_mac(struct work_struct *work)
{
        struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
                                                 mac_task);
        struct mlx4_en_dev *mdev = priv->mdev;
        int err = 0;

        mutex_lock(&mdev->state_lock);
        if (priv->port_up) {
                /* Remove old MAC and insert the new one */
                mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
                err = mlx4_register_mac(mdev->dev, priv->port,
                                        priv->mac, &priv->mac_index);
                if (err)
                        en_err(priv, "Failed changing HW MAC address\n");
        } else
                en_dbg(HW, priv, "Port is down while "
                                 "registering MAC, exiting...\n");

        mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_clear_list(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);

        kfree(priv->mc_addrs);
        priv->mc_addrs_cnt = 0;
}

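/*
 * Called under netif_tx_lock_bh() (see mlx4_en_do_set_multicast below),
 * hence the GFP_ATOMIC allocation; the cached copy lets the HW filter be
 * programmed afterwards without holding the lock.
 */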
static void mlx4_en_cache_mclist(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct netdev_hw_addr *ha;
        char *mc_addrs;
        int mc_addrs_cnt = netdev_mc_count(dev);
        int i;

        mc_addrs = kmalloc(mc_addrs_cnt * ETH_ALEN, GFP_ATOMIC);
        if (!mc_addrs) {
                en_err(priv, "failed to allocate multicast list\n");
                return;
        }
        i = 0;
        netdev_for_each_mc_addr(ha, dev)
                memcpy(mc_addrs + i++ * ETH_ALEN, ha->addr, ETH_ALEN);
        priv->mc_addrs = mc_addrs;
        priv->mc_addrs_cnt = mc_addrs_cnt;
}


static void mlx4_en_set_multicast(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);

        if (!priv->port_up)
                return;

        queue_work(priv->mdev->workqueue, &priv->mcast_task);
}

static void mlx4_en_do_set_multicast(struct work_struct *work)
{
        struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
                                                 mcast_task);
        struct mlx4_en_dev *mdev = priv->mdev;
        struct net_device *dev = priv->dev;
        u64 mcast_addr = 0;
        int err;

        mutex_lock(&mdev->state_lock);
        if (!mdev->device_up) {
                en_dbg(HW, priv, "Card is not up, "
                                 "ignoring multicast change.\n");
                goto out;
        }
        if (!priv->port_up) {
                en_dbg(HW, priv, "Port is down, "
                                 "ignoring multicast change.\n");
                goto out;
        }

        /*
         * Promiscuous mode: disable all filters
         */

        if (dev->flags & IFF_PROMISC) {
                if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
                        if (netif_msg_rx_status(priv))
                                en_warn(priv, "Entering promiscuous mode\n");
                        priv->flags |= MLX4_EN_FLAG_PROMISC;

                        /* Enable promiscuous mode */
                        err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
                                                     priv->base_qpn, 1);
                        if (err)
                                en_err(priv, "Failed enabling "
                                             "promiscuous mode\n");

                        /* Disable port multicast filter (unconditionally) */
                        err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
                                                  0, MLX4_MCAST_DISABLE);
                        if (err)
                                en_err(priv, "Failed disabling "
                                             "multicast filter\n");

                        /* Disable port VLAN filter */
                        err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL);
                        if (err)
                                en_err(priv, "Failed disabling VLAN filter\n");
                }
                goto out;
        }

        /*
         * Not in promiscuous mode
         */

        if (priv->flags & MLX4_EN_FLAG_PROMISC) {
                if (netif_msg_rx_status(priv))
                        en_warn(priv, "Leaving promiscuous mode\n");
                priv->flags &= ~MLX4_EN_FLAG_PROMISC;

                /* Disable promiscuous mode */
                err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
                                             priv->base_qpn, 0);
                if (err)
                        en_err(priv, "Failed disabling promiscuous mode\n");

                /* Enable port VLAN filter */
                err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
                if (err)
                        en_err(priv, "Failed enabling VLAN filter\n");
        }

        /* Enable/disable the multicast filter according to IFF_ALLMULTI */
        if (dev->flags & IFF_ALLMULTI) {
                err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
                                          0, MLX4_MCAST_DISABLE);
                if (err)
                        en_err(priv, "Failed disabling multicast filter\n");
        } else {
                int i;

                err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
                                          0, MLX4_MCAST_DISABLE);
                if (err)
                        en_err(priv, "Failed disabling multicast filter\n");

                /* Flush mcast filter and init it with broadcast address */
                mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
                                    1, MLX4_MCAST_CONFIG);

                /* Update multicast list - we cache all addresses so they won't
                 * change while HW is updated holding the command semaphore */
                netif_tx_lock_bh(dev);
                mlx4_en_cache_mclist(dev);
                netif_tx_unlock_bh(dev);
                for (i = 0; i < priv->mc_addrs_cnt; i++) {
                        mcast_addr =
                              mlx4_en_mac_to_u64(priv->mc_addrs + i * ETH_ALEN);
                        mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
                                            mcast_addr, 0, MLX4_MCAST_CONFIG);
                }
                err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
                                          0, MLX4_MCAST_ENABLE);
                if (err)
                        en_err(priv, "Failed enabling multicast filter\n");

                mlx4_en_clear_list(dev);
        }
out:
        mutex_unlock(&mdev->state_lock);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mlx4_en_netpoll(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_cq *cq;
        unsigned long flags;
        int i;

        for (i = 0; i < priv->rx_ring_num; i++) {
                cq = &priv->rx_cq[i];
                spin_lock_irqsave(&cq->lock, flags);
                napi_synchronize(&cq->napi);
                mlx4_en_process_rx_cq(dev, cq, 0);
                spin_unlock_irqrestore(&cq->lock, flags);
        }
}
#endif

static void mlx4_en_tx_timeout(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;

        if (netif_msg_timer(priv))
                en_warn(priv, "Tx timeout called on port:%d\n", priv->port);

        priv->port_stats.tx_timeout++;
        en_dbg(DRV, priv, "Scheduling watchdog\n");
        queue_work(mdev->workqueue, &priv->watchdog_task);
}


static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);

        spin_lock_bh(&priv->stats_lock);
        memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
        spin_unlock_bh(&priv->stats_lock);

        return &priv->ret_stats;
}

static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
        struct mlx4_en_cq *cq;
        int i;

        /* If we haven't received a specific coalescing setting
         * (module param), we set the moderation parameters as follows:
         * - moder_cnt is set to the number of mtu sized packets to
         *   satisfy our coalescing target.
         * - moder_time is set to a fixed value.
         */
        priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
        priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
        en_dbg(INTR, priv, "Default coalescing params for mtu:%d - "
                           "rx_frames:%d rx_usecs:%d\n",
               priv->dev->mtu, priv->rx_frames, priv->rx_usecs);

        /* Setup cq moderation params */
        for (i = 0; i < priv->rx_ring_num; i++) {
                cq = &priv->rx_cq[i];
                cq->moder_cnt = priv->rx_frames;
                cq->moder_time = priv->rx_usecs;
        }

        for (i = 0; i < priv->tx_ring_num; i++) {
                cq = &priv->tx_cq[i];
                cq->moder_cnt = MLX4_EN_TX_COAL_PKTS;
                cq->moder_time = MLX4_EN_TX_COAL_TIME;
        }

        /* Reset auto-moderation params */
        priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
        priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
        priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
        priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
        priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
        priv->adaptive_rx_coal = 1;
        priv->last_moder_time = MLX4_EN_AUTO_CONF;
        priv->last_moder_jiffies = 0;
        priv->last_moder_packets = 0;
        priv->last_moder_tx_packets = 0;
        priv->last_moder_bytes = 0;
}

static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
        unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
        struct mlx4_en_cq *cq;
        unsigned long packets;
        unsigned long rate;
        unsigned long avg_pkt_size;
        unsigned long rx_packets;
        unsigned long rx_bytes;
        unsigned long rx_byte_diff;
        unsigned long tx_packets;
        unsigned long tx_pkt_diff;
        unsigned long rx_pkt_diff;
        int moder_time;
        int i, err;

        if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
                return;

        spin_lock_bh(&priv->stats_lock);
        rx_packets = priv->stats.rx_packets;
        rx_bytes = priv->stats.rx_bytes;
        tx_packets = priv->stats.tx_packets;
        spin_unlock_bh(&priv->stats_lock);

        if (!priv->last_moder_jiffies || !period)
                goto out;

        tx_pkt_diff = ((unsigned long) (tx_packets -
                                        priv->last_moder_tx_packets));
        rx_pkt_diff = ((unsigned long) (rx_packets -
                                        priv->last_moder_packets));
        packets = max(tx_pkt_diff, rx_pkt_diff);
        rx_byte_diff = rx_bytes - priv->last_moder_bytes;
        rx_byte_diff = rx_byte_diff ? rx_byte_diff : 1;
        rate = packets * HZ / period;
        avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
                                 priv->last_moder_bytes)) / packets : 0;

        /* Apply auto-moderation only when the packet rate is high enough
         * for moderation to matter */
        if (rate > MLX4_EN_RX_RATE_THRESH) {
                /* If tx and rx packet rates are not balanced, assume that
                 * traffic is mainly BW bound and apply maximum moderation.
                 * Otherwise, moderate according to packet rate */
                if (2 * tx_pkt_diff > 3 * rx_pkt_diff &&
                    rx_pkt_diff / rx_byte_diff <
                    MLX4_EN_SMALL_PKT_SIZE)
                        moder_time = priv->rx_usecs_low;
                else if (2 * rx_pkt_diff > 3 * tx_pkt_diff)
                        moder_time = priv->rx_usecs_high;
                else {
                        if (rate < priv->pkt_rate_low)
                                moder_time = priv->rx_usecs_low;
                        else if (rate > priv->pkt_rate_high)
                                moder_time = priv->rx_usecs_high;
                        else
                                moder_time = (rate - priv->pkt_rate_low) *
                                        (priv->rx_usecs_high - priv->rx_usecs_low) /
                                        (priv->pkt_rate_high - priv->pkt_rate_low) +
                                        priv->rx_usecs_low;
                }
        } else {
                /* When packet rate is low, use default moderation rather than
                 * 0 to prevent interrupt storms if traffic suddenly increases */
                moder_time = priv->rx_usecs;
        }
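        /*
         * Worked example of the interpolation above (hypothetical numbers,
         * not the driver defaults): with pkt_rate_low = 100000 p/s,
         * pkt_rate_high = 200000 p/s, rx_usecs_low = 16 and
         * rx_usecs_high = 128, a measured rate of 150000 p/s maps to
         * (50000 * 112) / 100000 + 16 = 72 usec of moderation time.
         */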

        en_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n",
               tx_pkt_diff * HZ / period, rx_pkt_diff * HZ / period);

        en_dbg(INTR, priv, "Rx moder_time changed from:%d to %d period:%lu "
               "[jiff] packets:%lu avg_pkt_size:%lu rate:%lu [p/s]\n",
               priv->last_moder_time, moder_time, period, packets,
               avg_pkt_size, rate);

        if (moder_time != priv->last_moder_time) {
                priv->last_moder_time = moder_time;
                for (i = 0; i < priv->rx_ring_num; i++) {
                        cq = &priv->rx_cq[i];
                        cq->moder_time = moder_time;
                        err = mlx4_en_set_cq_moder(priv, cq);
                        if (err) {
                                en_err(priv, "Failed modifying moderation for cq:%d\n", i);
                                break;
                        }
                }
        }

out:
        priv->last_moder_packets = rx_packets;
        priv->last_moder_tx_packets = tx_packets;
        priv->last_moder_bytes = rx_bytes;
        priv->last_moder_jiffies = jiffies;
}

static void mlx4_en_do_get_stats(struct work_struct *work)
{
        struct delayed_work *delay = to_delayed_work(work);
        struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
                                                 stats_task);
        struct mlx4_en_dev *mdev = priv->mdev;
        int err;

        err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
        if (err)
                en_dbg(HW, priv, "Could not update stats\n");

        mutex_lock(&mdev->state_lock);
        if (mdev->device_up) {
                if (priv->port_up)
                        mlx4_en_auto_moderation(priv);

                queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
        }
        mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_linkstate(struct work_struct *work)
{
        struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
                                                 linkstate_task);
        struct mlx4_en_dev *mdev = priv->mdev;
        int linkstate = priv->link_state;

        mutex_lock(&mdev->state_lock);
        /* If the observable port state changed, set the carrier state and
         * report to the system log */
        if (priv->last_link_state != linkstate) {
                if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
                        en_dbg(LINK, priv, "Link Down\n");
                        netif_carrier_off(priv->dev);
                } else {
                        en_dbg(LINK, priv, "Link Up\n");
                        netif_carrier_on(priv->dev);
                }
        }
        priv->last_link_state = linkstate;
        mutex_unlock(&mdev->state_lock);
}


int mlx4_en_start_port(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_en_cq *cq;
        struct mlx4_en_tx_ring *tx_ring;
        int rx_index = 0;
        int tx_index = 0;
        int err = 0;
        int i;
        int j;

        if (priv->port_up) {
                en_dbg(DRV, priv, "start port called while port already up\n");
                return 0;
        }

        /* Calculate Rx buf size */
        dev->mtu = min(dev->mtu, priv->max_mtu);
        mlx4_en_calc_rx_buf(dev);
        en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);

        /* Configure rx cq's and rings */
        err = mlx4_en_activate_rx_rings(priv);
        if (err) {
                en_err(priv, "Failed to activate RX rings\n");
                return err;
        }
        for (i = 0; i < priv->rx_ring_num; i++) {
                cq = &priv->rx_cq[i];

                err = mlx4_en_activate_cq(priv, cq);
                if (err) {
                        en_err(priv, "Failed activating Rx CQ\n");
                        goto cq_err;
                }
                for (j = 0; j < cq->size; j++)
                        cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
                err = mlx4_en_set_cq_moder(priv, cq);
                if (err) {
                        en_err(priv, "Failed setting cq moderation parameters\n");
                        mlx4_en_deactivate_cq(priv, cq);
                        goto cq_err;
                }
                mlx4_en_arm_cq(priv, cq);
                priv->rx_ring[i].cqn = cq->mcq.cqn;
                ++rx_index;
        }

        err = mlx4_en_config_rss_steer(priv);
        if (err) {
                en_err(priv, "Failed configuring rss steering\n");
                goto cq_err;
        }

        /* Configure tx cq's and rings */
        for (i = 0; i < priv->tx_ring_num; i++) {
                /* Configure cq */
                cq = &priv->tx_cq[i];
                err = mlx4_en_activate_cq(priv, cq);
                if (err) {
                        en_err(priv, "Failed allocating Tx CQ\n");
                        goto tx_err;
                }
                err = mlx4_en_set_cq_moder(priv, cq);
                if (err) {
                        en_err(priv, "Failed setting cq moderation parameters\n");
                        mlx4_en_deactivate_cq(priv, cq);
                        goto tx_err;
                }
                en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
                cq->buf->wqe_index = cpu_to_be16(0xffff);

                /* Configure ring */
                tx_ring = &priv->tx_ring[i];
                err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn);
                if (err) {
                        en_err(priv, "Failed allocating Tx ring\n");
                        mlx4_en_deactivate_cq(priv, cq);
                        goto tx_err;
                }
                /* Set initial ownership of all Tx TXBBs to SW (1) */
                for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
                        *((u32 *) (tx_ring->buf + j)) = 0xffffffff;
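                /*
                 * Note: the 0xffffffff pattern includes the descriptor
                 * ownership bit, so every STAMP_STRIDE-aligned dword in
                 * the ring starts out marked as SW owned, matching the
                 * comment above.
                 */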
                ++tx_index;
        }

        /* Configure port */
        err = mlx4_SET_PORT_general(mdev->dev, priv->port,
                                    priv->rx_skb_size + ETH_FCS_LEN,
                                    priv->prof->tx_pause,
                                    priv->prof->tx_ppp,
                                    priv->prof->rx_pause,
                                    priv->prof->rx_ppp);
        if (err) {
                en_err(priv, "Failed setting port general configurations "
                             "for port %d, with error %d\n", priv->port, err);
                goto tx_err;
        }
        /* Set default qp number */
        err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
        if (err) {
                en_err(priv, "Failed setting default qp numbers\n");
                goto tx_err;
        }
        /* Set port MAC */
        en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
        err = mlx4_register_mac(mdev->dev, priv->port,
                                priv->mac, &priv->mac_index);
        if (err) {
                en_err(priv, "Failed setting port mac\n");
                goto tx_err;
        }

        /* Init port */
        en_dbg(HW, priv, "Initializing port\n");
        err = mlx4_INIT_PORT(mdev->dev, priv->port);
        if (err) {
                en_err(priv, "Failed Initializing port\n");
                goto mac_err;
        }

        /* Schedule multicast task to populate multicast list */
        queue_work(mdev->workqueue, &priv->mcast_task);

        priv->port_up = true;
        netif_tx_start_all_queues(dev);
        return 0;

mac_err:
        mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
tx_err:
        while (tx_index--) {
                mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
                mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
        }

        mlx4_en_release_rss_steer(priv);
cq_err:
        while (rx_index--)
                mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
        for (i = 0; i < priv->rx_ring_num; i++)
                mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);

        return err; /* need to close devices */
}


void mlx4_en_stop_port(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        int i;

        if (!priv->port_up) {
                en_dbg(DRV, priv, "stop port called while port already down\n");
                return;
        }

        /* Synchronize with tx routine */
        netif_tx_lock_bh(dev);
        netif_tx_stop_all_queues(dev);
        netif_tx_unlock_bh(dev);

        /* Close port */
        priv->port_up = false;
        mlx4_CLOSE_PORT(mdev->dev, priv->port);

        /* Unregister MAC address for the port */
        mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);

        /* Free TX Rings */
        for (i = 0; i < priv->tx_ring_num; i++) {
                mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
                mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]);
        }
        msleep(10);

        for (i = 0; i < priv->tx_ring_num; i++)
                mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]);

        /* Free RSS qps */
        mlx4_en_release_rss_steer(priv);

        /* Free RX Rings */
        for (i = 0; i < priv->rx_ring_num; i++) {
                mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
                while (test_bit(NAPI_STATE_SCHED, &priv->rx_cq[i].napi.state))
                        msleep(1);
                mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
        }
}

static void mlx4_en_restart(struct work_struct *work)
{
        struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
                                                 watchdog_task);
        struct mlx4_en_dev *mdev = priv->mdev;
        struct net_device *dev = priv->dev;

        en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);

        mutex_lock(&mdev->state_lock);
        if (priv->port_up) {
                mlx4_en_stop_port(dev);
                if (mlx4_en_start_port(dev))
                        en_err(priv, "Failed restarting port %d\n", priv->port);
        }
        mutex_unlock(&mdev->state_lock);
}


static int mlx4_en_open(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        int i;
        int err = 0;

        mutex_lock(&mdev->state_lock);

        if (!mdev->device_up) {
                en_err(priv, "Cannot open - device down/disabled\n");
                err = -EBUSY;
                goto out;
        }

        /* Reset HW statistics and performance counters */
        if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
                en_dbg(HW, priv, "Failed dumping statistics\n");

        memset(&priv->stats, 0, sizeof(priv->stats));
        memset(&priv->pstats, 0, sizeof(priv->pstats));

        for (i = 0; i < priv->tx_ring_num; i++) {
                priv->tx_ring[i].bytes = 0;
                priv->tx_ring[i].packets = 0;
        }
        for (i = 0; i < priv->rx_ring_num; i++) {
                priv->rx_ring[i].bytes = 0;
                priv->rx_ring[i].packets = 0;
        }

        mlx4_en_set_default_moderation(priv);
        err = mlx4_en_start_port(dev);
        if (err)
                en_err(priv, "Failed starting port:%d\n", priv->port);

out:
        mutex_unlock(&mdev->state_lock);
        return err;
}


static int mlx4_en_close(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;

        en_dbg(IFDOWN, priv, "Close port called\n");

        mutex_lock(&mdev->state_lock);

        mlx4_en_stop_port(dev);
        netif_carrier_off(dev);

        mutex_unlock(&mdev->state_lock);
        return 0;
}

void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
        int i;

        for (i = 0; i < priv->tx_ring_num; i++) {
                if (priv->tx_ring[i].tx_info)
                        mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
                if (priv->tx_cq[i].buf)
                        mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
        }

        for (i = 0; i < priv->rx_ring_num; i++) {
                if (priv->rx_ring[i].rx_info)
                        mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i]);
                if (priv->rx_cq[i].buf)
                        mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
        }
}

int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
        struct mlx4_en_port_profile *prof = priv->prof;
        int i;

        /* Create TX rings */
        for (i = 0; i < priv->tx_ring_num; i++) {
                if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
                                      prof->tx_ring_size, i, TX))
                        goto err;

                if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
                                           prof->tx_ring_size, TXBB_SIZE))
                        goto err;
        }

        /* Create RX rings */
        for (i = 0; i < priv->rx_ring_num; i++) {
                if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
                                      prof->rx_ring_size, i, RX))
                        goto err;

                if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
                                           prof->rx_ring_size, priv->stride))
                        goto err;
        }

        return 0;

err:
        en_err(priv, "Failed to allocate NIC resources\n");
        return -ENOMEM;
}


void mlx4_en_destroy_netdev(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;

        en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);

        /* Unregister device - this will close the port if it was up */
        if (priv->registered)
                unregister_netdev(dev);

        if (priv->allocated)
                mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);

        cancel_delayed_work(&priv->stats_task);
        /* flush any pending task for this netdev */
        flush_workqueue(mdev->workqueue);

        /* Detach the netdev so tasks will not attempt to access it */
        mutex_lock(&mdev->state_lock);
        mdev->pndev[priv->port] = NULL;
        mutex_unlock(&mdev->state_lock);

        mlx4_en_free_resources(priv);
        free_netdev(dev);
}

static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        int err = 0;

        en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
               dev->mtu, new_mtu);

        if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
                en_err(priv, "Bad MTU size:%d.\n", new_mtu);
                return -EPERM;
        }
        dev->mtu = new_mtu;

        if (netif_running(dev)) {
                mutex_lock(&mdev->state_lock);
                if (!mdev->device_up) {
                        /* NIC is probably restarting - let the watchdog task
                         * reset the port */
                        en_dbg(DRV, priv, "Change MTU called with card down!?\n");
                } else {
                        mlx4_en_stop_port(dev);
                        mlx4_en_set_default_moderation(priv);
                        err = mlx4_en_start_port(dev);
                        if (err) {
                                en_err(priv, "Failed restarting port:%d\n",
                                       priv->port);
                                queue_work(mdev->workqueue, &priv->watchdog_task);
                        }
                }
                mutex_unlock(&mdev->state_lock);
        }
        return 0;
}

static const struct net_device_ops mlx4_netdev_ops = {
        .ndo_open               = mlx4_en_open,
        .ndo_stop               = mlx4_en_close,
        .ndo_start_xmit         = mlx4_en_xmit,
        .ndo_select_queue       = mlx4_en_select_queue,
        .ndo_get_stats          = mlx4_en_get_stats,
        .ndo_set_multicast_list = mlx4_en_set_multicast,
        .ndo_set_mac_address    = mlx4_en_set_mac,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_change_mtu         = mlx4_en_change_mtu,
        .ndo_tx_timeout         = mlx4_en_tx_timeout,
        .ndo_vlan_rx_register   = mlx4_en_vlan_rx_register,
        .ndo_vlan_rx_add_vid    = mlx4_en_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = mlx4_en_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = mlx4_en_netpoll,
#endif
};

int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
                        struct mlx4_en_port_profile *prof)
{
        struct net_device *dev;
        struct mlx4_en_priv *priv;
        int i;
        int err;

        dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num);
        if (dev == NULL) {
                mlx4_err(mdev, "Net device allocation failed\n");
                return -ENOMEM;
        }

        SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
        dev->dev_id = port - 1;

        /*
         * Initialize driver private data
         */

        priv = netdev_priv(dev);
        memset(priv, 0, sizeof(struct mlx4_en_priv));
        priv->dev = dev;
        priv->mdev = mdev;
        priv->prof = prof;
        priv->port = port;
        priv->port_up = false;
        priv->rx_csum = 1;
        priv->flags = prof->flags;
        priv->tx_ring_num = prof->tx_ring_num;
        priv->rx_ring_num = prof->rx_ring_num;
        priv->mac_index = -1;
        priv->msg_enable = MLX4_EN_MSG_LEVEL;
        spin_lock_init(&priv->stats_lock);
        INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast);
        INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
        INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
        INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
        INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);

        /* Query for default mac and max mtu */
        priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
        priv->mac = mdev->dev->caps.def_mac[priv->port];
        if (ILLEGAL_MAC(priv->mac)) {
                en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quitting\n",
                       priv->port, priv->mac);
                err = -EINVAL;
                goto out;
        }

        priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
                                          DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
        err = mlx4_en_alloc_resources(priv);
        if (err)
                goto out;

        /* Allocate page for receive rings */
        err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
                                 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
        if (err) {
                en_err(priv, "Failed to allocate page for rx qps\n");
                goto out;
        }
        priv->allocated = 1;

        /*
         * Initialize netdev entry points
         */
        dev->netdev_ops = &mlx4_netdev_ops;
        dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
        dev->real_num_tx_queues = MLX4_EN_NUM_TX_RINGS;

        SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);

        /* Set default MAC */
        dev->addr_len = ETH_ALEN;
        for (i = 0; i < ETH_ALEN; i++)
                dev->dev_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
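        /*
         * The loop above is the byte-wise inverse of mlx4_en_mac_to_u64():
         * e.g. a priv->mac of 0x001122334455 becomes the dev_addr
         * 00:11:22:33:44:55.
         */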

        /*
         * Set driver features
         */
        dev->features |= NETIF_F_SG;
        dev->vlan_features |= NETIF_F_SG;
        dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
        dev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
        dev->features |= NETIF_F_HIGHDMA;
        dev->features |= NETIF_F_HW_VLAN_TX |
                         NETIF_F_HW_VLAN_RX |
                         NETIF_F_HW_VLAN_FILTER;
        if (mdev->profile.num_lro)
                dev->features |= NETIF_F_LRO;
        if (mdev->LSO_support) {
                dev->features |= NETIF_F_TSO;
                dev->features |= NETIF_F_TSO6;
                dev->vlan_features |= NETIF_F_TSO;
                dev->vlan_features |= NETIF_F_TSO6;
        }

        mdev->pndev[port] = dev;

        netif_carrier_off(dev);
        err = register_netdev(dev);
        if (err) {
                en_err(priv, "Netdev registration failed for port %d\n", port);
                goto out;
        }

        en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
        en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);

        priv->registered = 1;
        queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
        return 0;

out:
        mlx4_en_destroy_netdev(dev);
        return err;
}
1070