/* drivers/net/sfc/ (from asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6) */
/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2009 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/kernel_stat.h>
#include <linux/pci.h>
#include <linux/ethtool.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/udp.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <asm/io.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/*
 * Loopback test packet structure
 *
 * The self-test should stress every RSS vector, and unfortunately
 * Falcon only performs RSS on TCP/UDP packets.
 */
struct efx_loopback_payload {
	struct ethhdr header;
	struct iphdr ip;
	struct udphdr udp;
	__be16 iteration;
	const char msg[64];
} __packed;

/* Loopback test source MAC address */
static const unsigned char payload_source[ETH_ALEN] = {
	0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b,
};

static const char payload_msg[] =
	"Hello world! This is an Efx loopback test in progress!";

/**
 * efx_loopback_state - persistent state during a loopback selftest
 * @flush:		Drop all packets in efx_loopback_rx_packet
 * @packet_count:	Number of packets being used in this test
 * @skbs:		An array of skbs transmitted
 * @offload_csum:	Checksums are being offloaded
 * @rx_good:		RX good packet count
 * @rx_bad:		RX bad packet count
 * @payload:		Payload used in tests
 */
struct efx_loopback_state {
	bool flush;
	int packet_count;
	struct sk_buff **skbs;
	bool offload_csum;
	atomic_t rx_good;
	atomic_t rx_bad;
	struct efx_loopback_payload payload;
};

/**************************************************************************
 *
 * MII, NVRAM and register tests
 *
 **************************************************************************/

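/* Check that the PHY is still responding, using the PHY's test_alive hook
 * if it provides one. A pass is recorded as 1, a failure as -1. */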
static int efx_test_phy_alive(struct efx_nic *efx, struct efx_self_tests *tests)
{
	int rc = 0;

	if (efx->phy_op->test_alive) {
		rc = efx->phy_op->test_alive(efx);
		tests->phy_alive = rc ? -1 : 1;
	}

	return rc;
}

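/* Validate the on-board NVRAM contents, if the NIC type provides a
 * test_nvram hook. */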
static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests)
{
	int rc = 0;

	if (efx->type->test_nvram) {
		rc = efx->type->test_nvram(efx);
		tests->nvram = rc ? -1 : 1;
	}

	return rc;
}

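/* Exercise chip register access, if the NIC type provides a test_registers
 * hook. This is disruptive and is only called from the offline test path. */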
static int efx_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
{
	int rc = 0;

	/* Test register access */
	if (efx->type->test_registers) {
		rc = efx->type->test_registers(efx);
		tests->registers = rc ? -1 : 1;
	}

	return rc;
}

/**************************************************************************
 *
 * Interrupt and event queue testing
 *
 **************************************************************************/

/* Test generation and receipt of interrupts */
static int efx_test_interrupts(struct efx_nic *efx,
			       struct efx_self_tests *tests)
{
	struct efx_channel *channel;

	netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
	tests->interrupt = -1;

	/* Reset interrupt flag */
	efx->last_irq_cpu = -1;
	smp_wmb();

	/* ACK each interrupting event queue. Receiving an interrupt due to
	 * traffic before a test event is raised is considered a pass */
	efx_for_each_channel(channel, efx) {
		if (channel->work_pending)
			efx_process_channel_now(channel);
		if (efx->last_irq_cpu >= 0)
			goto success;
	}

	efx_nic_generate_interrupt(efx);

	/* Wait for arrival of test interrupt. */
	netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n");
	schedule_timeout_uninterruptible(HZ / 10);
	if (efx->last_irq_cpu >= 0)
		goto success;

	netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n");
	return -ETIMEDOUT;

 success:
	netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n",
		  INT_MODE(efx),
		  efx->last_irq_cpu);
	tests->interrupt = 1;
	return 0;
}

/* Test generation and receipt of interrupting events */
static int efx_test_eventq_irq(struct efx_channel *channel,
			       struct efx_self_tests *tests)
{
	struct efx_nic *efx = channel->efx;
	unsigned int magic_count, count;

	tests->eventq_dma[channel->channel] = -1;
	tests->eventq_int[channel->channel] = -1;
	tests->eventq_poll[channel->channel] = -1;

	magic_count = channel->magic_count;
	channel->efx->last_irq_cpu = -1;
	smp_wmb();

	efx_nic_generate_test_event(channel);

	/* Wait for arrival of interrupt */
	count = 0;
	do {
		schedule_timeout_uninterruptible(HZ / 100);

		if (channel->work_pending)
			efx_process_channel_now(channel);

		if (channel->magic_count != magic_count)
			goto eventq_ok;
	} while (++count < 2);

	netif_err(efx, drv, efx->net_dev,
		  "channel %d timed out waiting for event queue\n",
		  channel->channel);

	/* See if interrupt arrived */
	if (channel->efx->last_irq_cpu >= 0) {
		netif_err(efx, drv, efx->net_dev,
			  "channel %d saw interrupt on CPU%d "
			  "during event queue test\n", channel->channel,
			  raw_smp_processor_id());
		tests->eventq_int[channel->channel] = 1;
	}

	/* Check to see if event was received even if interrupt wasn't */
	efx_process_channel_now(channel);
	if (channel->magic_count != magic_count) {
		netif_err(efx, drv, efx->net_dev,
			  "channel %d event was generated, but "
			  "failed to trigger an interrupt\n", channel->channel);
		tests->eventq_dma[channel->channel] = 1;
	}

	return -ETIMEDOUT;
 eventq_ok:
	netif_dbg(efx, drv, efx->net_dev, "channel %d event queue passed\n",
		  channel->channel);
	tests->eventq_dma[channel->channel] = 1;
	tests->eventq_int[channel->channel] = 1;
	tests->eventq_poll[channel->channel] = 1;
	return 0;
}

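/* Run any PHY-specific tests (such as a PHY BIST), recording the results in
 * tests->phy_ext. The MAC lock is held so the port cannot be reconfigured
 * while the tests run. */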
static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests,
			unsigned flags)
{
	int rc;

	if (!efx->phy_op->run_tests)
		return 0;

	mutex_lock(&efx->mac_lock);
	rc = efx->phy_op->run_tests(efx, tests->phy_ext, flags);
	mutex_unlock(&efx->mac_lock);
	return rc;
}

/**************************************************************************
 *
 * Loopback testing
 * NB Only one loopback test can be executing concurrently.
 *
 **************************************************************************/

/* Loopback test RX callback
 * This is called for each received packet during loopback testing.
 */
void efx_loopback_rx_packet(struct efx_nic *efx,
			    const char *buf_ptr, int pkt_len)
{
	struct efx_loopback_state *state = efx->loopback_selftest;
	struct efx_loopback_payload *received;
	struct efx_loopback_payload *payload;

	BUG_ON(!buf_ptr);

	/* If we are just flushing, then drop the packet */
	if ((state == NULL) || state->flush)
		return;

	payload = &state->payload;

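	/* Each transmitted copy carries its packet index in ip.saddr, and the
	 * IP checksum may have been rewritten when checksum offload is in use,
	 * so normalise both fields before comparing against the reference
	 * payload. */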
	received = (struct efx_loopback_payload *) buf_ptr;
	received->ip.saddr = payload->ip.saddr;
	if (state->offload_csum)
		received->ip.check = payload->ip.check;

	/* Check that header exists */
	if (pkt_len < sizeof(received->header)) {
		netif_err(efx, drv, efx->net_dev,
			  "saw runt RX packet (length %d) in %s loopback "
			  "test\n", pkt_len, LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that the ethernet header exists */
	if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) {
		netif_err(efx, drv, efx->net_dev,
			  "saw non-loopback RX packet in %s loopback test\n",
			  LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check packet length */
	if (pkt_len != sizeof(*payload)) {
		netif_err(efx, drv, efx->net_dev,
			  "saw incorrect RX packet length %d (wanted %d) in "
			  "%s loopback test\n", pkt_len, (int)sizeof(*payload),
			  LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that IP header matches */
	if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) {
		netif_err(efx, drv, efx->net_dev,
			  "saw corrupted IP header in %s loopback test\n",
			  LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that msg and padding match */
	if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) {
		netif_err(efx, drv, efx->net_dev,
			  "saw corrupted RX packet in %s loopback test\n",
			  LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that iteration matches */
	if (received->iteration != payload->iteration) {
		netif_err(efx, drv, efx->net_dev,
			  "saw RX packet from iteration %d (wanted %d) in "
			  "%s loopback test\n", ntohs(received->iteration),
			  ntohs(payload->iteration), LOOPBACK_MODE(efx));
		goto err;
	}

	/* Increase correct RX count */
	netif_vdbg(efx, drv, efx->net_dev,
		   "got loopback RX in %s loopback test\n", LOOPBACK_MODE(efx));

	atomic_inc(&state->rx_good);
	return;

 err:
#ifdef EFX_ENABLE_DEBUG
	if (atomic_read(&state->rx_bad) == 0) {
		netif_err(efx, drv, efx->net_dev, "received packet:\n");
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
			       buf_ptr, pkt_len, 0);
		netif_err(efx, drv, efx->net_dev, "expected packet:\n");
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
			       &state->payload, sizeof(state->payload), 0);
	}
#endif
	atomic_inc(&state->rx_bad);
}

/* Initialise an efx_selftest_state for a new iteration */
static void efx_iterate_state(struct efx_nic *efx)
{
	struct efx_loopback_state *state = efx->loopback_selftest;
	struct net_device *net_dev = efx->net_dev;
	struct efx_loopback_payload *payload = &state->payload;

	/* Initialise the layer II header */
	memcpy(&payload->header.h_dest, net_dev->dev_addr, ETH_ALEN);
	memcpy(&payload->header.h_source, &payload_source, ETH_ALEN);
	payload->header.h_proto = htons(ETH_P_IP);

	/* saddr set later and used as incrementing count */
	payload->ip.daddr = htonl(INADDR_LOOPBACK);
	payload->ip.ihl = 5;
	payload->ip.check = htons(0xdead);
	payload->ip.tot_len = htons(sizeof(*payload) - sizeof(struct ethhdr));
	payload->ip.version = IPVERSION;
	payload->ip.protocol = IPPROTO_UDP;

	/* Initialise udp header */
	payload->udp.source = 0;
	payload->udp.len = htons(sizeof(*payload) - sizeof(struct ethhdr) -
				 sizeof(struct iphdr));
	payload->udp.check = 0;	/* checksum ignored */

	/* Fill out payload */
	payload->iteration = htons(ntohs(payload->iteration) + 1);
	memcpy(&payload->msg, payload_msg, sizeof(payload_msg));

	/* Fill out remaining state members */
	atomic_set(&state->rx_good, 0);
	atomic_set(&state->rx_bad, 0);
	smp_wmb();
}

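/* Transmit state->packet_count copies of the test payload through the given
 * TX queue, holding an extra reference to each skb so that TX completions
 * can be counted later in efx_end_loopback(). */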
static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	struct efx_loopback_state *state = efx->loopback_selftest;
	struct efx_loopback_payload *payload;
	struct sk_buff *skb;
	int i;
	netdev_tx_t rc;

	/* Transmit N copies of buffer */
	for (i = 0; i < state->packet_count; i++) {
		/* Allocate an skb, holding an extra reference for
		 * transmit completion counting */
		skb = alloc_skb(sizeof(state->payload), GFP_KERNEL);
		if (!skb)
			return -ENOMEM;
		state->skbs[i] = skb;
		skb_get(skb);

		/* Copy the payload in, incrementing the source address to
		 * exercise the rss vectors */
		payload = ((struct efx_loopback_payload *)
			   skb_put(skb, sizeof(state->payload)));
		memcpy(payload, &state->payload, sizeof(state->payload));
		payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2));

		/* Ensure everything we've written is visible to the
		 * interrupt handler. */
		smp_wmb();

		if (efx_dev_registered(efx))
			netif_tx_lock_bh(efx->net_dev);
		rc = efx_enqueue_skb(tx_queue, skb);
		if (efx_dev_registered(efx))
			netif_tx_unlock_bh(efx->net_dev);

		if (rc != NETDEV_TX_OK) {
			netif_err(efx, drv, efx->net_dev,
				  "TX queue %d could not transmit packet %d of "
				  "%d in %s loopback test\n", tx_queue->queue,
				  i + 1, state->packet_count,
				  LOOPBACK_MODE(efx));

			/* Defer cleaning up the other skbs for the caller */
			kfree_skb(skb);
			return -EPIPE;
		}
	}

	return 0;
}

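/* Process all channels synchronously and report whether every transmitted
 * packet has been received back correctly. */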
static int efx_poll_loopback(struct efx_nic *efx)
{
	struct efx_loopback_state *state = efx->loopback_selftest;
	struct efx_channel *channel;

	/* NAPI polling is not enabled, so process channels
	 * synchronously */
	efx_for_each_channel(channel, efx) {
		if (channel->work_pending)
			efx_process_channel_now(channel);
	}
	return atomic_read(&state->rx_good) == state->packet_count;
}

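/* Count TX completions by checking which skbs are no longer shared, drop our
 * extra references, and fold the TX/RX counts into the loopback test results.
 * Returns -ETIMEDOUT if either count falls short of packet_count. */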
static int efx_end_loopback(struct efx_tx_queue *tx_queue,
			    struct efx_loopback_self_tests *lb_tests)
{
	struct efx_nic *efx = tx_queue->efx;
	struct efx_loopback_state *state = efx->loopback_selftest;
	struct sk_buff *skb;
	int tx_done = 0, rx_good, rx_bad;
	int i, rc = 0;

	if (efx_dev_registered(efx))
		netif_tx_lock_bh(efx->net_dev);

	/* Count the number of tx completions, and decrement the refcnt. Any
	 * skbs not already completed will be freed when the queue is flushed */
	for (i = 0; i < state->packet_count; i++) {
		skb = state->skbs[i];
		if (skb && !skb_shared(skb))
			++tx_done;
		dev_kfree_skb_any(skb);
	}

	if (efx_dev_registered(efx))
		netif_tx_unlock_bh(efx->net_dev);

	/* Check TX completion and received packet counts */
	rx_good = atomic_read(&state->rx_good);
	rx_bad = atomic_read(&state->rx_bad);
	if (tx_done != state->packet_count) {
		/* Don't free the skbs; they will be picked up on TX
		 * overflow or channel teardown.
		 */
		netif_err(efx, drv, efx->net_dev,
			  "TX queue %d saw only %d out of an expected %d "
			  "TX completion events in %s loopback test\n",
			  tx_queue->queue, tx_done, state->packet_count,
			  LOOPBACK_MODE(efx));
		rc = -ETIMEDOUT;
		/* Allow to fall through so we see the RX errors as well */
	}

	/* We may always be up to a flush away from our desired packet total */
	if (rx_good != state->packet_count) {
		netif_dbg(efx, drv, efx->net_dev,
			  "TX queue %d saw only %d out of an expected %d "
			  "received packets in %s loopback test\n",
			  tx_queue->queue, rx_good, state->packet_count,
			  LOOPBACK_MODE(efx));
		rc = -ETIMEDOUT;
		/* Fall through */
	}

	/* Update loopback test structure */
	lb_tests->tx_sent[tx_queue->queue] += state->packet_count;
	lb_tests->tx_done[tx_queue->queue] += tx_done;
	lb_tests->rx_good += rx_good;
	lb_tests->rx_bad += rx_bad;

	return rc;
}

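/* Run the loopback test on a single TX queue with increasing burst sizes,
 * allocating a fresh skb array for each iteration. */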
static int
efx_test_loopback(struct efx_tx_queue *tx_queue,
		  struct efx_loopback_self_tests *lb_tests)
{
	struct efx_nic *efx = tx_queue->efx;
	struct efx_loopback_state *state = efx->loopback_selftest;
	int i, begin_rc, end_rc;

	for (i = 0; i < 3; i++) {
		/* Determine how many packets to send */
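		/* (bursts of 1, 16 and then 256 packets, capped at a third
		 * of the TX queue size) */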
		state->packet_count = EFX_TXQ_SIZE / 3;
		state->packet_count = min(1 << (i << 2), state->packet_count);
		state->skbs = kzalloc(sizeof(state->skbs[0]) *
				      state->packet_count, GFP_KERNEL);
		if (!state->skbs)
			return -ENOMEM;
		state->flush = false;

		netif_dbg(efx, drv, efx->net_dev,
			  "TX queue %d testing %s loopback with %d packets\n",
			  tx_queue->queue, LOOPBACK_MODE(efx),
			  state->packet_count);

		efx_iterate_state(efx);
		begin_rc = efx_begin_loopback(tx_queue);

		/* This will normally complete very quickly, but be
		 * prepared to wait up to 100 ms. */
		msleep(1);
		if (!efx_poll_loopback(efx)) {
			msleep(100);
			efx_poll_loopback(efx);
		}

		end_rc = efx_end_loopback(tx_queue, lb_tests);
		kfree(state->skbs);

		if (begin_rc || end_rc) {
			/* Wait a while to ensure there are no packets
			 * floating around after a failure. */
			schedule_timeout_uninterruptible(HZ / 10);
			return begin_rc ? begin_rc : end_rc;
		}
	}

	netif_dbg(efx, drv, efx->net_dev,
		  "TX queue %d passed %s loopback test with a burst length "
		  "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
		  state->packet_count);

	return 0;
}

/* Wait for link up. On Falcon, we would prefer to rely on efx_monitor, but
 * any contention on the mac lock (via e.g. efx_mac_mcast_work) causes it
 * to delay and retry. Therefore, it's safer to just poll directly. Wait
 * for link up and any faults to dissipate. */
static int efx_wait_for_link(struct efx_nic *efx)
{
	struct efx_link_state *link_state = &efx->link_state;
	int count, link_up_count = 0;
	bool link_up;

	for (count = 0; count < 40; count++) {
		schedule_timeout_uninterruptible(HZ / 10);

		if (efx->type->monitor != NULL) {
			mutex_lock(&efx->mac_lock);
			efx->type->monitor(efx);
			mutex_unlock(&efx->mac_lock);
		} else {
			struct efx_channel *channel = &efx->channel[0];
			if (channel->work_pending)
				efx_process_channel_now(channel);
		}

		mutex_lock(&efx->mac_lock);
		link_up = link_state->up;
		if (link_up)
			link_up = !efx->mac_op->check_fault(efx);
		mutex_unlock(&efx->mac_lock);

		if (link_up) {
			if (++link_up_count == 2)
				return 0;
		} else {
			link_up_count = 0;
		}
	}

	return -ETIMEDOUT;
}

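/* Cycle through every loopback mode selected in loopback_modes, reconfiguring
 * the port and waiting for the link before running efx_test_loopback() on
 * each type of TX queue on channel 0. */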
static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
			      unsigned int loopback_modes)
{
	enum efx_loopback_mode mode;
	struct efx_loopback_state *state;
	struct efx_tx_queue *tx_queue;
	int rc = 0;

	/* Set the port loopback_selftest member. From this point on
	 * all received packets will be dropped. Mark the state as
	 * "flushing" so all inflight packets are dropped */
	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state == NULL)
		return -ENOMEM;
	BUG_ON(efx->loopback_selftest);
	state->flush = true;
	efx->loopback_selftest = state;

	/* Test all supported loopback modes */
	for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
		if (!(loopback_modes & (1 << mode)))
			continue;

		/* Move the port into the specified loopback mode. */
		state->flush = true;
		mutex_lock(&efx->mac_lock);
		efx->loopback_mode = mode;
		rc = __efx_reconfigure_port(efx);
		mutex_unlock(&efx->mac_lock);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "unable to move into %s loopback\n",
				  LOOPBACK_MODE(efx));
			goto out;
		}

		rc = efx_wait_for_link(efx);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "loopback %s never came up\n",
				  LOOPBACK_MODE(efx));
			goto out;
		}

		/* Test both types of TX queue */
		efx_for_each_channel_tx_queue(tx_queue, &efx->channel[0]) {
			state->offload_csum = (tx_queue->queue &
					       EFX_TXQ_TYPE_OFFLOAD);
			rc = efx_test_loopback(tx_queue,
					       &tests->loopback[mode]);
			if (rc)
				goto out;
		}
	}

 out:
	/* Remove the flush. The caller will remove the loopback setting */
	state->flush = true;
	efx->loopback_selftest = NULL;
	wmb();
	kfree(state);

	return rc;
}

/**************************************************************************
 *
 * Entry point
 *
 *************************************************************************/

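/* Top-level self-test entry point. The online tests always run; the
 * disruptive offline tests (register, BIST and loopback) run only when the
 * caller sets ETH_TEST_FL_OFFLINE, and the original port configuration is
 * restored afterwards. */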
int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
		 unsigned flags)
{
	enum efx_loopback_mode loopback_mode = efx->loopback_mode;
	int phy_mode = efx->phy_mode;
	enum reset_type reset_method = RESET_TYPE_INVISIBLE;
	struct efx_channel *channel;
	int rc_test = 0, rc_reset = 0, rc;

	/* Online (i.e. non-disruptive) testing
	 * This checks interrupt generation, event delivery and PHY presence. */

	rc = efx_test_phy_alive(efx, tests);
	if (rc && !rc_test)
		rc_test = rc;

	rc = efx_test_nvram(efx, tests);
	if (rc && !rc_test)
		rc_test = rc;

	rc = efx_test_interrupts(efx, tests);
	if (rc && !rc_test)
		rc_test = rc;

	efx_for_each_channel(channel, efx) {
		rc = efx_test_eventq_irq(channel, tests);
		if (rc && !rc_test)
			rc_test = rc;
	}

	if (rc_test)
		return rc_test;

	if (!(flags & ETH_TEST_FL_OFFLINE))
		return efx_test_phy(efx, tests, flags);

	/* Offline (i.e. disruptive) testing
	 * This checks MAC and PHY loopback on the specified port. */

	/* force the carrier state off so the kernel doesn't transmit during
	 * the loopback test, and the watchdog timeout doesn't fire. Also put
	 * falcon into loopback for the register test.
	 */
	mutex_lock(&efx->mac_lock);
	efx->port_inhibited = true;
	if (efx->loopback_modes) {
		/* We need the 312 clock from the PHY to test the XMAC
		 * registers, so move into XGMII loopback if available */
		if (efx->loopback_modes & (1 << LOOPBACK_XGMII))
			efx->loopback_mode = LOOPBACK_XGMII;
		else
			efx->loopback_mode = __ffs(efx->loopback_modes);
	}

	__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	/* free up all consumers of SRAM (including all the queues) */
	efx_reset_down(efx, reset_method);

	rc = efx_test_chip(efx, tests);
	if (rc && !rc_test)
		rc_test = rc;

	/* reset the chip to recover from the register test */
	rc_reset = efx->type->reset(efx, reset_method);

	/* Ensure that the phy is powered and out of loopback
	 * for the bist and loopback tests */
	efx->phy_mode &= ~PHY_MODE_LOW_POWER;
	efx->loopback_mode = LOOPBACK_NONE;

	rc = efx_reset_up(efx, reset_method, rc_reset == 0);
	if (rc && !rc_reset)
		rc_reset = rc;

	if (rc_reset) {
		netif_err(efx, drv, efx->net_dev,
			  "Unable to recover from chip test\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
		return rc_reset;
	}

	rc = efx_test_phy(efx, tests, flags);
	if (rc && !rc_test)
		rc_test = rc;

	rc = efx_test_loopbacks(efx, tests, efx->loopback_modes);
	if (rc && !rc_test)
		rc_test = rc;

	/* restore the PHY to the previous state */
	mutex_lock(&efx->mac_lock);
	efx->phy_mode = phy_mode;
	efx->port_inhibited = false;
	efx->loopback_mode = loopback_mode;
	__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	return rc_test;
}