1/*
2 *  linux/drivers/message/fusion/mptlan.c
3 *      IP Over Fibre Channel device driver.
4 *      For use with PCI chip/adapter(s):
5 *          LSIFC9xx/LSI409xx Fibre Channel
6 *      running LSI Logic Fusion MPT (Message Passing Technology) firmware.
7 *
8 *  Credits:
9 *      This driver would not exist if not for Alan Cox's development
10 *      of the linux i2o driver.
11 *
12 *      Special thanks goes to the I2O LAN driver people at the
13 *      University of Helsinki, who, unbeknownst to them, provided
14 *      the inspiration and initial structure for this driver.
15 *
16 *      A huge debt of gratitude is owed to David S. Miller (DaveM)
17 *      for fixing much of the stupid and broken stuff in the early
18 *      driver while porting to sparc64 platform.  THANK YOU!
19 *
20 *      A really huge debt of gratitude is owed to Eddie C. Dost
21 *      for gobs of hard work fixing and optimizing LAN code.
22 *      THANK YOU!
23 *
24 *      (see also mptbase.c)
25 *
26 *  Copyright (c) 2000-2002 LSI Logic Corporation
27 *  Originally By: Noah Romer
28 *
29 *  $Id: mptlan.c,v 1.1.1.1 2008/10/15 03:26:34 james26_jang Exp $
30 */
31/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
32/*
33    This program is free software; you can redistribute it and/or modify
34    it under the terms of the GNU General Public License as published by
35    the Free Software Foundation; version 2 of the License.
36
37    This program is distributed in the hope that it will be useful,
38    but WITHOUT ANY WARRANTY; without even the implied warranty of
39    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
40    GNU General Public License for more details.
41
42    NO WARRANTY
43    THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
44    CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
45    LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
46    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
47    solely responsible for determining the appropriateness of using and
48    distributing the Program and assumes all risks associated with its
49    exercise of rights under this Agreement, including but not limited to
50    the risks and costs of program errors, damage to or loss of data,
51    programs or equipment, and unavailability or interruption of operations.
52
53    DISCLAIMER OF LIABILITY
54    NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
55    DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
56    DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
57    ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
58    TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
59    USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
60    HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
61
62    You should have received a copy of the GNU General Public License
63    along with this program; if not, write to the Free Software
64    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
65*/
66
67/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
68/*
69 * Define statements used for debugging
70 */
71//#define MPT_LAN_IO_DEBUG
72
73/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
74
75#include "mptlan.h"
76#include <linux/init.h>
77#include <linux/module.h>
78#include <linux/fs.h>
79
80#define MYNAM		"mptlan"
81
82MODULE_LICENSE("GPL");
83
84/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
85/*
86 * MPT LAN message sizes without variable part.
87 */
88#define MPT_LAN_RECEIVE_POST_REQUEST_SIZE \
89	(sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION))
90
91#define MPT_LAN_TRANSACTION32_SIZE \
92	(sizeof(SGETransaction32_t) - sizeof(u32))
93
94/*
95 *  Fusion MPT LAN private structures
96 */
97
/* One entry in the QLOGIC_NAA_WORKAROUND list: records, per peer IEEE
 * (MAC) address, the NAA value to substitute when transmitting to that
 * peer (QLogic boards don't follow RFC 2625 — see mpt_lan_sdu_send). */
struct NAA_Hosed {
	u16 NAA;			/* NAA value used for this peer */
	u8 ieee[FC_ALEN];		/* peer's IEEE (MAC) address */
	struct NAA_Hosed *next;		/* singly-linked list link */
};
103
/* Bookkeeping for one Tx buffer or Rx bucket: the skb plus its
 * streaming DMA mapping, kept so the buffer can be unmapped later. */
struct BufferControl {
	struct sk_buff	*skb;	/* buffer handed to / received from the IOC */
	dma_addr_t	dma;	/* bus address from pci_map_single() */
	unsigned int	len;	/* mapped length, needed for pci_unmap_single() */
};
109
/* Per-netdevice private state for one LAN-capable MPT IOC port. */
struct mpt_lan_priv {
	MPT_ADAPTER *mpt_dev;	/* owning MPT adapter */
	u8 pnum; /* Port number in the IOC. This is not a Unix network port! */

	atomic_t buckets_out;		/* number of unused buckets on IOC */
	int bucketthresh;		/* Send more when this many left */

	int *mpt_txfidx; /* Free Tx Context list */
	int mpt_txfidx_tail;	/* top-of-stack index into mpt_txfidx (-1 = empty) */
	spinlock_t txfidx_lock;	/* guards mpt_txfidx / mpt_txfidx_tail */

	int *mpt_rxfidx; /* Free Rx Context list */
	int mpt_rxfidx_tail;	/* top-of-stack index into mpt_rxfidx (-1 = empty) */
	spinlock_t rxfidx_lock;	/* guards mpt_rxfidx / mpt_rxfidx_tail */

	struct BufferControl *RcvCtl;	/* Receive BufferControl structs */
	struct BufferControl *SendCtl;	/* Send BufferControl structs */

	int max_buckets_out;		/* Max buckets to send to IOC */
	int tx_max_out;			/* IOC's Tx queue len */

	u32 total_posted;	/* buckets posted since load (debug stats) */
	u32 total_received;	/* buckets returned since load (debug stats) */
	struct net_device_stats stats;	/* Per device statistics */

	struct tq_struct post_buckets_task;	/* deferred bucket-repost work */
	unsigned long post_buckets_active;	/* bit 0 set while repost task queued */
};
138
/* FC LAN pseudo-header layout: a 16-bit word plus a 6-byte IEEE address
 * for each of destination and source.  NOTE(review): presumably consumed
 * by mpt_lan_type_trans() when decoding received frames — its body is
 * not in this file section, so confirm against the full source. */
struct mpt_lan_ohdr {
	u16	dtype;		/* destination 16-bit word (NAA/type) */
	u8	daddr[FC_ALEN];	/* destination IEEE (MAC) address */
	u16	stype;		/* source 16-bit word (NAA/type) */
	u8	saddr[FC_ALEN];	/* source IEEE (MAC) address */
};
145
146/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
147
148/*
149 *  Forward protos...
150 */
151static int  lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
152		       MPT_FRAME_HDR *reply);
153static int  mpt_lan_open(struct net_device *dev);
154static int  mpt_lan_reset(struct net_device *dev);
155static int  mpt_lan_close(struct net_device *dev);
156static void mpt_lan_post_receive_buckets(void *dev_id);
157static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
158					   int priority);
159static int  mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
160static int  mpt_lan_receive_post_reply(struct net_device *dev,
161				       LANReceivePostReply_t *pRecvRep);
162static int  mpt_lan_send_turbo(struct net_device *dev, u32 tmsg);
163static int  mpt_lan_send_reply(struct net_device *dev,
164			       LANSendReply_t *pSendRep);
165static int  mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
166static int  mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
167static unsigned short mpt_lan_type_trans(struct sk_buff *skb,
168					 struct net_device *dev);
169
170/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
171/*
172 *  Fusion MPT LAN private data
173 */
174static int LanCtx = -1;
175
176static u32 max_buckets_out = 127;
177static u32 tx_max_out_p = 127 - 16;
178
179static struct net_device *mpt_landev[MPT_MAX_ADAPTERS+1];
180
181#ifdef QLOGIC_NAA_WORKAROUND
182static struct NAA_Hosed *mpt_bad_naa = NULL;
183rwlock_t bad_naa_lock;
184#endif
185
186/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
187/*
188 * Fusion MPT LAN external data
189 */
190extern int mpt_lan_index;
191
192/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
193/**
194 *	lan_reply - Handle all data sent from the hardware.
195 *	@ioc: Pointer to MPT_ADAPTER structure
196 *	@mf: Pointer to original MPT request frame (NULL if TurboReply)
197 *	@reply: Pointer to MPT reply frame
198 *
199 *	Returns 1 indicating original alloc'd request frame ptr
200 *	should be freed, or 0 if it shouldn't.
201 */
static int
lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
{
	struct net_device *dev = mpt_landev[ioc->id];
	int FreeReqFrame = 0;

	dioprintk((KERN_INFO MYNAM ": %s/%s: Got reply.\n",
		  IOC_AND_NETDEV_NAMES_s_s(dev)));

//	dioprintk((KERN_INFO MYNAM "@lan_reply: mf = %p, reply = %p\n",
//			mf, reply));

	/* mf == NULL means this is a turbo reply: 'reply' is not a real
	 * frame pointer but the 32-bit turbo message word itself. */
	if (mf == NULL) {
		u32 tmsg = CAST_PTR_TO_U32(reply);

		dioprintk((KERN_INFO MYNAM ": %s/%s: @lan_reply, tmsg %08x\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				tmsg));

		switch (GET_LAN_FORM(tmsg)) {

		// NOTE!  (Optimization) First case here is now caught in
		//  mptbase.c::mpt_interrupt() routine and callback here
		//  is now skipped for this case!  20001218 -sralston

		case LAN_REPLY_FORM_SEND_SINGLE:
//			dioprintk((MYNAM "/lan_reply: "
//				  "calling mpt_lan_send_reply (turbo)\n"));

			// Potential BUG here?  -sralston
			//	FreeReqFrame = mpt_lan_send_turbo(dev, tmsg);
			//  If/when mpt_lan_send_turbo would return 1 here,
			//  calling routine (mptbase.c|mpt_interrupt)
			//  would Oops because mf has already been set
			//  to NULL.  So after return from this func,
			//  mpt_interrupt() will attempt to put (NULL) mf ptr
			//  item back onto it's adapter FreeQ - Oops!:-(
			//  It's Ok, since mpt_lan_send_turbo() *currently*
			//  always returns 0, but..., just in case:

			(void) mpt_lan_send_turbo(dev, tmsg);
			FreeReqFrame = 0;

			break;

		case LAN_REPLY_FORM_RECEIVE_SINGLE:
//			dioprintk((KERN_INFO MYNAM "@lan_reply: "
//				  "rcv-Turbo = %08x\n", tmsg));
			mpt_lan_receive_post_turbo(dev, tmsg);
			break;

		default:
			printk (KERN_ERR MYNAM "/lan_reply: Got a turbo reply "
				"that I don't know what to do with\n");

			/* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */

			break;
		}

		/* Turbo replies carry no request frame for the caller to free. */
		return FreeReqFrame;
	}

//	msg = (u32 *) reply;
//	dioprintk((KERN_INFO MYNAM "@lan_reply: msg = %08x %08x %08x %08x\n",
//		  le32_to_cpu(msg[0]), le32_to_cpu(msg[1]),
//		  le32_to_cpu(msg[2]), le32_to_cpu(msg[3])));
//	dioprintk((KERN_INFO MYNAM "@lan_reply: Function = %02xh\n",
//		  reply->u.hdr.Function));

	/* Full (non-turbo) reply frame: dispatch on the MPI function code. */
	switch (reply->u.hdr.Function) {

	case MPI_FUNCTION_LAN_SEND:
	{
		LANSendReply_t *pSendRep;

		pSendRep = (LANSendReply_t *) reply;
		FreeReqFrame = mpt_lan_send_reply(dev, pSendRep);
		break;
	}

	case MPI_FUNCTION_LAN_RECEIVE:
	{
		LANReceivePostReply_t *pRecvRep;

		pRecvRep = (LANReceivePostReply_t *) reply;
		if (pRecvRep->NumberOfContexts) {
			mpt_lan_receive_post_reply(dev, pRecvRep);
			/* Only free the request frame on the final reply of a
			 * (possibly multi-part) receive-post exchange. */
			if (!(pRecvRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
				FreeReqFrame = 1;
		} else
			dioprintk((KERN_INFO MYNAM "@lan_reply: zero context "
				  "ReceivePostReply received.\n"));
		break;
	}

	case MPI_FUNCTION_LAN_RESET:
		/* Just a default reply. Might want to check it to
		 * make sure that everything went ok.
		 */
		FreeReqFrame = 1;
		break;

	case MPI_FUNCTION_EVENT_NOTIFICATION:
	case MPI_FUNCTION_EVENT_ACK:
		/* UPDATE!  20010120 -sralston
		 *  _EVENT_NOTIFICATION should NOT come down this path any more.
		 *  Should be routed to mpt_lan_event_process(), but just in case...
		 */
		FreeReqFrame = 1;
		break;

	default:
		printk (KERN_ERR MYNAM "/lan_reply: Got a non-turbo "
			"reply that I don't know what to do with\n");

		/* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */
		FreeReqFrame = 1;

		break;
	}

	return FreeReqFrame;
}
326
327/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* MPT reset callback for this LAN port.  Pre-reset: stop the Tx queue,
 * zero the outstanding-bucket count and rebuild the Rx free-context
 * stack (the reset reclaims every bucket).  Post-reset: repost receive
 * buckets and reopen the queue.  Always returns 1. */
static int
mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
{
	struct net_device *dev = mpt_landev[ioc->id];
	struct mpt_lan_priv *priv = (struct mpt_lan_priv *) dev->priv;

	dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n",
			reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post"));

	/* Interface never opened (or already closed) — nothing to do. */
	if (priv->mpt_rxfidx == NULL)
		return (1);

	if (reset_phase == MPT_IOC_PRE_RESET) {
		int i;
		unsigned long flags;

		netif_stop_queue(dev);

		dlprintk ((KERN_INFO "mptlan/ioc_reset: called netif_stop_queue for %s.\n", dev->name));

		atomic_set(&priv->buckets_out, 0);

		/* Reset Rx Free Tail index and re-populate the queue. */
		spin_lock_irqsave(&priv->rxfidx_lock, flags);
		priv->mpt_rxfidx_tail = -1;
		for (i = 0; i < priv->max_buckets_out; i++)
			priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
	} else {
		mpt_lan_post_receive_buckets(dev);
		netif_wake_queue(dev);
	}

	return 1;
}
363
364/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
365static int
366mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
367{
368	dlprintk((KERN_INFO MYNAM ": MPT event routed to LAN driver!\n"));
369
370	switch (le32_to_cpu(pEvReply->Event)) {
371	case MPI_EVENT_NONE:				/* 00 */
372	case MPI_EVENT_LOG_DATA:			/* 01 */
373	case MPI_EVENT_STATE_CHANGE:			/* 02 */
374	case MPI_EVENT_UNIT_ATTENTION:			/* 03 */
375	case MPI_EVENT_IOC_BUS_RESET:			/* 04 */
376	case MPI_EVENT_EXT_BUS_RESET:			/* 05 */
377	case MPI_EVENT_RESCAN:				/* 06 */
378		/* Ok, do we need to do anything here? As far as
379		   I can tell, this is when a new device gets added
380		   to the loop. */
381	case MPI_EVENT_LINK_STATUS_CHANGE:		/* 07 */
382	case MPI_EVENT_LOOP_STATE_CHANGE:		/* 08 */
383	case MPI_EVENT_LOGOUT:				/* 09 */
384	case MPI_EVENT_EVENT_CHANGE:			/* 0A */
385	default:
386		break;
387	}
388
389	/*
390	 *  NOTE: pEvent->AckRequired handling now done in mptbase.c;
391	 *  Do NOT do it here now!
392	 */
393
394	return 1;
395}
396
397/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
398static int
399mpt_lan_open(struct net_device *dev)
400{
401	struct mpt_lan_priv *priv = (struct mpt_lan_priv *) dev->priv;
402	int i;
403
404	if (mpt_lan_reset(dev) != 0) {
405		MPT_ADAPTER *mpt_dev = priv->mpt_dev;
406
407		printk (KERN_WARNING MYNAM "/lan_open: lan_reset failed.");
408
409		if (mpt_dev->active)
410			printk ("The ioc is active. Perhaps it needs to be"
411				" reset?\n");
412		else
413			printk ("The ioc in inactive, most likely in the "
414				"process of being reset. Please try again in "
415				"a moment.\n");
416	}
417
418	priv->mpt_txfidx = kmalloc(priv->tx_max_out * sizeof(int), GFP_KERNEL);
419	if (priv->mpt_txfidx == NULL)
420		goto out;
421	priv->mpt_txfidx_tail = -1;
422
423	priv->SendCtl = kmalloc(priv->tx_max_out * sizeof(struct BufferControl),
424				GFP_KERNEL);
425	if (priv->SendCtl == NULL)
426		goto out_mpt_txfidx;
427	for (i = 0; i < priv->tx_max_out; i++) {
428		memset(&priv->SendCtl[i], 0, sizeof(struct BufferControl));
429		priv->mpt_txfidx[++priv->mpt_txfidx_tail] = i;
430	}
431
432	dlprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n"));
433
434	priv->mpt_rxfidx = kmalloc(priv->max_buckets_out * sizeof(int),
435				   GFP_KERNEL);
436	if (priv->mpt_rxfidx == NULL)
437		goto out_SendCtl;
438	priv->mpt_rxfidx_tail = -1;
439
440	priv->RcvCtl = kmalloc(priv->max_buckets_out *
441						sizeof(struct BufferControl),
442			       GFP_KERNEL);
443	if (priv->RcvCtl == NULL)
444		goto out_mpt_rxfidx;
445	for (i = 0; i < priv->max_buckets_out; i++) {
446		memset(&priv->RcvCtl[i], 0, sizeof(struct BufferControl));
447		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
448	}
449
450/**/	dlprintk((KERN_INFO MYNAM "/lo: txfidx contains - "));
451/**/	for (i = 0; i < priv->tx_max_out; i++)
452/**/		dlprintk((" %xh", priv->mpt_txfidx[i]));
453/**/	dlprintk(("\n"));
454
455	dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));
456
457	mpt_lan_post_receive_buckets(dev);
458	printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
459			IOC_AND_NETDEV_NAMES_s_s(dev));
460
461	if (mpt_event_register(LanCtx, mpt_lan_event_process) != 0) {
462		printk (KERN_WARNING MYNAM "/lo: Unable to register for Event"
463			" Notifications. This is a bad thing! We're not going "
464			"to go ahead, but I'd be leery of system stability at "
465			"this point.\n");
466	}
467
468	netif_start_queue(dev);
469	dlprintk((KERN_INFO MYNAM "/lo: Done.\n"));
470
471	return 0;
472out_mpt_rxfidx:
473	kfree(priv->mpt_rxfidx);
474	priv->mpt_rxfidx = NULL;
475out_SendCtl:
476	kfree(priv->SendCtl);
477	priv->SendCtl = NULL;
478out_mpt_txfidx:
479	kfree(priv->mpt_txfidx);
480	priv->mpt_txfidx = NULL;
481out:	return -ENOMEM;
482}
483
484/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
485/* Send a LanReset message to the FW. This should result in the FW returning
486   any buckets it still has. */
487static int
488mpt_lan_reset(struct net_device *dev)
489{
490	MPT_FRAME_HDR *mf;
491	LANResetRequest_t *pResetReq;
492	struct mpt_lan_priv *priv = (struct mpt_lan_priv *)dev->priv;
493
494	mf = mpt_get_msg_frame(LanCtx, priv->mpt_dev->id);
495
496	if (mf == NULL) {
497/*		dlprintk((KERN_ERR MYNAM "/reset: Evil funkiness abounds! "
498		"Unable to allocate a request frame.\n"));
499*/
500		return -1;
501	}
502
503	pResetReq = (LANResetRequest_t *) mf;
504
505	pResetReq->Function	= MPI_FUNCTION_LAN_RESET;
506	pResetReq->ChainOffset	= 0;
507	pResetReq->Reserved	= 0;
508	pResetReq->PortNumber	= priv->pnum;
509	pResetReq->MsgFlags	= 0;
510	pResetReq->Reserved2	= 0;
511
512	mpt_put_msg_frame(LanCtx, priv->mpt_dev->id, mf);
513
514	return 0;
515}
516
517/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* net_device->stop hook.  Deregisters event notifications, resets the
 * FW LAN port, waits up to ~2 seconds (2*HZ one-jiffy sleeps) for
 * outstanding receive buckets to come back, then unmaps and frees every
 * buffer still held on either the Rx or Tx side.  Always returns 0. */
static int
mpt_lan_close(struct net_device *dev)
{
	struct mpt_lan_priv *priv = (struct mpt_lan_priv *) dev->priv;
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	unsigned int timeout;
	int i;

	dlprintk((KERN_INFO MYNAM ": mpt_lan_close called\n"));

	mpt_event_deregister(LanCtx);

	dlprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets "
		  "since driver was loaded, %d still out\n",
		  priv->total_posted,atomic_read(&priv->buckets_out)));

	netif_stop_queue(dev);

	/* Ask the FW to return any buckets it still holds... */
	mpt_lan_reset(dev);

	/* ...and give it a bounded amount of time to actually do so. */
	timeout = 2 * HZ;
	while (atomic_read(&priv->buckets_out) && --timeout) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(1);
	}

	/* Reclaim any Rx buckets the FW never returned. */
	for (i = 0; i < priv->max_buckets_out; i++) {
		if (priv->RcvCtl[i].skb != NULL) {
/**/			dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x "
/**/				  "is still out\n", i));
			pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[i].dma,
					 priv->RcvCtl[i].len,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(priv->RcvCtl[i].skb);
		}
	}

	kfree (priv->RcvCtl);
	kfree (priv->mpt_rxfidx);

	/* Likewise for Tx buffers whose completion never arrived. */
	for (i = 0; i < priv->tx_max_out; i++) {
		if (priv->SendCtl[i].skb != NULL) {
			pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[i].dma,
					 priv->SendCtl[i].len,
					 PCI_DMA_TODEVICE);
			dev_kfree_skb(priv->SendCtl[i].skb);
		}
	}

	kfree(priv->SendCtl);
	kfree(priv->mpt_txfidx);

	atomic_set(&priv->buckets_out, 0);

	printk(KERN_INFO MYNAM ": %s/%s: interface down & inactive\n",
			IOC_AND_NETDEV_NAMES_s_s(dev));

	return 0;
}
577
578/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
579static struct net_device_stats *
580mpt_lan_get_stats(struct net_device *dev)
581{
582	struct mpt_lan_priv *priv = (struct mpt_lan_priv *)dev->priv;
583
584	return (struct net_device_stats *) &priv->stats;
585}
586
587/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
588static int
589mpt_lan_change_mtu(struct net_device *dev, int new_mtu)
590{
591	if ((new_mtu < MPT_LAN_MIN_MTU) || (new_mtu > MPT_LAN_MAX_MTU))
592		return -EINVAL;
593	dev->mtu = new_mtu;
594	return 0;
595}
596
597/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
598/* Tx timeout handler. */
599static void
600mpt_lan_tx_timeout(struct net_device *dev)
601{
602	struct mpt_lan_priv *priv = (struct mpt_lan_priv *) dev->priv;
603	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
604
605	if (mpt_dev->active) {
606		dlprintk (("mptlan/tx_timeout: calling netif_wake_queue for %s.\n", dev->name));
607		netif_wake_queue(dev);
608	}
609}
610
611/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
612//static inline int
/* Handle a turbo LAN_SEND completion.  The buffer context packed into
 * 'tmsg' identifies the finished SendCtl slot: bump the Tx counters,
 * unmap the DMA buffer, free the skb, push the context back on the Tx
 * free stack and restart the queue.  Always returns 0 (turbo replies
 * have no request frame for the caller to free — see lan_reply). */
static int
mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
{
	struct mpt_lan_priv *priv = (struct mpt_lan_priv *) dev->priv;
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *sent;
	unsigned long flags;
	u32 ctx;

	ctx = GET_LAN_BUFFER_CONTEXT(tmsg);
	sent = priv->SendCtl[ctx].skb;

	priv->stats.tx_packets++;
	priv->stats.tx_bytes += sent->len;

	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			__FUNCTION__, sent));

	priv->SendCtl[ctx].skb = NULL;
	pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
			 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
	dev_kfree_skb_irq(sent);

	spin_lock_irqsave(&priv->txfidx_lock, flags);
	priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
	spin_unlock_irqrestore(&priv->txfidx_lock, flags);

	netif_wake_queue(dev);
	return 0;
}
644
645/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Handle a full (non-turbo) LAN_SEND reply, which may complete several
 * Tx contexts in one frame.  Per-status accounting is done first, then
 * each completed skb is unmapped, freed, and its context pushed back on
 * the Tx free stack.  Returns 1 when the caller should free the original
 * request frame (i.e. this is not a continuation reply), else 0. */
static int
mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
{
	struct mpt_lan_priv *priv = (struct mpt_lan_priv *) dev->priv;
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *sent;
	unsigned long flags;
	int FreeReqFrame = 0;
	u32 *pContext;
	u32 ctx;
	u8 count;

	count = pSendRep->NumberOfContexts;

	dioprintk((KERN_INFO MYNAM ": send_reply: IOCStatus: %04x\n",
		 le16_to_cpu(pSendRep->IOCStatus)));

	/* Add check for Loginfo Flag in IOCStatus */

	switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) {
	case MPI_IOCSTATUS_SUCCESS:
		priv->stats.tx_packets += count;
		break;

	case MPI_IOCSTATUS_LAN_CANCELED:
	case MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED:
		break;

	case MPI_IOCSTATUS_INVALID_SGL:
		priv->stats.tx_errors += count;
		printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n",
				IOC_AND_NETDEV_NAMES_s_s(dev));
		/* NOTE(review): jumping to out here skips the unmap/free
		 * loop below, so the skbs and Tx contexts for this reply
		 * are never reclaimed — looks like a leak; confirm. */
		goto out;

	default:
		priv->stats.tx_errors += count;
		break;
	}

	pContext = &pSendRep->BufferContext;

	spin_lock_irqsave(&priv->txfidx_lock, flags);
	while (count > 0) {
		ctx = GET_LAN_BUFFER_CONTEXT(le32_to_cpu(*pContext));

		sent = priv->SendCtl[ctx].skb;
		priv->stats.tx_bytes += sent->len;

		dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				__FUNCTION__, sent));

		priv->SendCtl[ctx].skb = NULL;
		pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
				 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
		dev_kfree_skb_irq(sent);

		priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;

		pContext++;
		count--;
	}
	spin_unlock_irqrestore(&priv->txfidx_lock, flags);

out:
	if (!(pSendRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
		FreeReqFrame = 1;

	netif_wake_queue(dev);
	return FreeReqFrame;
}
717
718/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* net_device->hard_start_xmit hook.  Pops a free Tx context, obtains an
 * MPT request frame, DMA-maps the skb, and posts a LANSend request
 * carrying one 64-bit simple SGE.  Returns 0 on success; returns 1 with
 * the queue stopped when no Tx context or request frame is available,
 * so the stack will re-queue the skb. */
static int
mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
{
	struct mpt_lan_priv *priv = (struct mpt_lan_priv *) dev->priv;
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	MPT_FRAME_HDR *mf;
	LANSendRequest_t *pSendReq;
	SGETransaction32_t *pTrans;
	SGESimple64_t *pSimple;
	dma_addr_t dma;
	unsigned long flags;
	int ctx;
	u16 cur_naa = 0x1000;	/* default NAA word; workaround below may override */

	dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
			__FUNCTION__, skb));

	spin_lock_irqsave(&priv->txfidx_lock, flags);
	if (priv->mpt_txfidx_tail < 0) {
		/* No free Tx contexts: throttle the stack until a send
		 * completion returns one. */
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&priv->txfidx_lock, flags);

		printk (KERN_ERR "%s: no tx context available: %u\n",
			__FUNCTION__, priv->mpt_txfidx_tail);
		return 1;
	}

	mf = mpt_get_msg_frame(LanCtx, mpt_dev->id);
	if (mf == NULL) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&priv->txfidx_lock, flags);

		printk (KERN_ERR "%s: Unable to alloc request frame\n",
			__FUNCTION__);
		return 1;
	}

	ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];
	spin_unlock_irqrestore(&priv->txfidx_lock, flags);

//	dioprintk((KERN_INFO MYNAM ": %s/%s: Creating new msg frame (send).\n",
//			IOC_AND_NETDEV_NAMES_s_s(dev)));

	pSendReq = (LANSendRequest_t *) mf;

	/* Set the mac.raw pointer, since this apparently isn't getting
	 * done before we get the skb. Pull the data pointer past the mac data.
	 */
	skb->mac.raw = skb->data;
	skb_pull(skb, 12);

        dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len,
			     PCI_DMA_TODEVICE);

	/* Record skb/mapping so the completion path can unmap and free. */
	priv->SendCtl[ctx].skb = skb;
	priv->SendCtl[ctx].dma = dma;
	priv->SendCtl[ctx].len = skb->len;

	/* Message Header */
	pSendReq->Reserved    = 0;
	pSendReq->Function    = MPI_FUNCTION_LAN_SEND;
	pSendReq->ChainOffset = 0;
	pSendReq->Reserved2   = 0;
	pSendReq->MsgFlags    = 0;
	pSendReq->PortNumber  = priv->pnum;

	/* Transaction Context Element */
	pTrans = (SGETransaction32_t *) pSendReq->SG_List;

	/* No Flags, 8 bytes of Details, 32bit Context (bloody turbo replies) */
	pTrans->ContextSize   = sizeof(u32);
	pTrans->DetailsLength = 2 * sizeof(u32);
	pTrans->Flags         = 0;
	pTrans->TransactionContext[0] = cpu_to_le32(ctx);

//	dioprintk((KERN_INFO MYNAM ": %s/%s: BC = %08x, skb = %p, buff = %p\n",
//			IOC_AND_NETDEV_NAMES_s_s(dev),
//			ctx, skb, skb->data));

#ifdef QLOGIC_NAA_WORKAROUND
{
	struct NAA_Hosed *nh;

	/* Munge the NAA for Tx packets to QLogic boards, which don't follow
	   RFC 2625. The longer I look at this, the more my opinion of Qlogic
	   drops. */
	read_lock_irq(&bad_naa_lock);
	for (nh = mpt_bad_naa; nh != NULL; nh=nh->next) {
		if ((nh->ieee[0] == skb->mac.raw[0]) &&
		    (nh->ieee[1] == skb->mac.raw[1]) &&
		    (nh->ieee[2] == skb->mac.raw[2]) &&
		    (nh->ieee[3] == skb->mac.raw[3]) &&
		    (nh->ieee[4] == skb->mac.raw[4]) &&
		    (nh->ieee[5] == skb->mac.raw[5])) {
			cur_naa = nh->NAA;
			dlprintk ((KERN_INFO "mptlan/sdu_send: using NAA value "
				  "= %04x.\n", cur_naa));
			break;
		}
	}
	read_unlock_irq(&bad_naa_lock);
}
#endif

	/* Transaction details: NAA word plus the destination IEEE address
	 * packed into two 32-bit words. */
	pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa         << 16) |
						    (skb->mac.raw[0] <<  8) |
						    (skb->mac.raw[1] <<  0));
	pTrans->TransactionDetails[1] = cpu_to_le32((skb->mac.raw[2] << 24) |
						    (skb->mac.raw[3] << 16) |
						    (skb->mac.raw[4] <<  8) |
						    (skb->mac.raw[5] <<  0));

	pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2];

	/* If we ever decide to send more than one Simple SGE per LANSend, then
	   we will need to make sure that LAST_ELEMENT only gets set on the
	   last one. Otherwise, bad voodoo and evil funkiness will commence. */
	pSimple->FlagsLength = cpu_to_le32(
			((MPI_SGE_FLAGS_LAST_ELEMENT |
			  MPI_SGE_FLAGS_END_OF_BUFFER |
			  MPI_SGE_FLAGS_SIMPLE_ELEMENT |
			  MPI_SGE_FLAGS_SYSTEM_ADDRESS |
			  MPI_SGE_FLAGS_HOST_TO_IOC |
			  MPI_SGE_FLAGS_64_BIT_ADDRESSING |
			  MPI_SGE_FLAGS_END_OF_LIST) << MPI_SGE_FLAGS_SHIFT) |
			skb->len);
	pSimple->Address.Low = cpu_to_le32((u32) dma);
	if (sizeof(dma_addr_t) > sizeof(u32))
		pSimple->Address.High = cpu_to_le32((u32) ((u64) dma >> 32));
	else
		pSimple->Address.High = 0;

	mpt_put_msg_frame (LanCtx, mpt_dev->id, mf);
	dev->trans_start = jiffies;

	dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. FlagsLength = %08x.\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			le32_to_cpu(pSimple->FlagsLength)));

	return 0;
}
860
861/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
862static inline void
863mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority)
864/*
865 * @priority: 0 = put it on the timer queue, 1 = put it on the immediate queue
866 */
867{
868	struct mpt_lan_priv *priv = dev->priv;
869
870	if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
871		if (priority) {
872			queue_task(&priv->post_buckets_task, &tq_immediate);
873			mark_bh(IMMEDIATE_BH);
874		} else {
875			queue_task(&priv->post_buckets_task, &tq_timer);
876			dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
877				   "timer.\n"));
878		}
879	        dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n",
880			   IOC_AND_NETDEV_NAMES_s_s(dev) ));
881	}
882}
883
884/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
885static inline int
886mpt_lan_receive_skb(struct net_device *dev, struct sk_buff *skb)
887{
888	struct mpt_lan_priv *priv = dev->priv;
889
890	skb->protocol = mpt_lan_type_trans(skb, dev);
891
892	dioprintk((KERN_INFO MYNAM ": %s/%s: Incoming packet (%d bytes) "
893		 "delivered to upper level.\n",
894			IOC_AND_NETDEV_NAMES_s_s(dev), skb->len));
895
896	priv->stats.rx_bytes += skb->len;
897	priv->stats.rx_packets++;
898
899	skb->dev = dev;
900	netif_rx(skb);
901
902	dioprintk((MYNAM "/receive_skb: %d buckets remaining\n",
903		 atomic_read(&priv->buckets_out)));
904
905	if (atomic_read(&priv->buckets_out) < priv->bucketthresh)
906		mpt_lan_wake_post_buckets_task(dev, 1);
907
908	dioprintk((KERN_INFO MYNAM "/receive_post_reply: %d buckets "
909		  "remaining, %d received back since sod\n",
910		  atomic_read(&priv->buckets_out), priv->total_received));
911
912	return 0;
913}
914
915/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
916//static inline int
/* Handle a turbo LAN_RECEIVE completion: one bucket, whose context and
 * packet length are packed into 'tmsg'.  Packets shorter than
 * MPT_LAN_RX_COPYBREAK are copied into a fresh skb so the bucket (still
 * DMA-mapped) can be reposted as-is; larger packets hand the bucket skb
 * itself up the stack.  Returns mpt_lan_receive_skb()'s result, or
 * -ENOMEM if the copy skb could not be allocated. */
static int
mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
{
	struct mpt_lan_priv *priv = dev->priv;
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *skb, *old_skb;
	unsigned long flags;
	u32 ctx, len;

	ctx = GET_LAN_BUCKET_CONTEXT(tmsg);
	skb = priv->RcvCtl[ctx].skb;

	len = GET_LAN_PACKET_LENGTH(tmsg);

	if (len < MPT_LAN_RX_COPYBREAK) {
		old_skb = skb;

		skb = (struct sk_buff *)dev_alloc_skb(len);
		if (!skb) {
			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
					IOC_AND_NETDEV_NAMES_s_s(dev),
					__FILE__, __LINE__);
			/* NOTE(review): this path neither returns ctx to the
			 * free stack nor decrements buckets_out, so the
			 * bucket stays "out" — confirm that is intended. */
			return -ENOMEM;
		}

		pci_dma_sync_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
				    priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);

		memcpy(skb_put(skb, len), old_skb->data, len);

		/* Bucket skb stays in RcvCtl[ctx], still mapped, ready to
		 * be reposted. */
		goto out;
	}

	skb_put(skb, len);

	/* Handing the bucket skb itself upstream: detach and unmap it. */
	priv->RcvCtl[ctx].skb = NULL;

	pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
			 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);

out:
	spin_lock_irqsave(&priv->rxfidx_lock, flags);
	priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
	spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	atomic_dec(&priv->buckets_out);
	priv->total_received++;

	return mpt_lan_receive_skb(dev, skb);
}
967
968/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Free buckets that the IOC has returned unused (e.g. after a canceled
 * LAN receive): for each context in the reply, unmap the bucket, free
 * its skb and push the context back on the Rx free stack.  Always
 * returns 0. */
static int
mpt_lan_receive_post_free(struct net_device *dev,
			  LANReceivePostReply_t *pRecvRep)
{
	struct mpt_lan_priv *priv = dev->priv;
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	unsigned long flags;
	struct sk_buff *skb;
	u32 ctx;
	int count;
	int i;

	count = pRecvRep->NumberOfContexts;

/**/	dlprintk((KERN_INFO MYNAM "/receive_post_reply: "
		  "IOC returned %d buckets, freeing them...\n", count));

	spin_lock_irqsave(&priv->rxfidx_lock, flags);
	for (i = 0; i < count; i++) {
		ctx = le32_to_cpu(pRecvRep->BucketContext[i]);

		skb = priv->RcvCtl[ctx].skb;

//		dlprintk((KERN_INFO MYNAM ": %s: dev_name = %s\n",
//				IOC_AND_NETDEV_NAMES_s_s(dev)));
//		dlprintk((KERN_INFO MYNAM "@rpr[2], priv = %p, buckets_out addr = %p",
//				priv, &(priv->buckets_out)));
//		dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n"));

		priv->RcvCtl[ctx].skb = NULL;
		pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
				 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(skb);

		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
	}
	spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	atomic_sub(count, &priv->buckets_out);

//	for (i = 0; i < priv->max_buckets_out; i++)
//		if (priv->RcvCtl[i].skb != NULL)
//			dlprintk((KERN_INFO MYNAM "@rpr: bucket %03x "
//				  "is still out\n", i));

/*	dlprintk((KERN_INFO MYNAM "/receive_post_reply: freed %d buckets\n",
		  count));
*/
/**/	dlprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets "
/**/		  "remaining, %d received back since sod.\n",
/**/		  atomic_read(&priv->buckets_out), priv->total_received));
	return 0;
}
1022
1023/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_receive_post_reply - process a non-TURBO ReceivePostReply and
 * hand the received packet up to the network stack.
 * @dev: net device owning the receive buckets.
 * @pRecvRep: reply frame from the IOC describing the packet and the
 *	bucket context(s) that hold its data.
 *
 * Returns the result of mpt_lan_receive_skb(), -1 for a zero-length
 * packet, or -ENOMEM if a copy skb cannot be allocated.
 */
static int
mpt_lan_receive_post_reply(struct net_device *dev,
			   LANReceivePostReply_t *pRecvRep)
{
	struct mpt_lan_priv *priv = dev->priv;
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *skb, *old_skb;
	unsigned long flags;
	u32 len, ctx, offset;
	u32 remaining = le32_to_cpu(pRecvRep->BucketsRemaining);
	int count;
	int i, l;

	dioprintk((KERN_INFO MYNAM ": mpt_lan_receive_post_reply called\n"));
	dioprintk((KERN_INFO MYNAM ": receive_post_reply: IOCStatus: %04x\n",
		 le16_to_cpu(pRecvRep->IOCStatus)));

	/* A canceled receive means the IOC is merely returning buckets,
	 * not delivering data -- free them instead of receiving. */
	if ((le16_to_cpu(pRecvRep->IOCStatus) & MPI_IOCSTATUS_MASK) ==
						MPI_IOCSTATUS_LAN_CANCELED)
		return mpt_lan_receive_post_free(dev, pRecvRep);

	len = le32_to_cpu(pRecvRep->PacketLength);
	if (len == 0) {
		printk (KERN_ERR MYNAM ": %s/%s: ERROR - Got a non-TURBO "
			"ReceivePostReply w/ PacketLength zero!\n",
				IOC_AND_NETDEV_NAMES_s_s(dev));
		printk (KERN_ERR MYNAM ": MsgFlags = %02x, IOCStatus = %04x\n",
				pRecvRep->MsgFlags, le16_to_cpu(pRecvRep->IOCStatus));
		return -1;
	}

	ctx    = le32_to_cpu(pRecvRep->BucketContext[0]);
	count  = pRecvRep->NumberOfContexts;
	skb    = priv->RcvCtl[ctx].skb;

	/* offset is logged below but otherwise unused here. */
	offset = le32_to_cpu(pRecvRep->PacketOffset);
//	if (offset != 0) {
//		printk (KERN_INFO MYNAM ": %s/%s: Got a ReceivePostReply "
//			"w/ PacketOffset %u\n",
//				IOC_AND_NETDEV_NAMES_s_s(dev),
//				offset);
//	}

	dioprintk((KERN_INFO MYNAM ": %s/%s: @rpr, offset = %d, len = %d\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			offset, len));

	if (count > 1) {
		/* Packet spans several buckets: allocate a fresh skb and
		 * concatenate the pieces into it.  The bucket skbs stay
		 * mapped (sync only) and their contexts are recycled so
		 * the buffers can be reposted as-is. */
		int szrem = len;

//		dioprintk((KERN_INFO MYNAM ": %s/%s: Multiple buckets returned "
//			"for single packet, concatenating...\n",
//				IOC_AND_NETDEV_NAMES_s_s(dev)));

		skb = (struct sk_buff *)dev_alloc_skb(len);
		if (!skb) {
			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
					IOC_AND_NETDEV_NAMES_s_s(dev),
					__FILE__, __LINE__);
			return -ENOMEM;
		}

		spin_lock_irqsave(&priv->rxfidx_lock, flags);
		for (i = 0; i < count; i++) {

			ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
			old_skb = priv->RcvCtl[ctx].skb;

			/* Last bucket may be only partially filled. */
			l = priv->RcvCtl[ctx].len;
			if (szrem < l)
				l = szrem;

//			dioprintk((KERN_INFO MYNAM ": %s/%s: Buckets = %d, len = %u\n",
//					IOC_AND_NETDEV_NAMES_s_s(dev),
//					i, l));

			/* Make the device's writes visible to the CPU
			 * before copying out of the bucket. */
			pci_dma_sync_single(mpt_dev->pcidev,
					    priv->RcvCtl[ctx].dma,
					    priv->RcvCtl[ctx].len,
					    PCI_DMA_FROMDEVICE);
			memcpy(skb_put(skb, l), old_skb->data, l);

			priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
			szrem -= l;
		}
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	} else if (len < MPT_LAN_RX_COPYBREAK) {
		/* Small packet: copy into a right-sized skb and keep the
		 * (still mapped) bucket for reuse. */

		old_skb = skb;

		skb = (struct sk_buff *)dev_alloc_skb(len);
		if (!skb) {
			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
					IOC_AND_NETDEV_NAMES_s_s(dev),
					__FILE__, __LINE__);
			return -ENOMEM;
		}

		pci_dma_sync_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
				    priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);

		memcpy(skb_put(skb, len), old_skb->data, len);

		spin_lock_irqsave(&priv->rxfidx_lock, flags);
		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	} else {
		/* Large packet: hand the bucket skb itself up the stack;
		 * detach it from the context and unmap it. */
		spin_lock_irqsave(&priv->rxfidx_lock, flags);

		priv->RcvCtl[ctx].skb = NULL;

		pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
				 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
		priv->RcvCtl[ctx].dma = 0;

		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

		skb_put(skb,len);
	}

	atomic_sub(count, &priv->buckets_out);
	priv->total_received += count;

	/* Sanity check: the free-context stack must never grow past the
	 * number of buckets we can have outstanding. */
	if (priv->mpt_rxfidx_tail >= MPT_LAN_MAX_BUCKETS_OUT) {
		printk (KERN_ERR MYNAM ": %s/%s: Yoohoo! mpt_rxfidx_tail = %d, "
			"MPT_LAN_MAX_BUCKETS_OUT = %d\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				priv->mpt_rxfidx_tail,
				MPT_LAN_MAX_BUCKETS_OUT);

		panic("Damn it Jim! I'm a doctor, not a programmer! "
				"Oh, wait a sec, I am a programmer. "
				"And, who's Jim?!?!\n"
				"Arrgghh! We've done it again!\n");
	}

	if (remaining == 0)
		printk (KERN_WARNING MYNAM ": %s/%s: WARNING - IOC out of buckets! "
			"(priv->buckets_out = %d)\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			atomic_read(&priv->buckets_out));
	else if (remaining < 10)
		printk (KERN_INFO MYNAM ": %s/%s: IOC says %d buckets left. "
			"(priv->buckets_out = %d)\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			remaining, atomic_read(&priv->buckets_out));

	/* If our bucket accounting and the firmware's disagree by too
	 * much, assume the firmware hashtable is corrupt and reset the
	 * LAN port, then repost buckets. */
	if ((remaining < priv->bucketthresh) &&
	    ((atomic_read(&priv->buckets_out) - remaining) >
	     MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH)) {

		printk (KERN_WARNING MYNAM " Mismatch between driver's "
			"buckets_out count and fw's BucketsRemaining "
			"count has crossed the threshold, issuing a "
			"LanReset to clear the fw's hashtable. You may "
			"want to check your /var/log/messages for \"CRC "
			"error\" event notifications.\n");

		mpt_lan_reset(dev);
		mpt_lan_wake_post_buckets_task(dev, 0);
	}

	return mpt_lan_receive_skb(dev, skb);
}
1191
1192/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1193/* Simple SGE's only at the moment */
1194
/*
 * mpt_lan_post_receive_buckets - replenish the IOC's receive buffers.
 * @dev_id: the struct net_device, passed as void* (task-queue callback).
 *
 * Posts LANReceivePost request frames until priv->max_buckets_out
 * buckets are outstanding, allocating/mapping skbs as needed.  Each
 * request frame carries as many buckets as fit in a message frame,
 * each described by a 32-bit transaction context plus one 64-bit
 * simple SGE.
 */
static void
mpt_lan_post_receive_buckets(void *dev_id)
{
	struct net_device *dev = dev_id;
	struct mpt_lan_priv *priv = dev->priv;
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	MPT_FRAME_HDR *mf;
	LANReceivePostRequest_t *pRecvReq;
	SGETransaction32_t *pTrans;
	SGESimple64_t *pSimple;
	struct sk_buff *skb;
	dma_addr_t dma;
	u32 curr, buckets, count, max;
	/* Bucket size: MTU + link header + 4 (extra trailer room --
	 * presumably for the FC CRC; TODO confirm). */
	u32 len = (dev->mtu + dev->hard_header_len + 4);
	unsigned long flags;
	int i;

	curr = atomic_read(&priv->buckets_out);
	buckets = (priv->max_buckets_out - curr);

	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			__FUNCTION__, buckets, curr));

	/* Max buckets per request frame, bounded by the frame size. */
	max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
			(MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t));

	while (buckets) {
		mf = mpt_get_msg_frame(LanCtx, mpt_dev->id);
		if (mf == NULL) {
			printk (KERN_ERR "%s: Unable to alloc request frame\n",
				__FUNCTION__);
			dioprintk((KERN_ERR "%s: %u buckets remaining\n",
				 __FUNCTION__, buckets));
			goto out;
		}
		pRecvReq = (LANReceivePostRequest_t *) mf;

		count = buckets;
		if (count > max)
			count = max;

		pRecvReq->Function    = MPI_FUNCTION_LAN_RECEIVE;
		pRecvReq->ChainOffset = 0;
		pRecvReq->MsgFlags    = 0;
		pRecvReq->PortNumber  = priv->pnum;

		pTrans = (SGETransaction32_t *) pRecvReq->SG_List;
		pSimple = NULL;

		for (i = 0; i < count; i++) {
			int ctx;

			/* Pop a free context index; lock protects the
			 * rxfidx stack and the RcvCtl slot updates. */
			spin_lock_irqsave(&priv->rxfidx_lock, flags);
			if (priv->mpt_rxfidx_tail < 0) {
				printk (KERN_ERR "%s: Can't alloc context\n",
					__FUNCTION__);
				spin_unlock_irqrestore(&priv->rxfidx_lock,
						       flags);
				break;
			}

			ctx = priv->mpt_rxfidx[priv->mpt_rxfidx_tail--];

			/* Reuse the context's skb if it matches the
			 * current bucket length; otherwise unmap and
			 * free it so a fresh one is allocated below
			 * (len changes when the MTU changes). */
			skb = priv->RcvCtl[ctx].skb;
			if (skb && (priv->RcvCtl[ctx].len != len)) {
				pci_unmap_single(mpt_dev->pcidev,
						 priv->RcvCtl[ctx].dma,
						 priv->RcvCtl[ctx].len,
						 PCI_DMA_FROMDEVICE);
				dev_kfree_skb(priv->RcvCtl[ctx].skb);
				skb = priv->RcvCtl[ctx].skb = NULL;
			}

			if (skb == NULL) {
				skb = dev_alloc_skb(len);
				if (skb == NULL) {
					printk (KERN_WARNING
						MYNAM "/%s: Can't alloc skb\n",
						__FUNCTION__);
					/* Put the context back before
					 * bailing out of this frame. */
					priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
					spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
					break;
				}

				dma = pci_map_single(mpt_dev->pcidev, skb->data,
						     len, PCI_DMA_FROMDEVICE);

				priv->RcvCtl[ctx].skb = skb;
				priv->RcvCtl[ctx].dma = dma;
				priv->RcvCtl[ctx].len = len;
			}

			spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

			/* Transaction element: carries the bucket context
			 * the IOC will echo back in the reply. */
			pTrans->ContextSize   = sizeof(u32);
			pTrans->DetailsLength = 0;
			pTrans->Flags         = 0;
			pTrans->TransactionContext[0] = cpu_to_le32(ctx);

			pSimple = (SGESimple64_t *) pTrans->TransactionDetails;

			pSimple->FlagsLength = cpu_to_le32(
				((MPI_SGE_FLAGS_END_OF_BUFFER |
				  MPI_SGE_FLAGS_SIMPLE_ELEMENT |
				  MPI_SGE_FLAGS_64_BIT_ADDRESSING) << MPI_SGE_FLAGS_SHIFT) | len);
			pSimple->Address.Low = cpu_to_le32((u32) priv->RcvCtl[ctx].dma);
			if (sizeof(dma_addr_t) > sizeof(u32))
				pSimple->Address.High = cpu_to_le32((u32) ((u64) priv->RcvCtl[ctx].dma >> 32));
			else
				pSimple->Address.High = 0;

			pTrans = (SGETransaction32_t *) (pSimple + 1);
		}

		/* No SGE was built (no contexts/skbs available): return
		 * the unused message frame instead of posting it. */
		if (pSimple == NULL) {
/**/			printk (KERN_WARNING MYNAM "/%s: No buckets posted\n",
/**/				__FUNCTION__);
			mpt_free_msg_frame(LanCtx, mpt_dev->id, mf);
			goto out;
		}

		/* Mark the last SGE as end-of-list. */
		pSimple->FlagsLength |= cpu_to_le32(MPI_SGE_FLAGS_END_OF_LIST << MPI_SGE_FLAGS_SHIFT);

		pRecvReq->BucketCount = cpu_to_le32(i);

/*	printk(KERN_INFO MYNAM ": posting buckets\n   ");
 *	for (i = 0; i < j + 2; i ++)
 *	    printk (" %08x", le32_to_cpu(msg[i]));
 *	printk ("\n");
 */

		mpt_put_msg_frame(LanCtx, mpt_dev->id, mf);

		priv->total_posted += i;
		buckets -= i;
		atomic_add(i, &priv->buckets_out);
	}

out:
	dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n",
		  __FUNCTION__, buckets, atomic_read(&priv->buckets_out)));
	dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n",
	__FUNCTION__, priv->total_posted, priv->total_received));

	/* Allow mpt_lan_wake_post_buckets_task() to schedule us again. */
	clear_bit(0, &priv->post_buckets_active);
}
1342
1343/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1344struct net_device *
1345mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
1346{
1347	struct net_device *dev = NULL;
1348	struct mpt_lan_priv *priv = NULL;
1349	u8 HWaddr[FC_ALEN], *a;
1350
1351	dev = init_fcdev(NULL, sizeof(struct mpt_lan_priv));
1352	if (!dev)
1353		return (NULL);
1354	dev->mtu = MPT_LAN_MTU;
1355
1356	priv = (struct mpt_lan_priv *) dev->priv;
1357
1358	priv->mpt_dev = mpt_dev;
1359	priv->pnum = pnum;
1360
1361	memset(&priv->post_buckets_task, 0, sizeof(struct tq_struct));
1362	priv->post_buckets_task.routine = mpt_lan_post_receive_buckets;
1363	priv->post_buckets_task.data = dev;
1364	priv->post_buckets_active = 0;
1365
1366	dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
1367			__LINE__, dev->mtu + dev->hard_header_len + 4));
1368
1369	atomic_set(&priv->buckets_out, 0);
1370	priv->total_posted = 0;
1371	priv->total_received = 0;
1372	priv->max_buckets_out = max_buckets_out;
1373	if (mpt_dev->pfacts[0].MaxLanBuckets < max_buckets_out)
1374		priv->max_buckets_out = mpt_dev->pfacts[0].MaxLanBuckets;
1375
1376	dlprintk((KERN_INFO MYNAM "@%d: MaxLanBuckets=%d, max_buckets_out/priv=%d/%d\n",
1377			__LINE__,
1378			mpt_dev->pfacts[0].MaxLanBuckets,
1379			max_buckets_out,
1380			priv->max_buckets_out));
1381
1382	priv->bucketthresh = priv->max_buckets_out * 2 / 3;
1383	priv->txfidx_lock = SPIN_LOCK_UNLOCKED;
1384	priv->rxfidx_lock = SPIN_LOCK_UNLOCKED;
1385
1386	memset(&priv->stats, 0, sizeof(priv->stats));
1387
1388	/*  Grab pre-fetched LANPage1 stuff. :-) */
1389	a = (u8 *) &mpt_dev->lan_cnfg_page1.HardwareAddressLow;
1390
1391	HWaddr[0] = a[5];
1392	HWaddr[1] = a[4];
1393	HWaddr[2] = a[3];
1394	HWaddr[3] = a[2];
1395	HWaddr[4] = a[1];
1396	HWaddr[5] = a[0];
1397
1398	dev->addr_len = FC_ALEN;
1399	memcpy(dev->dev_addr, HWaddr, FC_ALEN);
1400	memset(dev->broadcast, 0xff, FC_ALEN);
1401
1402	/* The Tx queue is 127 deep on the 909.
1403	 * Give ourselves some breathing room.
1404	 */
1405	priv->tx_max_out = (tx_max_out_p <= MPT_TX_MAX_OUT_LIM) ?
1406			    tx_max_out_p : MPT_TX_MAX_OUT_LIM;
1407
1408	dev->open = mpt_lan_open;
1409	dev->stop = mpt_lan_close;
1410	dev->get_stats = mpt_lan_get_stats;
1411	dev->set_multicast_list = NULL;
1412	dev->change_mtu = mpt_lan_change_mtu;
1413	dev->hard_start_xmit = mpt_lan_sdu_send;
1414
1415/* Not in 2.3.42. Need 2.3.45+ */
1416	dev->tx_timeout = mpt_lan_tx_timeout;
1417	dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT;
1418
1419	dlprintk((KERN_INFO MYNAM ": Finished registering dev "
1420		"and setting initial values\n"));
1421
1422	SET_MODULE_OWNER(dev);
1423
1424	return dev;
1425}
1426
1427/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1428int __init
1429mpt_lan_init (void)
1430{
1431	struct net_device *dev;
1432	MPT_ADAPTER *curadapter;
1433	int i, j;
1434
1435	show_mptmod_ver(LANAME, LANVER);
1436
1437#ifdef QLOGIC_NAA_WORKAROUND
1438	/* Init the global r/w lock for the bad_naa list. We want to do this
1439	   before any boards are initialized and may be used. */
1440	rwlock_init(&bad_naa_lock);
1441#endif
1442
1443	if ((LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER)) <= 0) {
1444		printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n");
1445		return -EBUSY;
1446	}
1447
1448	/* Set the callback index to be used by driver core for turbo replies */
1449	mpt_lan_index = LanCtx;
1450
1451	dlprintk((KERN_INFO MYNAM ": assigned context of %d\n", LanCtx));
1452
1453	if (mpt_reset_register(LanCtx, mpt_lan_ioc_reset) == 0) {
1454		dlprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n"));
1455	} else {
1456		printk(KERN_ERR MYNAM ": Eieee! unable to register a reset "
1457		       "handler with mptbase! The world is at an end! "
1458		       "Everything is fading to black! Goodbye.\n");
1459		return -EBUSY;
1460	}
1461
1462	for (j = 0; j < MPT_MAX_ADAPTERS; j++) {
1463		mpt_landev[j] = NULL;
1464	}
1465
1466	curadapter = mpt_adapter_find_first();
1467	while (curadapter != NULL) {
1468		for (i = 0; i < curadapter->facts.NumberOfPorts; i++) {
1469			printk (KERN_INFO MYNAM ": %s: PortNum=%x, ProtocolFlags=%02Xh (%c%c%c%c)\n",
1470					curadapter->name,
1471					curadapter->pfacts[i].PortNumber,
1472					curadapter->pfacts[i].ProtocolFlags,
1473					MPT_PROTOCOL_FLAGS_c_c_c_c(curadapter->pfacts[i].ProtocolFlags));
1474
1475			if (curadapter->pfacts[i].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) {
1476				dev = mpt_register_lan_device (curadapter, i);
1477				if (dev != NULL) {
1478					printk (KERN_INFO MYNAM ": %s: Fusion MPT LAN device registered as '%s'\n",
1479							curadapter->name, dev->name);
1480					printk (KERN_INFO MYNAM ": %s/%s: LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
1481							IOC_AND_NETDEV_NAMES_s_s(dev),
1482							dev->dev_addr[0], dev->dev_addr[1],
1483							dev->dev_addr[2], dev->dev_addr[3],
1484							dev->dev_addr[4], dev->dev_addr[5]);
1485//					printk (KERN_INFO MYNAM ": %s/%s: Max_TX_outstanding = %d\n",
1486//							IOC_AND_NETDEV_NAMES_s_s(dev),
1487//							NETDEV_TO_LANPRIV_PTR(dev)->tx_max_out);
1488					j = curadapter->id;
1489					mpt_landev[j] = dev;
1490					dlprintk((KERN_INFO MYNAM "/init: dev_addr=%p, mpt_landev[%d]=%p\n",
1491							dev, j,  mpt_landev[j]));
1492
1493				} else {
1494					printk (KERN_ERR MYNAM ": %s: Unable to register port%d as a LAN device\n",
1495							curadapter->name,
1496							curadapter->pfacts[i].PortNumber);
1497				}
1498			} else {
1499				printk (KERN_INFO MYNAM ": %s: Hmmm... LAN protocol seems to be disabled on this adapter port!\n",
1500						curadapter->name);
1501			}
1502		}
1503		curadapter = mpt_adapter_find_next(curadapter);
1504	}
1505
1506	return 0;
1507}
1508
1509/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1510void __init mpt_lan_exit(void)
1511{
1512	int i;
1513
1514	mpt_reset_deregister(LanCtx);
1515
1516	for (i = 0; mpt_landev[i] != NULL; i++) {
1517		struct net_device *dev = mpt_landev[i];
1518
1519		printk (KERN_INFO MYNAM ": %s/%s: Fusion MPT LAN device unregistered\n",
1520			       IOC_AND_NETDEV_NAMES_s_s(dev));
1521		unregister_fcdev(dev);
1522		mpt_landev[i] = (struct net_device *) 0xdeadbeef; /* Debug */
1523	}
1524
1525	if (LanCtx >= 0) {
1526		mpt_deregister(LanCtx);
1527		LanCtx = -1;
1528		mpt_lan_index = 0;
1529	}
1530
1531	/* deregister any send/receive handler structs. I2Oism? */
1532}
1533
1534/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1535
/* Load-time tunables (see mpt_register_lan_device for how they are
 * clamped per port). */
MODULE_PARM(tx_max_out_p, "i");		/* max outstanding Tx frames per port */
MODULE_PARM(max_buckets_out, "i"); // Debug stuff. FIXME!

module_init(mpt_lan_init);
module_exit(mpt_lan_exit);
1541
1542/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1543static unsigned short
1544mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
1545{
1546	struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data;
1547	struct fcllc *fcllc;
1548
1549	skb->mac.raw = skb->data;
1550	skb_pull(skb, sizeof(struct mpt_lan_ohdr));
1551
1552	if (fch->dtype == htons(0xffff)) {
1553		u32 *p = (u32 *) fch;
1554
1555		swab32s(p + 0);
1556		swab32s(p + 1);
1557		swab32s(p + 2);
1558		swab32s(p + 3);
1559
1560		printk (KERN_WARNING MYNAM ": %s: WARNING - Broadcast swap F/W bug detected!\n",
1561				NETDEV_PTR_TO_IOC_NAME_s(dev));
1562		printk (KERN_WARNING MYNAM ": Please update sender @ MAC_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
1563				fch->saddr[0], fch->saddr[1], fch->saddr[2],
1564				fch->saddr[3], fch->saddr[4], fch->saddr[5]);
1565	}
1566
1567	if (*fch->daddr & 1) {
1568		if (!memcmp(fch->daddr, dev->broadcast, FC_ALEN)) {
1569			skb->pkt_type = PACKET_BROADCAST;
1570		} else {
1571			skb->pkt_type = PACKET_MULTICAST;
1572		}
1573	} else {
1574		if (memcmp(fch->daddr, dev->dev_addr, FC_ALEN)) {
1575			skb->pkt_type = PACKET_OTHERHOST;
1576		} else {
1577			skb->pkt_type = PACKET_HOST;
1578		}
1579	}
1580
1581	fcllc = (struct fcllc *)skb->data;
1582
1583#ifdef QLOGIC_NAA_WORKAROUND
1584{
1585	u16 source_naa = fch->stype, found = 0;
1586
1587
1588	if ((source_naa & 0xF000) == 0)
1589		source_naa = swab16(source_naa);
1590
1591	if (fcllc->ethertype == htons(ETH_P_ARP))
1592	    dlprintk ((KERN_INFO "mptlan/type_trans: got arp req/rep w/ naa of "
1593		      "%04x.\n", source_naa));
1594
1595	if ((fcllc->ethertype == htons(ETH_P_ARP)) &&
1596	   ((source_naa >> 12) !=  MPT_LAN_NAA_RFC2625)){
1597		struct NAA_Hosed *nh, *prevnh;
1598		int i;
1599
1600		dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep from "
1601			  "system with non-RFC 2625 NAA value (%04x).\n",
1602			  source_naa));
1603
1604		write_lock_irq(&bad_naa_lock);
1605		for (prevnh = nh = mpt_bad_naa; nh != NULL;
1606		     prevnh=nh, nh=nh->next) {
1607			if ((nh->ieee[0] == fch->saddr[0]) &&
1608			    (nh->ieee[1] == fch->saddr[1]) &&
1609			    (nh->ieee[2] == fch->saddr[2]) &&
1610			    (nh->ieee[3] == fch->saddr[3]) &&
1611			    (nh->ieee[4] == fch->saddr[4]) &&
1612			    (nh->ieee[5] == fch->saddr[5])) {
1613				found = 1;
1614				dlprintk ((KERN_INFO "mptlan/type_trans: ARP Re"
1615					 "q/Rep w/ bad NAA from system already"
1616					 " in DB.\n"));
1617				break;
1618			}
1619		}
1620
1621		if ((!found) && (nh == NULL)) {
1622
1623			nh = kmalloc(sizeof(struct NAA_Hosed), GFP_KERNEL);
1624			dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep w/"
1625				 " bad NAA from system not yet in DB.\n"));
1626
1627			if (nh != NULL) {
1628				nh->next = NULL;
1629				if (!mpt_bad_naa)
1630					mpt_bad_naa = nh;
1631				if (prevnh)
1632					prevnh->next = nh;
1633
1634				nh->NAA = source_naa; /* Set the S_NAA value. */
1635				for (i = 0; i < FC_ALEN; i++)
1636					nh->ieee[i] = fch->saddr[i];
1637				dlprintk ((KERN_INFO "Got ARP from %02x:%02x:%02x:%02x:"
1638					  "%02x:%02x with non-compliant S_NAA value.\n",
1639					  fch->saddr[0], fch->saddr[1], fch->saddr[2],
1640					  fch->saddr[3], fch->saddr[4],fch->saddr[5]));
1641			} else {
1642				printk (KERN_ERR "mptlan/type_trans: Unable to"
1643					" kmalloc a NAA_Hosed struct.\n");
1644			}
1645		} else if (!found) {
1646			printk (KERN_ERR "mptlan/type_trans: found not"
1647				" set, but nh isn't null. Evil "
1648				"funkiness abounds.\n");
1649		}
1650		write_unlock_irq(&bad_naa_lock);
1651	}
1652}
1653#endif
1654
1655	/* Strip the SNAP header from ARP packets since we don't
1656	 * pass them through to the 802.2/SNAP layers.
1657	 */
1658	if (fcllc->dsap == EXTENDED_SAP &&
1659		(fcllc->ethertype == htons(ETH_P_IP) ||
1660		 fcllc->ethertype == htons(ETH_P_ARP))) {
1661		skb_pull(skb, sizeof(struct fcllc));
1662		return fcllc->ethertype;
1663	}
1664
1665	return htons(ETH_P_802_2);
1666}
1667
1668/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1669