1/*
2 *  linux/drivers/message/fusion/mptlan.c
3 *      IP Over Fibre Channel device driver.
4 *      For use with LSI Logic Fibre Channel PCI chip/adapters
5 *      running LSI Logic Fusion MPT (Message Passing Technology) firmware.
6 *
7 *  Copyright (c) 2000-2007 LSI Logic Corporation
8 *  (mailto:mpt_linux_developer@lsi.com)
9 *
10 */
11/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
12/*
13    This program is free software; you can redistribute it and/or modify
14    it under the terms of the GNU General Public License as published by
15    the Free Software Foundation; version 2 of the License.
16
17    This program is distributed in the hope that it will be useful,
18    but WITHOUT ANY WARRANTY; without even the implied warranty of
19    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20    GNU General Public License for more details.
21
22    NO WARRANTY
23    THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
24    CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
25    LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
26    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
27    solely responsible for determining the appropriateness of using and
28    distributing the Program and assumes all risks associated with its
29    exercise of rights under this Agreement, including but not limited to
30    the risks and costs of program errors, damage to or loss of data,
31    programs or equipment, and unavailability or interruption of operations.
32
33    DISCLAIMER OF LIABILITY
34    NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
35    DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36    DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
37    ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
38    TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
39    USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
40    HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
41
42    You should have received a copy of the GNU General Public License
43    along with this program; if not, write to the Free Software
44    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
45*/
46
47/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
48/*
49 * Define statements used for debugging
50 */
51//#define MPT_LAN_IO_DEBUG
52
53/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
54
55#include "mptlan.h"
56#include <linux/init.h>
57#include <linux/module.h>
58#include <linux/fs.h>
59
60#define my_VERSION	MPT_LINUX_VERSION_COMMON
61#define MYNAM		"mptlan"
62
63MODULE_LICENSE("GPL");
64MODULE_VERSION(my_VERSION);
65
66/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
67/*
68 * MPT LAN message sizes without variable part.
69 */
/* Size of the fixed portion of a LANReceivePostRequest: the whole
 * request minus the trailing scatter/gather union that is built
 * separately per-request. */
#define MPT_LAN_RECEIVE_POST_REQUEST_SIZE \
	(sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION))

/* Size of the fixed portion of an SGETransaction32: the header minus
 * one u32 (the variable context/details area counted in the struct's
 * sizeof — see the MPI headers for the layout). */
#define MPT_LAN_TRANSACTION32_SIZE \
	(sizeof(SGETransaction32_t) - sizeof(u32))
75
76/*
77 *  Fusion MPT LAN private structures
78 */
79
/* Node in the singly-linked mpt_bad_naa list (only used when
 * QLOGIC_NAA_WORKAROUND is defined): records the NAA value to
 * substitute for a peer identified by its IEEE MAC address, because
 * some QLogic boards do not follow RFC 2625. */
struct NAA_Hosed {
	u16 NAA;		/* NAA value to use for this peer */
	u8 ieee[FC_ALEN];	/* peer's IEEE MAC address (list key) */
	struct NAA_Hosed *next;	/* next node; NULL terminates the list */
};
85
/* Book-keeping for one DMA-mapped buffer (a Tx send or an Rx bucket):
 * the skb plus the bus address and length needed for the later
 * pci_unmap_single(). */
struct BufferControl {
	struct sk_buff	*skb;	/* buffer owned by this slot; NULL if free */
	dma_addr_t	dma;	/* bus address from pci_map_single() */
	unsigned int	len;	/* mapped length, required for unmap */
};
91
/* Per-port private state, stored in the net_device private area and
 * retrieved with netdev_priv(). */
struct mpt_lan_priv {
	MPT_ADAPTER *mpt_dev;		/* owning Fusion MPT adapter */
	u8 pnum; /* Port number in the IOC. This is not a Unix network port! */

	atomic_t buckets_out;		/* number of unused buckets on IOC */
	int bucketthresh;		/* Send more when this many left */

	int *mpt_txfidx; /* Free Tx Context list */
	int mpt_txfidx_tail;		/* top-of-stack index; -1 = empty */
	spinlock_t txfidx_lock;		/* protects mpt_txfidx/_tail */

	int *mpt_rxfidx; /* Free Rx Context list */
	int mpt_rxfidx_tail;		/* top-of-stack index; -1 = empty */
	spinlock_t rxfidx_lock;		/* protects mpt_rxfidx/_tail */

	struct BufferControl *RcvCtl;	/* Receive BufferControl structs */
	struct BufferControl *SendCtl;	/* Send BufferControl structs */

	int max_buckets_out;		/* Max buckets to send to IOC */
	int tx_max_out;			/* IOC's Tx queue len */

	u32 total_posted;		/* buckets posted since driver load */
	u32 total_received;		/* buckets returned since driver load */
	struct net_device_stats stats;	/* Per device statistics */

	struct delayed_work post_buckets_task;	/* deferred bucket reposting */
	struct net_device *dev;			/* back-pointer to our netdev */
	unsigned long post_buckets_active;	/* bit 0 set = task queued */
};
121
/* Destination/source type+address pairs for the LAN header.
 * NOTE(review): not referenced anywhere in this chunk — presumably the
 * on-wire (RFC 2625 style) header layout; confirm against its users. */
struct mpt_lan_ohdr {
	u16	dtype;		/* destination type */
	u8	daddr[FC_ALEN];	/* destination address */
	u16	stype;		/* source type */
	u8	saddr[FC_ALEN];	/* source address */
};
128
129/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
130
131/*
132 *  Forward protos...
133 */
134static int  lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
135		       MPT_FRAME_HDR *reply);
136static int  mpt_lan_open(struct net_device *dev);
137static int  mpt_lan_reset(struct net_device *dev);
138static int  mpt_lan_close(struct net_device *dev);
139static void mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv);
140static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
141					   int priority);
142static int  mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
143static int  mpt_lan_receive_post_reply(struct net_device *dev,
144				       LANReceivePostReply_t *pRecvRep);
145static int  mpt_lan_send_turbo(struct net_device *dev, u32 tmsg);
146static int  mpt_lan_send_reply(struct net_device *dev,
147			       LANSendReply_t *pSendRep);
148static int  mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
149static int  mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
150static unsigned short mpt_lan_type_trans(struct sk_buff *skb,
151					 struct net_device *dev);
152
153/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
154/*
155 *  Fusion MPT LAN private data
156 */
157static int LanCtx = -1;
158
159static u32 max_buckets_out = 127;
160static u32 tx_max_out_p = 127 - 16;
161
162#ifdef QLOGIC_NAA_WORKAROUND
163static struct NAA_Hosed *mpt_bad_naa = NULL;
164DEFINE_RWLOCK(bad_naa_lock);
165#endif
166
167/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
168/*
169 * Fusion MPT LAN external data
170 */
171extern int mpt_lan_index;
172
173/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
174/**
175 *	lan_reply - Handle all data sent from the hardware.
176 *	@ioc: Pointer to MPT_ADAPTER structure
177 *	@mf: Pointer to original MPT request frame (NULL if TurboReply)
178 *	@reply: Pointer to MPT reply frame
179 *
180 *	Returns 1 indicating original alloc'd request frame ptr
181 *	should be freed, or 0 if it shouldn't.
182 */
183static int
184lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
185{
186	struct net_device *dev = ioc->netdev;
187	int FreeReqFrame = 0;
188
189	dioprintk((KERN_INFO MYNAM ": %s/%s: Got reply.\n",
190		  IOC_AND_NETDEV_NAMES_s_s(dev)));
191
192//	dioprintk((KERN_INFO MYNAM "@lan_reply: mf = %p, reply = %p\n",
193//			mf, reply));
194
195	if (mf == NULL) {
196		u32 tmsg = CAST_PTR_TO_U32(reply);
197
198		dioprintk((KERN_INFO MYNAM ": %s/%s: @lan_reply, tmsg %08x\n",
199				IOC_AND_NETDEV_NAMES_s_s(dev),
200				tmsg));
201
202		switch (GET_LAN_FORM(tmsg)) {
203
204		// NOTE!  (Optimization) First case here is now caught in
205		//  mptbase.c::mpt_interrupt() routine and callcack here
206		//  is now skipped for this case!
207
208		case LAN_REPLY_FORM_SEND_SINGLE:
209//			dioprintk((MYNAM "/lan_reply: "
210//				  "calling mpt_lan_send_reply (turbo)\n"));
211
212			// Potential BUG here?
213			//	FreeReqFrame = mpt_lan_send_turbo(dev, tmsg);
214			//  If/when mpt_lan_send_turbo would return 1 here,
215			//  calling routine (mptbase.c|mpt_interrupt)
216			//  would Oops because mf has already been set
217			//  to NULL.  So after return from this func,
218			//  mpt_interrupt() will attempt to put (NULL) mf ptr
219			//  item back onto its adapter FreeQ - Oops!:-(
220			//  It's Ok, since mpt_lan_send_turbo() *currently*
221			//  always returns 0, but..., just in case:
222
223			(void) mpt_lan_send_turbo(dev, tmsg);
224			FreeReqFrame = 0;
225
226			break;
227
228		case LAN_REPLY_FORM_RECEIVE_SINGLE:
229//			dioprintk((KERN_INFO MYNAM "@lan_reply: "
230//				  "rcv-Turbo = %08x\n", tmsg));
231			mpt_lan_receive_post_turbo(dev, tmsg);
232			break;
233
234		default:
235			printk (KERN_ERR MYNAM "/lan_reply: Got a turbo reply "
236				"that I don't know what to do with\n");
237
238			/* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */
239
240			break;
241		}
242
243		return FreeReqFrame;
244	}
245
246//	msg = (u32 *) reply;
247//	dioprintk((KERN_INFO MYNAM "@lan_reply: msg = %08x %08x %08x %08x\n",
248//		  le32_to_cpu(msg[0]), le32_to_cpu(msg[1]),
249//		  le32_to_cpu(msg[2]), le32_to_cpu(msg[3])));
250//	dioprintk((KERN_INFO MYNAM "@lan_reply: Function = %02xh\n",
251//		  reply->u.hdr.Function));
252
253	switch (reply->u.hdr.Function) {
254
255	case MPI_FUNCTION_LAN_SEND:
256	{
257		LANSendReply_t *pSendRep;
258
259		pSendRep = (LANSendReply_t *) reply;
260		FreeReqFrame = mpt_lan_send_reply(dev, pSendRep);
261		break;
262	}
263
264	case MPI_FUNCTION_LAN_RECEIVE:
265	{
266		LANReceivePostReply_t *pRecvRep;
267
268		pRecvRep = (LANReceivePostReply_t *) reply;
269		if (pRecvRep->NumberOfContexts) {
270			mpt_lan_receive_post_reply(dev, pRecvRep);
271			if (!(pRecvRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
272				FreeReqFrame = 1;
273		} else
274			dioprintk((KERN_INFO MYNAM "@lan_reply: zero context "
275				  "ReceivePostReply received.\n"));
276		break;
277	}
278
279	case MPI_FUNCTION_LAN_RESET:
280		/* Just a default reply. Might want to check it to
281		 * make sure that everything went ok.
282		 */
283		FreeReqFrame = 1;
284		break;
285
286	case MPI_FUNCTION_EVENT_NOTIFICATION:
287	case MPI_FUNCTION_EVENT_ACK:
288		/*  _EVENT_NOTIFICATION should NOT come down this path any more.
289		 *  Should be routed to mpt_lan_event_process(), but just in case...
290		 */
291		FreeReqFrame = 1;
292		break;
293
294	default:
295		printk (KERN_ERR MYNAM "/lan_reply: Got a non-turbo "
296			"reply that I don't know what to do with\n");
297
298		/* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */
299		FreeReqFrame = 1;
300
301		break;
302	}
303
304	return FreeReqFrame;
305}
306
307/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
308static int
309mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
310{
311	struct net_device *dev = ioc->netdev;
312	struct mpt_lan_priv *priv;
313
314	if (dev == NULL)
315		return(1);
316	else
317		priv = netdev_priv(dev);
318
319	dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n",
320			reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
321			reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));
322
323	if (priv->mpt_rxfidx == NULL)
324		return (1);
325
326	if (reset_phase == MPT_IOC_SETUP_RESET) {
327		;
328	} else if (reset_phase == MPT_IOC_PRE_RESET) {
329		int i;
330		unsigned long flags;
331
332		netif_stop_queue(dev);
333
334		dlprintk ((KERN_INFO "mptlan/ioc_reset: called netif_stop_queue for %s.\n", dev->name));
335
336		atomic_set(&priv->buckets_out, 0);
337
338		/* Reset Rx Free Tail index and re-populate the queue. */
339		spin_lock_irqsave(&priv->rxfidx_lock, flags);
340		priv->mpt_rxfidx_tail = -1;
341		for (i = 0; i < priv->max_buckets_out; i++)
342			priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
343		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
344	} else {
345		mpt_lan_post_receive_buckets(priv);
346		netif_wake_queue(dev);
347	}
348
349	return 1;
350}
351
352/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* MPT event callback for the LAN driver.  Every currently defined
 * event is informational here; none requires any action.  Always
 * returns 1. */
static int
mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
{
	dlprintk((KERN_INFO MYNAM ": MPT event routed to LAN driver!\n"));

	switch (le32_to_cpu(pEvReply->Event)) {
	case MPI_EVENT_NONE:				/* 00 */
	case MPI_EVENT_LOG_DATA:			/* 01 */
	case MPI_EVENT_STATE_CHANGE:			/* 02 */
	case MPI_EVENT_UNIT_ATTENTION:			/* 03 */
	case MPI_EVENT_IOC_BUS_RESET:			/* 04 */
	case MPI_EVENT_EXT_BUS_RESET:			/* 05 */
	case MPI_EVENT_RESCAN:				/* 06 */
		/* Ok, do we need to do anything here? As far as
		   I can tell, this is when a new device gets added
		   to the loop. */
	case MPI_EVENT_LINK_STATUS_CHANGE:		/* 07 */
	case MPI_EVENT_LOOP_STATE_CHANGE:		/* 08 */
	case MPI_EVENT_LOGOUT:				/* 09 */
	case MPI_EVENT_EVENT_CHANGE:			/* 0A */
	default:
		break;
	}

	/*
	 *  NOTE: pEvent->AckRequired handling now done in mptbase.c;
	 *  Do NOT do it here now!
	 */

	return 1;
}
384
385/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* net_device open handler: reset the FW LAN port, allocate the Tx/Rx
 * free-context stacks and BufferControl arrays, post the initial set
 * of receive buckets, register for events, and start the Tx queue.
 * Returns 0 on success or -ENOMEM if any allocation fails. */
static int
mpt_lan_open(struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	int i;

	/* Ask the FW to return any buckets it holds.  Failure is only
	 * reported, not fatal: we continue bringing the interface up. */
	if (mpt_lan_reset(dev) != 0) {
		MPT_ADAPTER *mpt_dev = priv->mpt_dev;

		printk (KERN_WARNING MYNAM "/lan_open: lan_reset failed.");

		if (mpt_dev->active)
			printk ("The ioc is active. Perhaps it needs to be"
				" reset?\n");
		else
			printk ("The ioc in inactive, most likely in the "
				"process of being reset. Please try again in "
				"a moment.\n");
	}

	/* Free Tx context stack: indices 0..tx_max_out-1 with
	 * mpt_txfidx_tail pointing at the top entry (-1 = empty). */
	priv->mpt_txfidx = kmalloc(priv->tx_max_out * sizeof(int), GFP_KERNEL);
	if (priv->mpt_txfidx == NULL)
		goto out;
	priv->mpt_txfidx_tail = -1;

	priv->SendCtl = kcalloc(priv->tx_max_out, sizeof(struct BufferControl),
				GFP_KERNEL);
	if (priv->SendCtl == NULL)
		goto out_mpt_txfidx;
	for (i = 0; i < priv->tx_max_out; i++)
		priv->mpt_txfidx[++priv->mpt_txfidx_tail] = i;

	dlprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n"));

	/* Free Rx context stack, sized by the receive bucket depth. */
	priv->mpt_rxfidx = kmalloc(priv->max_buckets_out * sizeof(int),
				   GFP_KERNEL);
	if (priv->mpt_rxfidx == NULL)
		goto out_SendCtl;
	priv->mpt_rxfidx_tail = -1;

	priv->RcvCtl = kcalloc(priv->max_buckets_out,
			       sizeof(struct BufferControl),
			       GFP_KERNEL);
	if (priv->RcvCtl == NULL)
		goto out_mpt_rxfidx;
	for (i = 0; i < priv->max_buckets_out; i++)
		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;

/**/	dlprintk((KERN_INFO MYNAM "/lo: txfidx contains - "));
/**/	for (i = 0; i < priv->tx_max_out; i++)
/**/		dlprintk((" %xh", priv->mpt_txfidx[i]));
/**/	dlprintk(("\n"));

	dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));

	/* Hand the initial set of receive buckets to the IOC. */
	mpt_lan_post_receive_buckets(priv);
	printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
			IOC_AND_NETDEV_NAMES_s_s(dev));

	if (mpt_event_register(LanCtx, mpt_lan_event_process) != 0) {
		printk (KERN_WARNING MYNAM "/lo: Unable to register for Event"
			" Notifications. This is a bad thing! We're not going "
			"to go ahead, but I'd be leery of system stability at "
			"this point.\n");
	}

	netif_start_queue(dev);
	dlprintk((KERN_INFO MYNAM "/lo: Done.\n"));

	return 0;

	/* Unwind the allocations in reverse order on failure. */
out_mpt_rxfidx:
	kfree(priv->mpt_rxfidx);
	priv->mpt_rxfidx = NULL;
out_SendCtl:
	kfree(priv->SendCtl);
	priv->SendCtl = NULL;
out_mpt_txfidx:
	kfree(priv->mpt_txfidx);
	priv->mpt_txfidx = NULL;
out:	return -ENOMEM;
}
467
468/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
469/* Send a LanReset message to the FW. This should result in the FW returning
470   any buckets it still has. */
471static int
472mpt_lan_reset(struct net_device *dev)
473{
474	MPT_FRAME_HDR *mf;
475	LANResetRequest_t *pResetReq;
476	struct mpt_lan_priv *priv = netdev_priv(dev);
477
478	mf = mpt_get_msg_frame(LanCtx, priv->mpt_dev);
479
480	if (mf == NULL) {
481/*		dlprintk((KERN_ERR MYNAM "/reset: Evil funkiness abounds! "
482		"Unable to allocate a request frame.\n"));
483*/
484		return -1;
485	}
486
487	pResetReq = (LANResetRequest_t *) mf;
488
489	pResetReq->Function	= MPI_FUNCTION_LAN_RESET;
490	pResetReq->ChainOffset	= 0;
491	pResetReq->Reserved	= 0;
492	pResetReq->PortNumber	= priv->pnum;
493	pResetReq->MsgFlags	= 0;
494	pResetReq->Reserved2	= 0;
495
496	mpt_put_msg_frame(LanCtx, priv->mpt_dev, mf);
497
498	return 0;
499}
500
501/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* net_device stop handler: deregister from events, ask the FW to hand
 * back its buckets, wait up to two seconds for them, then unmap and
 * free every outstanding Rx bucket and in-flight Tx buffer and release
 * the context arrays.  Always returns 0. */
static int
mpt_lan_close(struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	unsigned long timeout;
	int i;

	dlprintk((KERN_INFO MYNAM ": mpt_lan_close called\n"));

	mpt_event_deregister(LanCtx);

	dlprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets "
		  "since driver was loaded, %d still out\n",
		  priv->total_posted,atomic_read(&priv->buckets_out)));

	netif_stop_queue(dev);

	mpt_lan_reset(dev);

	/* Give the FW up to two seconds to return the posted buckets. */
	timeout = jiffies + 2 * HZ;
	while (atomic_read(&priv->buckets_out) && time_before(jiffies, timeout))
		schedule_timeout_interruptible(1);

	/* Reclaim any receive buckets the FW never gave back. */
	for (i = 0; i < priv->max_buckets_out; i++) {
		if (priv->RcvCtl[i].skb != NULL) {
/**/			dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x "
/**/				  "is still out\n", i));
			pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[i].dma,
					 priv->RcvCtl[i].len,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(priv->RcvCtl[i].skb);
		}
	}

	kfree(priv->RcvCtl);
	kfree(priv->mpt_rxfidx);

	/* Reclaim any transmit buffers still mapped for DMA. */
	for (i = 0; i < priv->tx_max_out; i++) {
		if (priv->SendCtl[i].skb != NULL) {
			pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[i].dma,
					 priv->SendCtl[i].len,
					 PCI_DMA_TODEVICE);
			dev_kfree_skb(priv->SendCtl[i].skb);
		}
	}

	kfree(priv->SendCtl);
	kfree(priv->mpt_txfidx);

	atomic_set(&priv->buckets_out, 0);

	printk(KERN_INFO MYNAM ": %s/%s: interface down & inactive\n",
			IOC_AND_NETDEV_NAMES_s_s(dev));

	return 0;
}
559
560/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
561static struct net_device_stats *
562mpt_lan_get_stats(struct net_device *dev)
563{
564	struct mpt_lan_priv *priv = netdev_priv(dev);
565
566	return (struct net_device_stats *) &priv->stats;
567}
568
569/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
570static int
571mpt_lan_change_mtu(struct net_device *dev, int new_mtu)
572{
573	if ((new_mtu < MPT_LAN_MIN_MTU) || (new_mtu > MPT_LAN_MAX_MTU))
574		return -EINVAL;
575	dev->mtu = new_mtu;
576	return 0;
577}
578
579/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
580/* Tx timeout handler. */
581static void
582mpt_lan_tx_timeout(struct net_device *dev)
583{
584	struct mpt_lan_priv *priv = netdev_priv(dev);
585	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
586
587	if (mpt_dev->active) {
588		dlprintk (("mptlan/tx_timeout: calling netif_wake_queue for %s.\n", dev->name));
589		netif_wake_queue(dev);
590	}
591}
592
593/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
594//static inline int
595static int
596mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
597{
598	struct mpt_lan_priv *priv = netdev_priv(dev);
599	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
600	struct sk_buff *sent;
601	unsigned long flags;
602	u32 ctx;
603
604	ctx = GET_LAN_BUFFER_CONTEXT(tmsg);
605	sent = priv->SendCtl[ctx].skb;
606
607	priv->stats.tx_packets++;
608	priv->stats.tx_bytes += sent->len;
609
610	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
611			IOC_AND_NETDEV_NAMES_s_s(dev),
612			__FUNCTION__, sent));
613
614	priv->SendCtl[ctx].skb = NULL;
615	pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
616			 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
617	dev_kfree_skb_irq(sent);
618
619	spin_lock_irqsave(&priv->txfidx_lock, flags);
620	priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
621	spin_unlock_irqrestore(&priv->txfidx_lock, flags);
622
623	netif_wake_queue(dev);
624	return 0;
625}
626
627/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
628static int
629mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
630{
631	struct mpt_lan_priv *priv = netdev_priv(dev);
632	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
633	struct sk_buff *sent;
634	unsigned long flags;
635	int FreeReqFrame = 0;
636	u32 *pContext;
637	u32 ctx;
638	u8 count;
639
640	count = pSendRep->NumberOfContexts;
641
642	dioprintk((KERN_INFO MYNAM ": send_reply: IOCStatus: %04x\n",
643		 le16_to_cpu(pSendRep->IOCStatus)));
644
645	/* Add check for Loginfo Flag in IOCStatus */
646
647	switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) {
648	case MPI_IOCSTATUS_SUCCESS:
649		priv->stats.tx_packets += count;
650		break;
651
652	case MPI_IOCSTATUS_LAN_CANCELED:
653	case MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED:
654		break;
655
656	case MPI_IOCSTATUS_INVALID_SGL:
657		priv->stats.tx_errors += count;
658		printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n",
659				IOC_AND_NETDEV_NAMES_s_s(dev));
660		goto out;
661
662	default:
663		priv->stats.tx_errors += count;
664		break;
665	}
666
667	pContext = &pSendRep->BufferContext;
668
669	spin_lock_irqsave(&priv->txfidx_lock, flags);
670	while (count > 0) {
671		ctx = GET_LAN_BUFFER_CONTEXT(le32_to_cpu(*pContext));
672
673		sent = priv->SendCtl[ctx].skb;
674		priv->stats.tx_bytes += sent->len;
675
676		dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
677				IOC_AND_NETDEV_NAMES_s_s(dev),
678				__FUNCTION__, sent));
679
680		priv->SendCtl[ctx].skb = NULL;
681		pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
682				 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
683		dev_kfree_skb_irq(sent);
684
685		priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
686
687		pContext++;
688		count--;
689	}
690	spin_unlock_irqrestore(&priv->txfidx_lock, flags);
691
692out:
693	if (!(pSendRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
694		FreeReqFrame = 1;
695
696	netif_wake_queue(dev);
697	return FreeReqFrame;
698}
699
700/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
701static int
702mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
703{
704	struct mpt_lan_priv *priv = netdev_priv(dev);
705	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
706	MPT_FRAME_HDR *mf;
707	LANSendRequest_t *pSendReq;
708	SGETransaction32_t *pTrans;
709	SGESimple64_t *pSimple;
710	const unsigned char *mac;
711	dma_addr_t dma;
712	unsigned long flags;
713	int ctx;
714	u16 cur_naa = 0x1000;
715
716	dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
717			__FUNCTION__, skb));
718
719	spin_lock_irqsave(&priv->txfidx_lock, flags);
720	if (priv->mpt_txfidx_tail < 0) {
721		netif_stop_queue(dev);
722		spin_unlock_irqrestore(&priv->txfidx_lock, flags);
723
724		printk (KERN_ERR "%s: no tx context available: %u\n",
725			__FUNCTION__, priv->mpt_txfidx_tail);
726		return 1;
727	}
728
729	mf = mpt_get_msg_frame(LanCtx, mpt_dev);
730	if (mf == NULL) {
731		netif_stop_queue(dev);
732		spin_unlock_irqrestore(&priv->txfidx_lock, flags);
733
734		printk (KERN_ERR "%s: Unable to alloc request frame\n",
735			__FUNCTION__);
736		return 1;
737	}
738
739	ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];
740	spin_unlock_irqrestore(&priv->txfidx_lock, flags);
741
742//	dioprintk((KERN_INFO MYNAM ": %s/%s: Creating new msg frame (send).\n",
743//			IOC_AND_NETDEV_NAMES_s_s(dev)));
744
745	pSendReq = (LANSendRequest_t *) mf;
746
747	/* Set the mac.raw pointer, since this apparently isn't getting
748	 * done before we get the skb. Pull the data pointer past the mac data.
749	 */
750	skb_reset_mac_header(skb);
751	skb_pull(skb, 12);
752
753        dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len,
754			     PCI_DMA_TODEVICE);
755
756	priv->SendCtl[ctx].skb = skb;
757	priv->SendCtl[ctx].dma = dma;
758	priv->SendCtl[ctx].len = skb->len;
759
760	/* Message Header */
761	pSendReq->Reserved    = 0;
762	pSendReq->Function    = MPI_FUNCTION_LAN_SEND;
763	pSendReq->ChainOffset = 0;
764	pSendReq->Reserved2   = 0;
765	pSendReq->MsgFlags    = 0;
766	pSendReq->PortNumber  = priv->pnum;
767
768	/* Transaction Context Element */
769	pTrans = (SGETransaction32_t *) pSendReq->SG_List;
770
771	/* No Flags, 8 bytes of Details, 32bit Context (bloody turbo replies) */
772	pTrans->ContextSize   = sizeof(u32);
773	pTrans->DetailsLength = 2 * sizeof(u32);
774	pTrans->Flags         = 0;
775	pTrans->TransactionContext[0] = cpu_to_le32(ctx);
776
777//	dioprintk((KERN_INFO MYNAM ": %s/%s: BC = %08x, skb = %p, buff = %p\n",
778//			IOC_AND_NETDEV_NAMES_s_s(dev),
779//			ctx, skb, skb->data));
780
781	mac = skb_mac_header(skb);
782#ifdef QLOGIC_NAA_WORKAROUND
783{
784	struct NAA_Hosed *nh;
785
786	/* Munge the NAA for Tx packets to QLogic boards, which don't follow
787	   RFC 2625. The longer I look at this, the more my opinion of Qlogic
788	   drops. */
789	read_lock_irq(&bad_naa_lock);
790	for (nh = mpt_bad_naa; nh != NULL; nh=nh->next) {
791		if ((nh->ieee[0] == mac[0]) &&
792		    (nh->ieee[1] == mac[1]) &&
793		    (nh->ieee[2] == mac[2]) &&
794		    (nh->ieee[3] == mac[3]) &&
795		    (nh->ieee[4] == mac[4]) &&
796		    (nh->ieee[5] == mac[5])) {
797			cur_naa = nh->NAA;
798			dlprintk ((KERN_INFO "mptlan/sdu_send: using NAA value "
799				  "= %04x.\n", cur_naa));
800			break;
801		}
802	}
803	read_unlock_irq(&bad_naa_lock);
804}
805#endif
806
807	pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa         << 16) |
808						    (mac[0] <<  8) |
809						    (mac[1] <<  0));
810	pTrans->TransactionDetails[1] = cpu_to_le32((mac[2] << 24) |
811						    (mac[3] << 16) |
812						    (mac[4] <<  8) |
813						    (mac[5] <<  0));
814
815	pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2];
816
817	/* If we ever decide to send more than one Simple SGE per LANSend, then
818	   we will need to make sure that LAST_ELEMENT only gets set on the
819	   last one. Otherwise, bad voodoo and evil funkiness will commence. */
820	pSimple->FlagsLength = cpu_to_le32(
821			((MPI_SGE_FLAGS_LAST_ELEMENT |
822			  MPI_SGE_FLAGS_END_OF_BUFFER |
823			  MPI_SGE_FLAGS_SIMPLE_ELEMENT |
824			  MPI_SGE_FLAGS_SYSTEM_ADDRESS |
825			  MPI_SGE_FLAGS_HOST_TO_IOC |
826			  MPI_SGE_FLAGS_64_BIT_ADDRESSING |
827			  MPI_SGE_FLAGS_END_OF_LIST) << MPI_SGE_FLAGS_SHIFT) |
828			skb->len);
829	pSimple->Address.Low = cpu_to_le32((u32) dma);
830	if (sizeof(dma_addr_t) > sizeof(u32))
831		pSimple->Address.High = cpu_to_le32((u32) ((u64) dma >> 32));
832	else
833		pSimple->Address.High = 0;
834
835	mpt_put_msg_frame (LanCtx, mpt_dev, mf);
836	dev->trans_start = jiffies;
837
838	dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. FlagsLength = %08x.\n",
839			IOC_AND_NETDEV_NAMES_s_s(dev),
840			le32_to_cpu(pSimple->FlagsLength)));
841
842	return 0;
843}
844
845/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
846static void
847mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority)
848/*
849 * @priority: 0 = put it on the timer queue, 1 = put it on the immediate queue
850 */
851{
852	struct mpt_lan_priv *priv = dev->priv;
853
854	if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
855		if (priority) {
856			schedule_delayed_work(&priv->post_buckets_task, 0);
857		} else {
858			schedule_delayed_work(&priv->post_buckets_task, 1);
859			dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
860				   "timer.\n"));
861		}
862	        dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n",
863			   IOC_AND_NETDEV_NAMES_s_s(dev) ));
864	}
865}
866
867/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
868static int
869mpt_lan_receive_skb(struct net_device *dev, struct sk_buff *skb)
870{
871	struct mpt_lan_priv *priv = dev->priv;
872
873	skb->protocol = mpt_lan_type_trans(skb, dev);
874
875	dioprintk((KERN_INFO MYNAM ": %s/%s: Incoming packet (%d bytes) "
876		 "delivered to upper level.\n",
877			IOC_AND_NETDEV_NAMES_s_s(dev), skb->len));
878
879	priv->stats.rx_bytes += skb->len;
880	priv->stats.rx_packets++;
881
882	skb->dev = dev;
883	netif_rx(skb);
884
885	dioprintk((MYNAM "/receive_skb: %d buckets remaining\n",
886		 atomic_read(&priv->buckets_out)));
887
888	if (atomic_read(&priv->buckets_out) < priv->bucketthresh)
889		mpt_lan_wake_post_buckets_task(dev, 1);
890
891	dioprintk((KERN_INFO MYNAM "/receive_post_reply: %d buckets "
892		  "remaining, %d received back since sod\n",
893		  atomic_read(&priv->buckets_out), priv->total_received));
894
895	return 0;
896}
897
898/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
899//static inline int
900static int
901mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
902{
903	struct mpt_lan_priv *priv = dev->priv;
904	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
905	struct sk_buff *skb, *old_skb;
906	unsigned long flags;
907	u32 ctx, len;
908
909	ctx = GET_LAN_BUCKET_CONTEXT(tmsg);
910	skb = priv->RcvCtl[ctx].skb;
911
912	len = GET_LAN_PACKET_LENGTH(tmsg);
913
914	if (len < MPT_LAN_RX_COPYBREAK) {
915		old_skb = skb;
916
917		skb = (struct sk_buff *)dev_alloc_skb(len);
918		if (!skb) {
919			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
920					IOC_AND_NETDEV_NAMES_s_s(dev),
921					__FILE__, __LINE__);
922			return -ENOMEM;
923		}
924
925		pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
926					    priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
927
928		skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);
929
930		pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
931					       priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
932		goto out;
933	}
934
935	skb_put(skb, len);
936
937	priv->RcvCtl[ctx].skb = NULL;
938
939	pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
940			 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
941
942out:
943	spin_lock_irqsave(&priv->rxfidx_lock, flags);
944	priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
945	spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
946
947	atomic_dec(&priv->buckets_out);
948	priv->total_received++;
949
950	return mpt_lan_receive_skb(dev, skb);
951}
952
953/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
954static int
955mpt_lan_receive_post_free(struct net_device *dev,
956			  LANReceivePostReply_t *pRecvRep)
957{
958	struct mpt_lan_priv *priv = dev->priv;
959	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
960	unsigned long flags;
961	struct sk_buff *skb;
962	u32 ctx;
963	int count;
964	int i;
965
966	count = pRecvRep->NumberOfContexts;
967
968/**/	dlprintk((KERN_INFO MYNAM "/receive_post_reply: "
969		  "IOC returned %d buckets, freeing them...\n", count));
970
971	spin_lock_irqsave(&priv->rxfidx_lock, flags);
972	for (i = 0; i < count; i++) {
973		ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
974
975		skb = priv->RcvCtl[ctx].skb;
976
977//		dlprintk((KERN_INFO MYNAM ": %s: dev_name = %s\n",
978//				IOC_AND_NETDEV_NAMES_s_s(dev)));
979//		dlprintk((KERN_INFO MYNAM "@rpr[2], priv = %p, buckets_out addr = %p",
980//				priv, &(priv->buckets_out)));
981//		dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n"));
982
983		priv->RcvCtl[ctx].skb = NULL;
984		pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
985				 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
986		dev_kfree_skb_any(skb);
987
988		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
989	}
990	spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
991
992	atomic_sub(count, &priv->buckets_out);
993
994//	for (i = 0; i < priv->max_buckets_out; i++)
995//		if (priv->RcvCtl[i].skb != NULL)
996//			dlprintk((KERN_INFO MYNAM "@rpr: bucket %03x "
997//				  "is still out\n", i));
998
999/*	dlprintk((KERN_INFO MYNAM "/receive_post_reply: freed %d buckets\n",
1000		  count));
1001*/
1002/**/	dlprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets "
1003/**/		  "remaining, %d received back since sod.\n",
1004/**/		  atomic_read(&priv->buckets_out), priv->total_received));
1005	return 0;
1006}
1007
1008/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1009static int
1010mpt_lan_receive_post_reply(struct net_device *dev,
1011			   LANReceivePostReply_t *pRecvRep)
1012{
1013	struct mpt_lan_priv *priv = dev->priv;
1014	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
1015	struct sk_buff *skb, *old_skb;
1016	unsigned long flags;
1017	u32 len, ctx, offset;
1018	u32 remaining = le32_to_cpu(pRecvRep->BucketsRemaining);
1019	int count;
1020	int i, l;
1021
1022	dioprintk((KERN_INFO MYNAM ": mpt_lan_receive_post_reply called\n"));
1023	dioprintk((KERN_INFO MYNAM ": receive_post_reply: IOCStatus: %04x\n",
1024		 le16_to_cpu(pRecvRep->IOCStatus)));
1025
1026	if ((le16_to_cpu(pRecvRep->IOCStatus) & MPI_IOCSTATUS_MASK) ==
1027						MPI_IOCSTATUS_LAN_CANCELED)
1028		return mpt_lan_receive_post_free(dev, pRecvRep);
1029
1030	len = le32_to_cpu(pRecvRep->PacketLength);
1031	if (len == 0) {
1032		printk (KERN_ERR MYNAM ": %s/%s: ERROR - Got a non-TURBO "
1033			"ReceivePostReply w/ PacketLength zero!\n",
1034				IOC_AND_NETDEV_NAMES_s_s(dev));
1035		printk (KERN_ERR MYNAM ": MsgFlags = %02x, IOCStatus = %04x\n",
1036				pRecvRep->MsgFlags, le16_to_cpu(pRecvRep->IOCStatus));
1037		return -1;
1038	}
1039
1040	ctx    = le32_to_cpu(pRecvRep->BucketContext[0]);
1041	count  = pRecvRep->NumberOfContexts;
1042	skb    = priv->RcvCtl[ctx].skb;
1043
1044	offset = le32_to_cpu(pRecvRep->PacketOffset);
1045//	if (offset != 0) {
1046//		printk (KERN_INFO MYNAM ": %s/%s: Got a ReceivePostReply "
1047//			"w/ PacketOffset %u\n",
1048//				IOC_AND_NETDEV_NAMES_s_s(dev),
1049//				offset);
1050//	}
1051
1052	dioprintk((KERN_INFO MYNAM ": %s/%s: @rpr, offset = %d, len = %d\n",
1053			IOC_AND_NETDEV_NAMES_s_s(dev),
1054			offset, len));
1055
1056	if (count > 1) {
1057		int szrem = len;
1058
1059//		dioprintk((KERN_INFO MYNAM ": %s/%s: Multiple buckets returned "
1060//			"for single packet, concatenating...\n",
1061//				IOC_AND_NETDEV_NAMES_s_s(dev)));
1062
1063		skb = (struct sk_buff *)dev_alloc_skb(len);
1064		if (!skb) {
1065			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
1066					IOC_AND_NETDEV_NAMES_s_s(dev),
1067					__FILE__, __LINE__);
1068			return -ENOMEM;
1069		}
1070
1071		spin_lock_irqsave(&priv->rxfidx_lock, flags);
1072		for (i = 0; i < count; i++) {
1073
1074			ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
1075			old_skb = priv->RcvCtl[ctx].skb;
1076
1077			l = priv->RcvCtl[ctx].len;
1078			if (szrem < l)
1079				l = szrem;
1080
1081//			dioprintk((KERN_INFO MYNAM ": %s/%s: Buckets = %d, len = %u\n",
1082//					IOC_AND_NETDEV_NAMES_s_s(dev),
1083//					i, l));
1084
1085			pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
1086						    priv->RcvCtl[ctx].dma,
1087						    priv->RcvCtl[ctx].len,
1088						    PCI_DMA_FROMDEVICE);
1089			skb_copy_from_linear_data(old_skb, skb_put(skb, l), l);
1090
1091			pci_dma_sync_single_for_device(mpt_dev->pcidev,
1092						       priv->RcvCtl[ctx].dma,
1093						       priv->RcvCtl[ctx].len,
1094						       PCI_DMA_FROMDEVICE);
1095
1096			priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1097			szrem -= l;
1098		}
1099		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1100
1101	} else if (len < MPT_LAN_RX_COPYBREAK) {
1102
1103		old_skb = skb;
1104
1105		skb = (struct sk_buff *)dev_alloc_skb(len);
1106		if (!skb) {
1107			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
1108					IOC_AND_NETDEV_NAMES_s_s(dev),
1109					__FILE__, __LINE__);
1110			return -ENOMEM;
1111		}
1112
1113		pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
1114					    priv->RcvCtl[ctx].dma,
1115					    priv->RcvCtl[ctx].len,
1116					    PCI_DMA_FROMDEVICE);
1117
1118		skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);
1119
1120		pci_dma_sync_single_for_device(mpt_dev->pcidev,
1121					       priv->RcvCtl[ctx].dma,
1122					       priv->RcvCtl[ctx].len,
1123					       PCI_DMA_FROMDEVICE);
1124
1125		spin_lock_irqsave(&priv->rxfidx_lock, flags);
1126		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1127		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1128
1129	} else {
1130		spin_lock_irqsave(&priv->rxfidx_lock, flags);
1131
1132		priv->RcvCtl[ctx].skb = NULL;
1133
1134		pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
1135				 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
1136		priv->RcvCtl[ctx].dma = 0;
1137
1138		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1139		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1140
1141		skb_put(skb,len);
1142	}
1143
1144	atomic_sub(count, &priv->buckets_out);
1145	priv->total_received += count;
1146
1147	if (priv->mpt_rxfidx_tail >= MPT_LAN_MAX_BUCKETS_OUT) {
1148		printk (KERN_ERR MYNAM ": %s/%s: Yoohoo! mpt_rxfidx_tail = %d, "
1149			"MPT_LAN_MAX_BUCKETS_OUT = %d\n",
1150				IOC_AND_NETDEV_NAMES_s_s(dev),
1151				priv->mpt_rxfidx_tail,
1152				MPT_LAN_MAX_BUCKETS_OUT);
1153
1154		return -1;
1155	}
1156
1157	if (remaining == 0)
1158		printk (KERN_WARNING MYNAM ": %s/%s: WARNING - IOC out of buckets! "
1159			"(priv->buckets_out = %d)\n",
1160			IOC_AND_NETDEV_NAMES_s_s(dev),
1161			atomic_read(&priv->buckets_out));
1162	else if (remaining < 10)
1163		printk (KERN_INFO MYNAM ": %s/%s: IOC says %d buckets left. "
1164			"(priv->buckets_out = %d)\n",
1165			IOC_AND_NETDEV_NAMES_s_s(dev),
1166			remaining, atomic_read(&priv->buckets_out));
1167
1168	if ((remaining < priv->bucketthresh) &&
1169	    ((atomic_read(&priv->buckets_out) - remaining) >
1170	     MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH)) {
1171
1172		printk (KERN_WARNING MYNAM " Mismatch between driver's "
1173			"buckets_out count and fw's BucketsRemaining "
1174			"count has crossed the threshold, issuing a "
1175			"LanReset to clear the fw's hashtable. You may "
1176			"want to check your /var/log/messages for \"CRC "
1177			"error\" event notifications.\n");
1178
1179		mpt_lan_reset(dev);
1180		mpt_lan_wake_post_buckets_task(dev, 0);
1181	}
1182
1183	return mpt_lan_receive_skb(dev, skb);
1184}
1185
1186/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1187/* Simple SGE's only at the moment */
1188
/**
 * mpt_lan_post_receive_buckets - replenish the IOC's receive buckets
 * @priv: per-device LAN state
 *
 * Posts LAN_RECEIVE request frames until buckets_out reaches
 * max_buckets_out, packing as many transaction/SGE pairs into each frame
 * as the request size allows.  Bucket skbs left over from earlier rounds
 * are reused when their length still matches the current bucket length;
 * otherwise they are unmapped, freed, and replaced with fresh skbs.
 * Clears post_buckets_active on exit so the task can be re-armed.
 */
static void
mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
{
	struct net_device *dev = priv->dev;
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	MPT_FRAME_HDR *mf;
	LANReceivePostRequest_t *pRecvReq;
	SGETransaction32_t *pTrans;
	SGESimple64_t *pSimple;
	struct sk_buff *skb;
	dma_addr_t dma;
	u32 curr, buckets, count, max;
	u32 len = (dev->mtu + dev->hard_header_len + 4);
	unsigned long flags;
	int i;

	curr = atomic_read(&priv->buckets_out);
	buckets = (priv->max_buckets_out - curr);

	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			__FUNCTION__, buckets, curr));

	/* Max transaction+SGE pairs that fit in one request frame. */
	max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
			(MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t));

	while (buckets) {
		mf = mpt_get_msg_frame(LanCtx, mpt_dev);
		if (mf == NULL) {
			printk (KERN_ERR "%s: Unable to alloc request frame\n",
				__FUNCTION__);
			dioprintk((KERN_ERR "%s: %u buckets remaining\n",
				 __FUNCTION__, buckets));
			goto out;
		}
		pRecvReq = (LANReceivePostRequest_t *) mf;

		count = buckets;
		if (count > max)
			count = max;

		pRecvReq->Function    = MPI_FUNCTION_LAN_RECEIVE;
		pRecvReq->ChainOffset = 0;
		pRecvReq->MsgFlags    = 0;
		pRecvReq->PortNumber  = priv->pnum;

		pTrans = (SGETransaction32_t *) pRecvReq->SG_List;
		pSimple = NULL;

		for (i = 0; i < count; i++) {
			int ctx;

			/* Pop a free context index; rxfidx_lock covers the
			 * stack and the RcvCtl entry while we (re)arm it. */
			spin_lock_irqsave(&priv->rxfidx_lock, flags);
			if (priv->mpt_rxfidx_tail < 0) {
				printk (KERN_ERR "%s: Can't alloc context\n",
					__FUNCTION__);
				spin_unlock_irqrestore(&priv->rxfidx_lock,
						       flags);
				break;
			}

			ctx = priv->mpt_rxfidx[priv->mpt_rxfidx_tail--];

			/* Stale bucket of the wrong size (e.g. after an MTU
			 * change): release it so it gets reallocated below. */
			skb = priv->RcvCtl[ctx].skb;
			if (skb && (priv->RcvCtl[ctx].len != len)) {
				pci_unmap_single(mpt_dev->pcidev,
						 priv->RcvCtl[ctx].dma,
						 priv->RcvCtl[ctx].len,
						 PCI_DMA_FROMDEVICE);
				dev_kfree_skb(priv->RcvCtl[ctx].skb);
				skb = priv->RcvCtl[ctx].skb = NULL;
			}

			if (skb == NULL) {
				skb = dev_alloc_skb(len);
				if (skb == NULL) {
					printk (KERN_WARNING
						MYNAM "/%s: Can't alloc skb\n",
						__FUNCTION__);
					/* Put the context back before bailing. */
					priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
					spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
					break;
				}

				dma = pci_map_single(mpt_dev->pcidev, skb->data,
						     len, PCI_DMA_FROMDEVICE);

				priv->RcvCtl[ctx].skb = skb;
				priv->RcvCtl[ctx].dma = dma;
				priv->RcvCtl[ctx].len = len;
			}

			spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

			/* Append a transaction + 64-bit simple SGE for this
			 * bucket to the request frame. */
			pTrans->ContextSize   = sizeof(u32);
			pTrans->DetailsLength = 0;
			pTrans->Flags         = 0;
			pTrans->TransactionContext[0] = cpu_to_le32(ctx);

			pSimple = (SGESimple64_t *) pTrans->TransactionDetails;

			pSimple->FlagsLength = cpu_to_le32(
				((MPI_SGE_FLAGS_END_OF_BUFFER |
				  MPI_SGE_FLAGS_SIMPLE_ELEMENT |
				  MPI_SGE_FLAGS_64_BIT_ADDRESSING) << MPI_SGE_FLAGS_SHIFT) | len);
			pSimple->Address.Low = cpu_to_le32((u32) priv->RcvCtl[ctx].dma);
			if (sizeof(dma_addr_t) > sizeof(u32))
				pSimple->Address.High = cpu_to_le32((u32) ((u64) priv->RcvCtl[ctx].dma >> 32));
			else
				pSimple->Address.High = 0;

			pTrans = (SGETransaction32_t *) (pSimple + 1);
		}

		/* Nothing was queued into this frame; give it back. */
		if (pSimple == NULL) {
/**/			printk (KERN_WARNING MYNAM "/%s: No buckets posted\n",
/**/				__FUNCTION__);
			mpt_free_msg_frame(mpt_dev, mf);
			goto out;
		}

		/* Mark the last SGE written as end-of-list. */
		pSimple->FlagsLength |= cpu_to_le32(MPI_SGE_FLAGS_END_OF_LIST << MPI_SGE_FLAGS_SHIFT);

		pRecvReq->BucketCount = cpu_to_le32(i);

/*	printk(KERN_INFO MYNAM ": posting buckets\n   ");
 *	for (i = 0; i < j + 2; i ++)
 *	    printk (" %08x", le32_to_cpu(msg[i]));
 *	printk ("\n");
 */

		mpt_put_msg_frame(LanCtx, mpt_dev, mf);

		priv->total_posted += i;
		buckets -= i;
		atomic_add(i, &priv->buckets_out);
	}

out:
	dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n",
		  __FUNCTION__, buckets, atomic_read(&priv->buckets_out)));
	dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n",
	__FUNCTION__, priv->total_posted, priv->total_received));

	clear_bit(0, &priv->post_buckets_active);
}
1335
1336static void
1337mpt_lan_post_receive_buckets_work(struct work_struct *work)
1338{
1339	mpt_lan_post_receive_buckets(container_of(work, struct mpt_lan_priv,
1340						  post_buckets_task.work));
1341}
1342
1343/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1344static struct net_device *
1345mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
1346{
1347	struct net_device *dev = alloc_fcdev(sizeof(struct mpt_lan_priv));
1348	struct mpt_lan_priv *priv = NULL;
1349	u8 HWaddr[FC_ALEN], *a;
1350
1351	if (!dev)
1352		return NULL;
1353
1354	dev->mtu = MPT_LAN_MTU;
1355
1356	priv = netdev_priv(dev);
1357
1358	priv->dev = dev;
1359	priv->mpt_dev = mpt_dev;
1360	priv->pnum = pnum;
1361
1362	memset(&priv->post_buckets_task, 0, sizeof(priv->post_buckets_task));
1363	INIT_DELAYED_WORK(&priv->post_buckets_task,
1364			  mpt_lan_post_receive_buckets_work);
1365	priv->post_buckets_active = 0;
1366
1367	dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
1368			__LINE__, dev->mtu + dev->hard_header_len + 4));
1369
1370	atomic_set(&priv->buckets_out, 0);
1371	priv->total_posted = 0;
1372	priv->total_received = 0;
1373	priv->max_buckets_out = max_buckets_out;
1374	if (mpt_dev->pfacts[0].MaxLanBuckets < max_buckets_out)
1375		priv->max_buckets_out = mpt_dev->pfacts[0].MaxLanBuckets;
1376
1377	dlprintk((KERN_INFO MYNAM "@%d: MaxLanBuckets=%d, max_buckets_out/priv=%d/%d\n",
1378			__LINE__,
1379			mpt_dev->pfacts[0].MaxLanBuckets,
1380			max_buckets_out,
1381			priv->max_buckets_out));
1382
1383	priv->bucketthresh = priv->max_buckets_out * 2 / 3;
1384	spin_lock_init(&priv->txfidx_lock);
1385	spin_lock_init(&priv->rxfidx_lock);
1386
1387	memset(&priv->stats, 0, sizeof(priv->stats));
1388
1389	/*  Grab pre-fetched LANPage1 stuff. :-) */
1390	a = (u8 *) &mpt_dev->lan_cnfg_page1.HardwareAddressLow;
1391
1392	HWaddr[0] = a[5];
1393	HWaddr[1] = a[4];
1394	HWaddr[2] = a[3];
1395	HWaddr[3] = a[2];
1396	HWaddr[4] = a[1];
1397	HWaddr[5] = a[0];
1398
1399	dev->addr_len = FC_ALEN;
1400	memcpy(dev->dev_addr, HWaddr, FC_ALEN);
1401	memset(dev->broadcast, 0xff, FC_ALEN);
1402
1403	/* The Tx queue is 127 deep on the 909.
1404	 * Give ourselves some breathing room.
1405	 */
1406	priv->tx_max_out = (tx_max_out_p <= MPT_TX_MAX_OUT_LIM) ?
1407			    tx_max_out_p : MPT_TX_MAX_OUT_LIM;
1408
1409	dev->open = mpt_lan_open;
1410	dev->stop = mpt_lan_close;
1411	dev->get_stats = mpt_lan_get_stats;
1412	dev->set_multicast_list = NULL;
1413	dev->change_mtu = mpt_lan_change_mtu;
1414	dev->hard_start_xmit = mpt_lan_sdu_send;
1415
1416/* Not in 2.3.42. Need 2.3.45+ */
1417	dev->tx_timeout = mpt_lan_tx_timeout;
1418	dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT;
1419
1420	dlprintk((KERN_INFO MYNAM ": Finished registering dev "
1421		"and setting initial values\n"));
1422
1423	SET_MODULE_OWNER(dev);
1424
1425	if (register_netdev(dev) != 0) {
1426		free_netdev(dev);
1427		dev = NULL;
1428	}
1429	return dev;
1430}
1431
1432static int
1433mptlan_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1434{
1435	MPT_ADAPTER 		*ioc = pci_get_drvdata(pdev);
1436	struct net_device	*dev;
1437	int			i;
1438
1439	for (i = 0; i < ioc->facts.NumberOfPorts; i++) {
1440		printk(KERN_INFO MYNAM ": %s: PortNum=%x, "
1441		       "ProtocolFlags=%02Xh (%c%c%c%c)\n",
1442		       ioc->name, ioc->pfacts[i].PortNumber,
1443		       ioc->pfacts[i].ProtocolFlags,
1444		       MPT_PROTOCOL_FLAGS_c_c_c_c(
1445			       ioc->pfacts[i].ProtocolFlags));
1446
1447		if (!(ioc->pfacts[i].ProtocolFlags &
1448					MPI_PORTFACTS_PROTOCOL_LAN)) {
1449			printk(KERN_INFO MYNAM ": %s: Hmmm... LAN protocol "
1450			       "seems to be disabled on this adapter port!\n",
1451			       ioc->name);
1452			continue;
1453		}
1454
1455		dev = mpt_register_lan_device(ioc, i);
1456		if (!dev) {
1457			printk(KERN_ERR MYNAM ": %s: Unable to register "
1458			       "port%d as a LAN device\n", ioc->name,
1459			       ioc->pfacts[i].PortNumber);
1460			continue;
1461		}
1462
1463		printk(KERN_INFO MYNAM ": %s: Fusion MPT LAN device "
1464		       "registered as '%s'\n", ioc->name, dev->name);
1465		printk(KERN_INFO MYNAM ": %s/%s: "
1466		       "LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
1467		       IOC_AND_NETDEV_NAMES_s_s(dev),
1468		       dev->dev_addr[0], dev->dev_addr[1],
1469		       dev->dev_addr[2], dev->dev_addr[3],
1470		       dev->dev_addr[4], dev->dev_addr[5]);
1471
1472		ioc->netdev = dev;
1473
1474		return 0;
1475	}
1476
1477	return -ENODEV;
1478}
1479
1480static void
1481mptlan_remove(struct pci_dev *pdev)
1482{
1483	MPT_ADAPTER 		*ioc = pci_get_drvdata(pdev);
1484	struct net_device	*dev = ioc->netdev;
1485
1486	if(dev != NULL) {
1487		unregister_netdev(dev);
1488		free_netdev(dev);
1489	}
1490}
1491
/* mptbase device-driver hooks: invoked when an MPT adapter is added or
 * removed from the system. */
static struct mpt_pci_driver mptlan_driver = {
	.probe		= mptlan_probe,
	.remove		= mptlan_remove,
};
1496
1497static int __init mpt_lan_init (void)
1498{
1499	show_mptmod_ver(LANAME, LANVER);
1500
1501	if ((LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER)) <= 0) {
1502		printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n");
1503		return -EBUSY;
1504	}
1505
1506	/* Set the callback index to be used by driver core for turbo replies */
1507	mpt_lan_index = LanCtx;
1508
1509	dlprintk((KERN_INFO MYNAM ": assigned context of %d\n", LanCtx));
1510
1511	if (mpt_reset_register(LanCtx, mpt_lan_ioc_reset)) {
1512		printk(KERN_ERR MYNAM ": Eieee! unable to register a reset "
1513		       "handler with mptbase! The world is at an end! "
1514		       "Everything is fading to black! Goodbye.\n");
1515		return -EBUSY;
1516	}
1517
1518	dlprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n"));
1519
1520	if (mpt_device_driver_register(&mptlan_driver, MPTLAN_DRIVER))
1521		dprintk((KERN_INFO MYNAM ": failed to register dd callbacks\n"));
1522	return 0;
1523}
1524
1525static void __exit mpt_lan_exit(void)
1526{
1527	mpt_device_driver_deregister(MPTLAN_DRIVER);
1528	mpt_reset_deregister(LanCtx);
1529
1530	if (LanCtx >= 0) {
1531		mpt_deregister(LanCtx);
1532		LanCtx = -1;
1533		mpt_lan_index = 0;
1534	}
1535}
1536
1537module_init(mpt_lan_init);
1538module_exit(mpt_lan_exit);
1539
1540/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_type_trans - classify an incoming FC LAN frame
 * @skb: received frame, starting at the mpt_lan_ohdr
 * @dev: receiving net device
 *
 * FC-encapsulation analogue of eth_type_trans(): strips the mpt_lan_ohdr,
 * sets skb->pkt_type from the destination address, and for IP/ARP frames
 * also strips the LLC/SNAP header so the stack sees the bare ethertype.
 * Returns the protocol ID (network byte order) for skb->protocol.
 */
static unsigned short
mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data;
	struct fcllc *fcllc;

	skb_reset_mac_header(skb);
	skb_pull(skb, sizeof(struct mpt_lan_ohdr));

	/* Work around a firmware bug that byte-swaps broadcast frames:
	 * a dtype of 0xffff flags the swap; undo it in place. */
	if (fch->dtype == htons(0xffff)) {
		u32 *p = (u32 *) fch;

		swab32s(p + 0);
		swab32s(p + 1);
		swab32s(p + 2);
		swab32s(p + 3);

		printk (KERN_WARNING MYNAM ": %s: WARNING - Broadcast swap F/W bug detected!\n",
				NETDEV_PTR_TO_IOC_NAME_s(dev));
		printk (KERN_WARNING MYNAM ": Please update sender @ MAC_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
				fch->saddr[0], fch->saddr[1], fch->saddr[2],
				fch->saddr[3], fch->saddr[4], fch->saddr[5]);
	}

	/* Low bit of the first destination byte marks a group address. */
	if (*fch->daddr & 1) {
		if (!memcmp(fch->daddr, dev->broadcast, FC_ALEN)) {
			skb->pkt_type = PACKET_BROADCAST;
		} else {
			skb->pkt_type = PACKET_MULTICAST;
		}
	} else {
		if (memcmp(fch->daddr, dev->dev_addr, FC_ALEN)) {
			skb->pkt_type = PACKET_OTHERHOST;
		} else {
			skb->pkt_type = PACKET_HOST;
		}
	}

	fcllc = (struct fcllc *)skb->data;

/* Optional workaround: record senders whose ARP frames carry a non-RFC
 * 2625 source NAA in the mpt_bad_naa list (under bad_naa_lock). */
#ifdef QLOGIC_NAA_WORKAROUND
{
	u16 source_naa = fch->stype, found = 0;


	if ((source_naa & 0xF000) == 0)
		source_naa = swab16(source_naa);

	if (fcllc->ethertype == htons(ETH_P_ARP))
	    dlprintk ((KERN_INFO "mptlan/type_trans: got arp req/rep w/ naa of "
		      "%04x.\n", source_naa));

	if ((fcllc->ethertype == htons(ETH_P_ARP)) &&
	   ((source_naa >> 12) !=  MPT_LAN_NAA_RFC2625)){
		struct NAA_Hosed *nh, *prevnh;
		int i;

		dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep from "
			  "system with non-RFC 2625 NAA value (%04x).\n",
			  source_naa));

		write_lock_irq(&bad_naa_lock);
		for (prevnh = nh = mpt_bad_naa; nh != NULL;
		     prevnh=nh, nh=nh->next) {
			if ((nh->ieee[0] == fch->saddr[0]) &&
			    (nh->ieee[1] == fch->saddr[1]) &&
			    (nh->ieee[2] == fch->saddr[2]) &&
			    (nh->ieee[3] == fch->saddr[3]) &&
			    (nh->ieee[4] == fch->saddr[4]) &&
			    (nh->ieee[5] == fch->saddr[5])) {
				found = 1;
				dlprintk ((KERN_INFO "mptlan/type_trans: ARP Re"
					 "q/Rep w/ bad NAA from system already"
					 " in DB.\n"));
				break;
			}
		}

		if ((!found) && (nh == NULL)) {

			nh = kmalloc(sizeof(struct NAA_Hosed), GFP_KERNEL);
			dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep w/"
				 " bad NAA from system not yet in DB.\n"));

			if (nh != NULL) {
				nh->next = NULL;
				if (!mpt_bad_naa)
					mpt_bad_naa = nh;
				if (prevnh)
					prevnh->next = nh;

				nh->NAA = source_naa; /* Set the S_NAA value. */
				for (i = 0; i < FC_ALEN; i++)
					nh->ieee[i] = fch->saddr[i];
				dlprintk ((KERN_INFO "Got ARP from %02x:%02x:%02x:%02x:"
					  "%02x:%02x with non-compliant S_NAA value.\n",
					  fch->saddr[0], fch->saddr[1], fch->saddr[2],
					  fch->saddr[3], fch->saddr[4],fch->saddr[5]));
			} else {
				printk (KERN_ERR "mptlan/type_trans: Unable to"
					" kmalloc a NAA_Hosed struct.\n");
			}
		} else if (!found) {
			printk (KERN_ERR "mptlan/type_trans: found not"
				" set, but nh isn't null. Evil "
				"funkiness abounds.\n");
		}
		write_unlock_irq(&bad_naa_lock);
	}
}
#endif

	/* Strip the SNAP header from ARP packets since we don't
	 * pass them through to the 802.2/SNAP layers.
	 */
	if (fcllc->dsap == EXTENDED_SAP &&
		(fcllc->ethertype == htons(ETH_P_IP) ||
		 fcllc->ethertype == htons(ETH_P_ARP))) {
		/* fcllc still points at valid skb data after the pull. */
		skb_pull(skb, sizeof(struct fcllc));
		return fcllc->ethertype;
	}

	return htons(ETH_P_802_2);
}
1665
1666/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1667