/*
 *  linux/drivers/message/fusion/mptlan.c
 *      IP Over Fibre Channel device driver.
 *      For use with LSI Fibre Channel PCI chip/adapters
 *      running LSI Fusion MPT (Message Passing Technology) firmware.
 *
 *  Copyright (c) 2000-2008 LSI Corporation
 *  (mailto:DL-MPTFusionLinux@lsi.com)
 *
 */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; version 2 of the License.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    NO WARRANTY
    THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
    CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
    LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
    solely responsible for determining the appropriateness of using and
    distributing the Program and assumes all risks associated with its
    exercise of rights under this Agreement, including but not limited to
    the risks and costs of program errors, damage to or loss of data,
    programs or equipment, and unavailability or interruption of operations.

    DISCLAIMER OF LIABILITY
    NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
    DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
    ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
    TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
    USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
    HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * Define statements used for debugging
 */
//#define MPT_LAN_IO_DEBUG

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/

#include "mptlan.h"
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/slab.h>

#define my_VERSION	MPT_LINUX_VERSION_COMMON
#define MYNAM		"mptlan"

MODULE_LICENSE("GPL");
MODULE_VERSION(my_VERSION);

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * MPT LAN message sizes without variable part.
 */
#define MPT_LAN_RECEIVE_POST_REQUEST_SIZE \
	(sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION))

#define MPT_LAN_TRANSACTION32_SIZE \
	(sizeof(SGETransaction32_t) - sizeof(u32))

/*
 *  Fusion MPT LAN private structures
 */

struct BufferControl {
	struct sk_buff	*skb;
	dma_addr_t	dma;
	unsigned int	len;
};

struct mpt_lan_priv {
	MPT_ADAPTER *mpt_dev;
	u8 pnum; /* Port number in the IOC. This is not a Unix network port! */

	atomic_t buckets_out;		/* number of unused buckets on IOC */
	int bucketthresh;		/* Send more when this many left */

	int *mpt_txfidx; /* Free Tx Context list */
	int mpt_txfidx_tail;
	spinlock_t txfidx_lock;

	int *mpt_rxfidx; /* Free Rx Context list */
	int mpt_rxfidx_tail;
	spinlock_t rxfidx_lock;

	struct BufferControl *RcvCtl;	/* Receive BufferControl structs */
	struct BufferControl *SendCtl;	/* Send BufferControl structs */

	int max_buckets_out;		/* Max buckets to send to IOC */
	int tx_max_out;			/* IOC's Tx queue len */

	u32 total_posted;
	u32 total_received;

	struct delayed_work post_buckets_task;
	struct net_device *dev;
	unsigned long post_buckets_active;
};

struct mpt_lan_ohdr {
	u16	dtype;
	u8	daddr[FC_ALEN];
	u16	stype;
	u8	saddr[FC_ALEN];
};

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/

/*
 *  Forward protos...
 */
static int  lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
		       MPT_FRAME_HDR *reply);
static int  mpt_lan_open(struct net_device *dev);
static int  mpt_lan_reset(struct net_device *dev);
static int  mpt_lan_close(struct net_device *dev);
static void mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv);
static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
					   int priority);
static int  mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
static int  mpt_lan_receive_post_reply(struct net_device *dev,
				       LANReceivePostReply_t *pRecvRep);
static int  mpt_lan_send_turbo(struct net_device *dev, u32 tmsg);
static int  mpt_lan_send_reply(struct net_device *dev,
			       LANSendReply_t *pSendRep);
static int  mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
static int  mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
static unsigned short mpt_lan_type_trans(struct sk_buff *skb,
					 struct net_device *dev);

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *  Fusion MPT LAN private data
 */
static u8 LanCtx = MPT_MAX_PROTOCOL_DRIVERS;

static u32 max_buckets_out = 127;
static u32 tx_max_out_p = 127 - 16;

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	lan_reply - Handle all data sent from the hardware.
 *	@ioc: Pointer to MPT_ADAPTER structure
 *	@mf: Pointer to original MPT request frame (NULL if TurboReply)
 *	@reply: Pointer to MPT reply frame
 *
 *	Returns 1 indicating original alloc'd request frame ptr
 *	should be freed, or 0 if it shouldn't.
 */
static int
lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
{
	struct net_device *dev = ioc->netdev;
	int FreeReqFrame = 0;

	dioprintk((KERN_INFO MYNAM ": %s/%s: Got reply.\n",
		  IOC_AND_NETDEV_NAMES_s_s(dev)));

//	dioprintk((KERN_INFO MYNAM "@lan_reply: mf = %p, reply = %p\n",
//			mf, reply));

	if (mf == NULL) {
		u32 tmsg = CAST_PTR_TO_U32(reply);

		dioprintk((KERN_INFO MYNAM ": %s/%s: @lan_reply, tmsg %08x\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				tmsg));

		switch (GET_LAN_FORM(tmsg)) {

		// NOTE!  (Optimization) First case here is now caught in
		//  mptbase.c::mpt_interrupt() routine and callback here
		//  is now skipped for this case!

		case LAN_REPLY_FORM_SEND_SINGLE:
//			dioprintk((MYNAM "/lan_reply: "
//				  "calling mpt_lan_send_reply (turbo)\n"));

			// Potential BUG here?
			//	FreeReqFrame = mpt_lan_send_turbo(dev, tmsg);
			//  If/when mpt_lan_send_turbo would return 1 here,
			//  calling routine (mptbase.c|mpt_interrupt)
			//  would Oops because mf has already been set
			//  to NULL.  So after return from this func,
			//  mpt_interrupt() will attempt to put (NULL) mf ptr
			//  item back onto its adapter FreeQ - Oops!:-(
			//  It's Ok, since mpt_lan_send_turbo() *currently*
			//  always returns 0, but..., just in case:

			(void) mpt_lan_send_turbo(dev, tmsg);
			FreeReqFrame = 0;

			break;

		case LAN_REPLY_FORM_RECEIVE_SINGLE:
//			dioprintk((KERN_INFO MYNAM "@lan_reply: "
//				  "rcv-Turbo = %08x\n", tmsg));
			mpt_lan_receive_post_turbo(dev, tmsg);
			break;

		default:
			printk (KERN_ERR MYNAM "/lan_reply: Got a turbo reply "
				"that I don't know what to do with\n");

			/* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */

			break;
		}

		return FreeReqFrame;
	}

//	msg = (u32 *) reply;
//	dioprintk((KERN_INFO MYNAM "@lan_reply: msg = %08x %08x %08x %08x\n",
//		  le32_to_cpu(msg[0]), le32_to_cpu(msg[1]),
//		  le32_to_cpu(msg[2]), le32_to_cpu(msg[3])));
//	dioprintk((KERN_INFO MYNAM "@lan_reply: Function = %02xh\n",
//		  reply->u.hdr.Function));

	switch (reply->u.hdr.Function) {

	case MPI_FUNCTION_LAN_SEND:
	{
		LANSendReply_t *pSendRep;

		pSendRep = (LANSendReply_t *) reply;
		FreeReqFrame = mpt_lan_send_reply(dev, pSendRep);
		break;
	}

	case MPI_FUNCTION_LAN_RECEIVE:
	{
		LANReceivePostReply_t *pRecvRep;

		pRecvRep = (LANReceivePostReply_t *) reply;
		if (pRecvRep->NumberOfContexts) {
			mpt_lan_receive_post_reply(dev, pRecvRep);
			if (!(pRecvRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
				FreeReqFrame = 1;
		} else
			dioprintk((KERN_INFO MYNAM "@lan_reply: zero context "
				  "ReceivePostReply received.\n"));
		break;
	}

	case MPI_FUNCTION_LAN_RESET:
		/* Just a default reply. Might want to check it to
		 * make sure that everything went ok.
		 */
		FreeReqFrame = 1;
		break;

	case MPI_FUNCTION_EVENT_NOTIFICATION:
	case MPI_FUNCTION_EVENT_ACK:
		/*  _EVENT_NOTIFICATION should NOT come down this path any more.
		 *  Should be routed to mpt_lan_event_process(), but just in case...
		 */
		FreeReqFrame = 1;
		break;

	default:
		printk (KERN_ERR MYNAM "/lan_reply: Got a non-turbo "
			"reply that I don't know what to do with\n");

		/* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */
		FreeReqFrame = 1;

		break;
	}

	return FreeReqFrame;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
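/**
 *	mpt_lan_ioc_reset - Handle IOC reset notifications from mptbase
 *	@ioc: Pointer to MPT_ADAPTER structure
 *	@reset_phase: MPT_IOC_SETUP_RESET, MPT_IOC_PRE_RESET or MPT_IOC_POST_RESET
 *
 *	On pre-reset: stop the Tx queue, zero the bucket count, and rebuild
 *	the free Rx context list. On post-reset: repost receive buckets and
 *	wake the Tx queue. Always returns 1.
 */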
static int
mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
{
	struct net_device *dev = ioc->netdev;
	struct mpt_lan_priv *priv;

	if (dev == NULL)
		return(1);
	else
		priv = netdev_priv(dev);

	dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n",
			reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
			reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));

	if (priv->mpt_rxfidx == NULL)
		return (1);

	if (reset_phase == MPT_IOC_SETUP_RESET) {
		;
	} else if (reset_phase == MPT_IOC_PRE_RESET) {
		int i;
		unsigned long flags;

		netif_stop_queue(dev);

		dlprintk ((KERN_INFO "mptlan/ioc_reset: called netif_stop_queue for %s.\n", dev->name));

		atomic_set(&priv->buckets_out, 0);

		/* Reset Rx Free Tail index and re-populate the queue. */
		spin_lock_irqsave(&priv->rxfidx_lock, flags);
		priv->mpt_rxfidx_tail = -1;
		for (i = 0; i < priv->max_buckets_out; i++)
			priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
	} else {
		mpt_lan_post_receive_buckets(priv);
		netif_wake_queue(dev);
	}

	return 1;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
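/**
 *	mpt_lan_event_process - MPT event notification callback
 *	@ioc: Pointer to MPT_ADAPTER structure
 *	@pEvReply: Event notification reply frame
 *
 *	Currently a no-op for every event type; AckRequired handling is
 *	done in mptbase.c. Always returns 1.
 */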
static int
mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
{
	dlprintk((KERN_INFO MYNAM ": MPT event routed to LAN driver!\n"));

	switch (le32_to_cpu(pEvReply->Event)) {
	case MPI_EVENT_NONE:				/* 00 */
	case MPI_EVENT_LOG_DATA:			/* 01 */
	case MPI_EVENT_STATE_CHANGE:			/* 02 */
	case MPI_EVENT_UNIT_ATTENTION:			/* 03 */
	case MPI_EVENT_IOC_BUS_RESET:			/* 04 */
	case MPI_EVENT_EXT_BUS_RESET:			/* 05 */
	case MPI_EVENT_RESCAN:				/* 06 */
		/* Ok, do we need to do anything here? As far as
		   I can tell, this is when a new device gets added
		   to the loop. */
	case MPI_EVENT_LINK_STATUS_CHANGE:		/* 07 */
	case MPI_EVENT_LOOP_STATE_CHANGE:		/* 08 */
	case MPI_EVENT_LOGOUT:				/* 09 */
	case MPI_EVENT_EVENT_CHANGE:			/* 0A */
	default:
		break;
	}

	/*
	 *  NOTE: pEvent->AckRequired handling now done in mptbase.c;
	 *  Do NOT do it here now!
	 */

	return 1;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
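/**
 *	mpt_lan_open - net_device open (ifup) handler
 *	@dev: Pointer to the network device
 *
 *	Resets the LAN port, allocates the Tx/Rx free-context lists and
 *	BufferControl arrays, posts the initial receive buckets, registers
 *	for event notifications and starts the Tx queue.
 *
 *	Returns 0 on success or -ENOMEM if an allocation fails.
 */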
static int
mpt_lan_open(struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	int i;

	if (mpt_lan_reset(dev) != 0) {
		MPT_ADAPTER *mpt_dev = priv->mpt_dev;

		printk (KERN_WARNING MYNAM "/lan_open: lan_reset failed.");

		if (mpt_dev->active)
			printk ("The ioc is active. Perhaps it needs to be"
				" reset?\n");
		else
			printk ("The ioc is inactive, most likely in the "
				"process of being reset. Please try again in "
				"a moment.\n");
	}

	priv->mpt_txfidx = kmalloc(priv->tx_max_out * sizeof(int), GFP_KERNEL);
	if (priv->mpt_txfidx == NULL)
		goto out;
	priv->mpt_txfidx_tail = -1;

	priv->SendCtl = kcalloc(priv->tx_max_out, sizeof(struct BufferControl),
				GFP_KERNEL);
	if (priv->SendCtl == NULL)
		goto out_mpt_txfidx;
	for (i = 0; i < priv->tx_max_out; i++)
		priv->mpt_txfidx[++priv->mpt_txfidx_tail] = i;

	dlprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n"));

	priv->mpt_rxfidx = kmalloc(priv->max_buckets_out * sizeof(int),
				   GFP_KERNEL);
	if (priv->mpt_rxfidx == NULL)
		goto out_SendCtl;
	priv->mpt_rxfidx_tail = -1;

	priv->RcvCtl = kcalloc(priv->max_buckets_out,
			       sizeof(struct BufferControl),
			       GFP_KERNEL);
	if (priv->RcvCtl == NULL)
		goto out_mpt_rxfidx;
	for (i = 0; i < priv->max_buckets_out; i++)
		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;

/**/	dlprintk((KERN_INFO MYNAM "/lo: txfidx contains - "));
/**/	for (i = 0; i < priv->tx_max_out; i++)
/**/		dlprintk((" %xh", priv->mpt_txfidx[i]));
/**/	dlprintk(("\n"));

	dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));

	mpt_lan_post_receive_buckets(priv);
	printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
			IOC_AND_NETDEV_NAMES_s_s(dev));

	if (mpt_event_register(LanCtx, mpt_lan_event_process) != 0) {
		printk (KERN_WARNING MYNAM "/lo: Unable to register for Event"
			" Notifications. This is a bad thing! We're not going "
			"to go ahead, but I'd be leery of system stability at "
			"this point.\n");
	}

	netif_start_queue(dev);
	dlprintk((KERN_INFO MYNAM "/lo: Done.\n"));

	return 0;
out_mpt_rxfidx:
	kfree(priv->mpt_rxfidx);
	priv->mpt_rxfidx = NULL;
out_SendCtl:
	kfree(priv->SendCtl);
	priv->SendCtl = NULL;
out_mpt_txfidx:
	kfree(priv->mpt_txfidx);
	priv->mpt_txfidx = NULL;
out:	return -ENOMEM;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Send a LanReset message to the FW. This should result in the FW returning
   any buckets it still has. */
static int
mpt_lan_reset(struct net_device *dev)
{
	MPT_FRAME_HDR *mf;
	LANResetRequest_t *pResetReq;
	struct mpt_lan_priv *priv = netdev_priv(dev);

	mf = mpt_get_msg_frame(LanCtx, priv->mpt_dev);

	if (mf == NULL) {
/*		dlprintk((KERN_ERR MYNAM "/reset: Evil funkiness abounds! "
		"Unable to allocate a request frame.\n"));
*/
		return -1;
	}

	pResetReq = (LANResetRequest_t *) mf;

	pResetReq->Function	= MPI_FUNCTION_LAN_RESET;
	pResetReq->ChainOffset	= 0;
	pResetReq->Reserved	= 0;
	pResetReq->PortNumber	= priv->pnum;
	pResetReq->MsgFlags	= 0;
	pResetReq->Reserved2	= 0;

	mpt_put_msg_frame(LanCtx, priv->mpt_dev, mf);

	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
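/**
 *	mpt_lan_close - net_device stop (ifdown) handler
 *	@dev: Pointer to the network device
 *
 *	Deregisters for events, resets the LAN port so the FW returns its
 *	buckets, waits up to two seconds for outstanding buckets to drain,
 *	then unmaps and frees all remaining Rx/Tx skbs and bookkeeping.
 */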
static int
mpt_lan_close(struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	unsigned long timeout;
	int i;

	dlprintk((KERN_INFO MYNAM ": mpt_lan_close called\n"));

	mpt_event_deregister(LanCtx);

	dlprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets "
		  "since driver was loaded, %d still out\n",
		  priv->total_posted, atomic_read(&priv->buckets_out)));

	netif_stop_queue(dev);

	mpt_lan_reset(dev);

	timeout = jiffies + 2 * HZ;
	while (atomic_read(&priv->buckets_out) && time_before(jiffies, timeout))
		schedule_timeout_interruptible(1);

	for (i = 0; i < priv->max_buckets_out; i++) {
		if (priv->RcvCtl[i].skb != NULL) {
/**/			dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x "
/**/				  "is still out\n", i));
			pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[i].dma,
					 priv->RcvCtl[i].len,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(priv->RcvCtl[i].skb);
		}
	}

	kfree(priv->RcvCtl);
	kfree(priv->mpt_rxfidx);

	for (i = 0; i < priv->tx_max_out; i++) {
		if (priv->SendCtl[i].skb != NULL) {
			pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[i].dma,
					 priv->SendCtl[i].len,
					 PCI_DMA_TODEVICE);
			dev_kfree_skb(priv->SendCtl[i].skb);
		}
	}

	kfree(priv->SendCtl);
	kfree(priv->mpt_txfidx);

	atomic_set(&priv->buckets_out, 0);

	printk(KERN_INFO MYNAM ": %s/%s: interface down & inactive\n",
			IOC_AND_NETDEV_NAMES_s_s(dev));

	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
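/* Bounds-check and apply a new MTU (ndo_change_mtu handler). */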
static int
mpt_lan_change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < MPT_LAN_MIN_MTU) || (new_mtu > MPT_LAN_MAX_MTU))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Tx timeout handler. */
static void
mpt_lan_tx_timeout(struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;

	if (mpt_dev->active) {
		dlprintk (("mptlan/tx_timeout: calling netif_wake_queue for %s.\n", dev->name));
		netif_wake_queue(dev);
	}
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
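/* Reclaim the single skb completed by a LAN_SEND turbo reply: update
 * Tx stats, unmap its DMA buffer, free the skb and return its context
 * to the free Tx list. Always returns 0 (no request frame to free).
 */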
//static inline int
static int
mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *sent;
	unsigned long flags;
	u32 ctx;

	ctx = GET_LAN_BUFFER_CONTEXT(tmsg);
	sent = priv->SendCtl[ctx].skb;

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += sent->len;

	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			__func__, sent));

	priv->SendCtl[ctx].skb = NULL;
	pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
			 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
	dev_kfree_skb_irq(sent);

	spin_lock_irqsave(&priv->txfidx_lock, flags);
	priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
	spin_unlock_irqrestore(&priv->txfidx_lock, flags);

	netif_wake_queue(dev);
	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
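/* Handle a full (non-turbo) LAN_SEND reply, which may complete several
 * sends at once: check IOCStatus, then reclaim each completed context's
 * skb and DMA mapping. Returns 1 if the request frame should be freed.
 */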
static int
mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *sent;
	unsigned long flags;
	int FreeReqFrame = 0;
	u32 *pContext;
	u32 ctx;
	u8 count;

	count = pSendRep->NumberOfContexts;

	dioprintk((KERN_INFO MYNAM ": send_reply: IOCStatus: %04x\n",
		 le16_to_cpu(pSendRep->IOCStatus)));

	/* Add check for Loginfo Flag in IOCStatus */

	switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) {
	case MPI_IOCSTATUS_SUCCESS:
		dev->stats.tx_packets += count;
		break;

	case MPI_IOCSTATUS_LAN_CANCELED:
	case MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED:
		break;

	case MPI_IOCSTATUS_INVALID_SGL:
		dev->stats.tx_errors += count;
		printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n",
				IOC_AND_NETDEV_NAMES_s_s(dev));
		goto out;

	default:
		dev->stats.tx_errors += count;
		break;
	}

	pContext = &pSendRep->BufferContext;

	spin_lock_irqsave(&priv->txfidx_lock, flags);
	while (count > 0) {
		ctx = GET_LAN_BUFFER_CONTEXT(le32_to_cpu(*pContext));

		sent = priv->SendCtl[ctx].skb;
		dev->stats.tx_bytes += sent->len;

		dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				__func__, sent));

		priv->SendCtl[ctx].skb = NULL;
		pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
				 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
		dev_kfree_skb_irq(sent);

		priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;

		pContext++;
		count--;
	}
	spin_unlock_irqrestore(&priv->txfidx_lock, flags);

out:
	if (!(pSendRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
		FreeReqFrame = 1;

	netif_wake_queue(dev);
	return FreeReqFrame;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
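/* ndo_start_xmit handler: take a free Tx context and request frame,
 * DMA-map the skb, and build a LANSendRequest carrying one transaction
 * element (NAA + destination MAC) and a single 64-bit simple SGE.
 */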
static int
mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	MPT_FRAME_HDR *mf;
	LANSendRequest_t *pSendReq;
	SGETransaction32_t *pTrans;
	SGESimple64_t *pSimple;
	const unsigned char *mac;
	dma_addr_t dma;
	unsigned long flags;
	int ctx;
	u16 cur_naa = 0x1000;

	dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
			__func__, skb));

	spin_lock_irqsave(&priv->txfidx_lock, flags);
	if (priv->mpt_txfidx_tail < 0) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&priv->txfidx_lock, flags);

		printk (KERN_ERR "%s: no tx context available: %u\n",
			__func__, priv->mpt_txfidx_tail);
		return NETDEV_TX_BUSY;
	}

	mf = mpt_get_msg_frame(LanCtx, mpt_dev);
	if (mf == NULL) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&priv->txfidx_lock, flags);

		printk (KERN_ERR "%s: Unable to alloc request frame\n",
			__func__);
		return NETDEV_TX_BUSY;
	}

	ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];
	spin_unlock_irqrestore(&priv->txfidx_lock, flags);

//	dioprintk((KERN_INFO MYNAM ": %s/%s: Creating new msg frame (send).\n",
//			IOC_AND_NETDEV_NAMES_s_s(dev)));

	pSendReq = (LANSendRequest_t *) mf;

	/* Set the mac.raw pointer, since this apparently isn't getting
	 * done before we get the skb. Pull the data pointer past the mac data.
	 */
	skb_reset_mac_header(skb);
	skb_pull(skb, 12);

	dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len,
			     PCI_DMA_TODEVICE);

	priv->SendCtl[ctx].skb = skb;
	priv->SendCtl[ctx].dma = dma;
	priv->SendCtl[ctx].len = skb->len;

	/* Message Header */
	pSendReq->Reserved    = 0;
	pSendReq->Function    = MPI_FUNCTION_LAN_SEND;
	pSendReq->ChainOffset = 0;
	pSendReq->Reserved2   = 0;
	pSendReq->MsgFlags    = 0;
	pSendReq->PortNumber  = priv->pnum;

	/* Transaction Context Element */
	pTrans = (SGETransaction32_t *) pSendReq->SG_List;

	/* No Flags, 8 bytes of Details, 32bit Context (bloody turbo replies) */
	pTrans->ContextSize   = sizeof(u32);
	pTrans->DetailsLength = 2 * sizeof(u32);
	pTrans->Flags         = 0;
	pTrans->TransactionContext[0] = cpu_to_le32(ctx);

//	dioprintk((KERN_INFO MYNAM ": %s/%s: BC = %08x, skb = %p, buff = %p\n",
//			IOC_AND_NETDEV_NAMES_s_s(dev),
//			ctx, skb, skb->data));

	mac = skb_mac_header(skb);

	pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa         << 16) |
						    (mac[0] <<  8) |
						    (mac[1] <<  0));
	pTrans->TransactionDetails[1] = cpu_to_le32((mac[2] << 24) |
						    (mac[3] << 16) |
						    (mac[4] <<  8) |
						    (mac[5] <<  0));

	pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2];

	/* If we ever decide to send more than one Simple SGE per LANSend, then
	   we will need to make sure that LAST_ELEMENT only gets set on the
	   last one. Otherwise, bad voodoo and evil funkiness will commence. */
	pSimple->FlagsLength = cpu_to_le32(
			((MPI_SGE_FLAGS_LAST_ELEMENT |
			  MPI_SGE_FLAGS_END_OF_BUFFER |
			  MPI_SGE_FLAGS_SIMPLE_ELEMENT |
			  MPI_SGE_FLAGS_SYSTEM_ADDRESS |
			  MPI_SGE_FLAGS_HOST_TO_IOC |
			  MPI_SGE_FLAGS_64_BIT_ADDRESSING |
			  MPI_SGE_FLAGS_END_OF_LIST) << MPI_SGE_FLAGS_SHIFT) |
			skb->len);
	pSimple->Address.Low = cpu_to_le32((u32) dma);
	if (sizeof(dma_addr_t) > sizeof(u32))
		pSimple->Address.High = cpu_to_le32((u32) ((u64) dma >> 32));
	else
		pSimple->Address.High = 0;

	mpt_put_msg_frame (LanCtx, mpt_dev, mf);
	dev->trans_start = jiffies;

	dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. FlagsLength = %08x.\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			le32_to_cpu(pSimple->FlagsLength)));

	return NETDEV_TX_OK;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * @priority: 0 = put it on the timer queue, 1 = put it on the immediate queue
 */
static void
mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);

	if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
		if (priority) {
			schedule_delayed_work(&priv->post_buckets_task, 0);
		} else {
			schedule_delayed_work(&priv->post_buckets_task, 1);
			dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
				   "timer.\n"));
		}
		dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n",
			   IOC_AND_NETDEV_NAMES_s_s(dev) ));
	}
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
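/* Deliver a received skb to the stack: set the protocol, update Rx
 * stats and call netif_rx(), then schedule a bucket repost if the
 * count of outstanding buckets has dropped below the threshold.
 */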
static int
mpt_lan_receive_skb(struct net_device *dev, struct sk_buff *skb)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);

	skb->protocol = mpt_lan_type_trans(skb, dev);

	dioprintk((KERN_INFO MYNAM ": %s/%s: Incoming packet (%d bytes) "
		 "delivered to upper level.\n",
			IOC_AND_NETDEV_NAMES_s_s(dev), skb->len));

	dev->stats.rx_bytes += skb->len;
	dev->stats.rx_packets++;

	skb->dev = dev;
	netif_rx(skb);

	dioprintk((MYNAM "/receive_skb: %d buckets remaining\n",
		 atomic_read(&priv->buckets_out)));

	if (atomic_read(&priv->buckets_out) < priv->bucketthresh)
		mpt_lan_wake_post_buckets_task(dev, 1);

	dioprintk((KERN_INFO MYNAM "/receive_post_reply: %d buckets "
		  "remaining, %d received back since sod\n",
		  atomic_read(&priv->buckets_out), priv->total_received));

	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
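/* Handle a LAN_RECEIVE turbo reply carrying a single bucket: copy
 * small packets into a fresh skb and recycle the bucket in place,
 * otherwise unmap the bucket and pass its skb up directly.
 */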
//static inline int
static int
mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *skb, *old_skb;
	unsigned long flags;
	u32 ctx, len;

	ctx = GET_LAN_BUCKET_CONTEXT(tmsg);
	skb = priv->RcvCtl[ctx].skb;

	len = GET_LAN_PACKET_LENGTH(tmsg);

	if (len < MPT_LAN_RX_COPYBREAK) {
		old_skb = skb;

		skb = (struct sk_buff *)dev_alloc_skb(len);
		if (!skb) {
			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
					IOC_AND_NETDEV_NAMES_s_s(dev),
					__FILE__, __LINE__);
			return -ENOMEM;
		}

		pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
					    priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);

		skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);

		pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
					       priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
		goto out;
	}

	skb_put(skb, len);

	priv->RcvCtl[ctx].skb = NULL;

	pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
			 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);

out:
	spin_lock_irqsave(&priv->rxfidx_lock, flags);
	priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
	spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	atomic_dec(&priv->buckets_out);
	priv->total_received++;

	return mpt_lan_receive_skb(dev, skb);
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
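/* Free buckets returned by the IOC on a canceled receive (e.g. after
 * a LanReset): unmap and free each skb and recycle its Rx context.
 */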
static int
mpt_lan_receive_post_free(struct net_device *dev,
			  LANReceivePostReply_t *pRecvRep)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	unsigned long flags;
	struct sk_buff *skb;
	u32 ctx;
	int count;
	int i;

	count = pRecvRep->NumberOfContexts;

/**/	dlprintk((KERN_INFO MYNAM "/receive_post_reply: "
		  "IOC returned %d buckets, freeing them...\n", count));

	spin_lock_irqsave(&priv->rxfidx_lock, flags);
	for (i = 0; i < count; i++) {
		ctx = le32_to_cpu(pRecvRep->BucketContext[i]);

		skb = priv->RcvCtl[ctx].skb;

//		dlprintk((KERN_INFO MYNAM ": %s: dev_name = %s\n",
//				IOC_AND_NETDEV_NAMES_s_s(dev)));
//		dlprintk((KERN_INFO MYNAM "@rpr[2], priv = %p, buckets_out addr = %p",
//				priv, &(priv->buckets_out)));
//		dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n"));

		priv->RcvCtl[ctx].skb = NULL;
		pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
				 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(skb);

		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
	}
	spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	atomic_sub(count, &priv->buckets_out);

//	for (i = 0; i < priv->max_buckets_out; i++)
//		if (priv->RcvCtl[i].skb != NULL)
//			dlprintk((KERN_INFO MYNAM "@rpr: bucket %03x "
//				  "is still out\n", i));

/*	dlprintk((KERN_INFO MYNAM "/receive_post_reply: freed %d buckets\n",
	  count));
*/
/**/	dlprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets "
/**/		  "remaining, %d received back since sod.\n",
/**/		  atomic_read(&priv->buckets_out), priv->total_received));
	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
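/* Handle a full (non-turbo) LAN_RECEIVE reply: reassemble packets that
 * span several buckets, copy small single-bucket packets, watch for
 * bucket accounting mismatches, and hand the result to
 * mpt_lan_receive_skb().
 */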
static int
mpt_lan_receive_post_reply(struct net_device *dev,
			   LANReceivePostReply_t *pRecvRep)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *skb, *old_skb;
	unsigned long flags;
	u32 len, ctx, offset;
	u32 remaining = le32_to_cpu(pRecvRep->BucketsRemaining);
	int count;
	int i, l;

	dioprintk((KERN_INFO MYNAM ": mpt_lan_receive_post_reply called\n"));
	dioprintk((KERN_INFO MYNAM ": receive_post_reply: IOCStatus: %04x\n",
		 le16_to_cpu(pRecvRep->IOCStatus)));

	if ((le16_to_cpu(pRecvRep->IOCStatus) & MPI_IOCSTATUS_MASK) ==
						MPI_IOCSTATUS_LAN_CANCELED)
		return mpt_lan_receive_post_free(dev, pRecvRep);

	len = le32_to_cpu(pRecvRep->PacketLength);
	if (len == 0) {
		printk (KERN_ERR MYNAM ": %s/%s: ERROR - Got a non-TURBO "
			"ReceivePostReply w/ PacketLength zero!\n",
				IOC_AND_NETDEV_NAMES_s_s(dev));
		printk (KERN_ERR MYNAM ": MsgFlags = %02x, IOCStatus = %04x\n",
				pRecvRep->MsgFlags, le16_to_cpu(pRecvRep->IOCStatus));
		return -1;
	}

	ctx    = le32_to_cpu(pRecvRep->BucketContext[0]);
	count  = pRecvRep->NumberOfContexts;
	skb    = priv->RcvCtl[ctx].skb;

	offset = le32_to_cpu(pRecvRep->PacketOffset);
//	if (offset != 0) {
//		printk (KERN_INFO MYNAM ": %s/%s: Got a ReceivePostReply "
//			"w/ PacketOffset %u\n",
//				IOC_AND_NETDEV_NAMES_s_s(dev),
//				offset);
//	}

	dioprintk((KERN_INFO MYNAM ": %s/%s: @rpr, offset = %d, len = %d\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			offset, len));

	if (count > 1) {
		int szrem = len;

//		dioprintk((KERN_INFO MYNAM ": %s/%s: Multiple buckets returned "
//			"for single packet, concatenating...\n",
//				IOC_AND_NETDEV_NAMES_s_s(dev)));

		skb = (struct sk_buff *)dev_alloc_skb(len);
		if (!skb) {
			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
					IOC_AND_NETDEV_NAMES_s_s(dev),
					__FILE__, __LINE__);
			return -ENOMEM;
		}

		spin_lock_irqsave(&priv->rxfidx_lock, flags);
		for (i = 0; i < count; i++) {

			ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
			old_skb = priv->RcvCtl[ctx].skb;

			l = priv->RcvCtl[ctx].len;
			if (szrem < l)
				l = szrem;

//			dioprintk((KERN_INFO MYNAM ": %s/%s: Buckets = %d, len = %u\n",
//					IOC_AND_NETDEV_NAMES_s_s(dev),
//					i, l));

			pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
						    priv->RcvCtl[ctx].dma,
						    priv->RcvCtl[ctx].len,
						    PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(old_skb, skb_put(skb, l), l);

			pci_dma_sync_single_for_device(mpt_dev->pcidev,
						       priv->RcvCtl[ctx].dma,
						       priv->RcvCtl[ctx].len,
						       PCI_DMA_FROMDEVICE);

			priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
			szrem -= l;
		}
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	} else if (len < MPT_LAN_RX_COPYBREAK) {

		old_skb = skb;

		skb = (struct sk_buff *)dev_alloc_skb(len);
		if (!skb) {
			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
					IOC_AND_NETDEV_NAMES_s_s(dev),
					__FILE__, __LINE__);
			return -ENOMEM;
		}

		pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
					    priv->RcvCtl[ctx].dma,
					    priv->RcvCtl[ctx].len,
					    PCI_DMA_FROMDEVICE);

		skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);

		pci_dma_sync_single_for_device(mpt_dev->pcidev,
					       priv->RcvCtl[ctx].dma,
					       priv->RcvCtl[ctx].len,
					       PCI_DMA_FROMDEVICE);

		spin_lock_irqsave(&priv->rxfidx_lock, flags);
		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	} else {
		spin_lock_irqsave(&priv->rxfidx_lock, flags);

		priv->RcvCtl[ctx].skb = NULL;

		pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
				 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
		priv->RcvCtl[ctx].dma = 0;

		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

		skb_put(skb, len);
	}

	atomic_sub(count, &priv->buckets_out);
	priv->total_received += count;

	if (priv->mpt_rxfidx_tail >= MPT_LAN_MAX_BUCKETS_OUT) {
		printk (KERN_ERR MYNAM ": %s/%s: Yoohoo! mpt_rxfidx_tail = %d, "
			"MPT_LAN_MAX_BUCKETS_OUT = %d\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				priv->mpt_rxfidx_tail,
				MPT_LAN_MAX_BUCKETS_OUT);

		return -1;
	}

	if (remaining == 0)
		printk (KERN_WARNING MYNAM ": %s/%s: WARNING - IOC out of buckets! "
			"(priv->buckets_out = %d)\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			atomic_read(&priv->buckets_out));
	else if (remaining < 10)
		printk (KERN_INFO MYNAM ": %s/%s: IOC says %d buckets left. "
			"(priv->buckets_out = %d)\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			remaining, atomic_read(&priv->buckets_out));

	if ((remaining < priv->bucketthresh) &&
	    ((atomic_read(&priv->buckets_out) - remaining) >
	     MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH)) {

		printk (KERN_WARNING MYNAM " Mismatch between driver's "
			"buckets_out count and fw's BucketsRemaining "
			"count has crossed the threshold, issuing a "
			"LanReset to clear the fw's hashtable. You may "
			"want to check your /var/log/messages for \"CRC "
			"error\" event notifications.\n");

		mpt_lan_reset(dev);
		mpt_lan_wake_post_buckets_task(dev, 0);
	}

	return mpt_lan_receive_skb(dev, skb);
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Simple SGE's only at the moment */

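/* Post as many receive buckets to the IOC as free Rx contexts allow,
 * packing multiple transaction+SGE pairs into each request frame and
 * allocating/DMA-mapping bucket skbs as needed.
 */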
static void
mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
{
	struct net_device *dev = priv->dev;
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	MPT_FRAME_HDR *mf;
	LANReceivePostRequest_t *pRecvReq;
	SGETransaction32_t *pTrans;
	SGESimple64_t *pSimple;
	struct sk_buff *skb;
	dma_addr_t dma;
	u32 curr, buckets, count, max;
	u32 len = (dev->mtu + dev->hard_header_len + 4);
	unsigned long flags;
	int i;

	curr = atomic_read(&priv->buckets_out);
	buckets = (priv->max_buckets_out - curr);

	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			__func__, buckets, curr));

	max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
			(MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t));

	while (buckets) {
		mf = mpt_get_msg_frame(LanCtx, mpt_dev);
		if (mf == NULL) {
			printk (KERN_ERR "%s: Unable to alloc request frame\n",
				__func__);
			dioprintk((KERN_ERR "%s: %u buckets remaining\n",
				 __func__, buckets));
			goto out;
		}
		pRecvReq = (LANReceivePostRequest_t *) mf;

		i = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
		mpt_dev->RequestNB[i] = 0;
		count = buckets;
		if (count > max)
			count = max;

		pRecvReq->Function    = MPI_FUNCTION_LAN_RECEIVE;
		pRecvReq->ChainOffset = 0;
		pRecvReq->MsgFlags    = 0;
		pRecvReq->PortNumber  = priv->pnum;

		pTrans = (SGETransaction32_t *) pRecvReq->SG_List;
		pSimple = NULL;

		for (i = 0; i < count; i++) {
			int ctx;

			spin_lock_irqsave(&priv->rxfidx_lock, flags);
			if (priv->mpt_rxfidx_tail < 0) {
				printk (KERN_ERR "%s: Can't alloc context\n",
					__func__);
				spin_unlock_irqrestore(&priv->rxfidx_lock,
						       flags);
				break;
			}

			ctx = priv->mpt_rxfidx[priv->mpt_rxfidx_tail--];

			skb = priv->RcvCtl[ctx].skb;
			if (skb && (priv->RcvCtl[ctx].len != len)) {
				pci_unmap_single(mpt_dev->pcidev,
						 priv->RcvCtl[ctx].dma,
						 priv->RcvCtl[ctx].len,
						 PCI_DMA_FROMDEVICE);
				dev_kfree_skb(priv->RcvCtl[ctx].skb);
				skb = priv->RcvCtl[ctx].skb = NULL;
			}

			if (skb == NULL) {
				skb = dev_alloc_skb(len);
				if (skb == NULL) {
					printk (KERN_WARNING
						MYNAM "/%s: Can't alloc skb\n",
						__func__);
					priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
					spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
					break;
				}

				dma = pci_map_single(mpt_dev->pcidev, skb->data,
						     len, PCI_DMA_FROMDEVICE);

				priv->RcvCtl[ctx].skb = skb;
				priv->RcvCtl[ctx].dma = dma;
				priv->RcvCtl[ctx].len = len;
			}

			spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

			pTrans->ContextSize   = sizeof(u32);
			pTrans->DetailsLength = 0;
			pTrans->Flags         = 0;
			pTrans->TransactionContext[0] = cpu_to_le32(ctx);

			pSimple = (SGESimple64_t *) pTrans->TransactionDetails;

			pSimple->FlagsLength = cpu_to_le32(
				((MPI_SGE_FLAGS_END_OF_BUFFER |
				  MPI_SGE_FLAGS_SIMPLE_ELEMENT |
				  MPI_SGE_FLAGS_64_BIT_ADDRESSING) << MPI_SGE_FLAGS_SHIFT) | len);
			pSimple->Address.Low = cpu_to_le32((u32) priv->RcvCtl[ctx].dma);
			if (sizeof(dma_addr_t) > sizeof(u32))
				pSimple->Address.High = cpu_to_le32((u32) ((u64) priv->RcvCtl[ctx].dma >> 32));
			else
				pSimple->Address.High = 0;

			pTrans = (SGETransaction32_t *) (pSimple + 1);
		}

		if (pSimple == NULL) {
/**/			printk (KERN_WARNING MYNAM "/%s: No buckets posted\n",
/**/				__func__);
			mpt_free_msg_frame(mpt_dev, mf);
			goto out;
		}

		pSimple->FlagsLength |= cpu_to_le32(MPI_SGE_FLAGS_END_OF_LIST << MPI_SGE_FLAGS_SHIFT);

		pRecvReq->BucketCount = cpu_to_le32(i);

/*	printk(KERN_INFO MYNAM ": posting buckets\n   ");
 *	for (i = 0; i < j + 2; i ++)
 *	    printk (" %08x", le32_to_cpu(msg[i]));
 *	printk ("\n");
 */

		mpt_put_msg_frame(LanCtx, mpt_dev, mf);

		priv->total_posted += i;
		buckets -= i;
		atomic_add(i, &priv->buckets_out);
	}

out:
	dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n",
		  __func__, buckets, atomic_read(&priv->buckets_out)));
	dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n",
	__func__, priv->total_posted, priv->total_received));

	clear_bit(0, &priv->post_buckets_active);
}

static void
mpt_lan_post_receive_buckets_work(struct work_struct *work)
{
	mpt_lan_post_receive_buckets(container_of(work, struct mpt_lan_priv,
						  post_buckets_task.work));
}

static const struct net_device_ops mpt_netdev_ops = {
	.ndo_open       = mpt_lan_open,
	.ndo_stop       = mpt_lan_close,
	.ndo_start_xmit = mpt_lan_sdu_send,
	.ndo_change_mtu = mpt_lan_change_mtu,
	.ndo_tx_timeout = mpt_lan_tx_timeout,
};

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
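/* Allocate and register one Fibre Channel net_device for an IOC port:
 * set up the private state and work item, derive the MAC address from
 * the prefetched LANPage1 data, and size the Tx/Rx queues.
 */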
static struct net_device *
mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
{
	struct net_device *dev;
	struct mpt_lan_priv *priv;
	u8 HWaddr[FC_ALEN], *a;

	dev = alloc_fcdev(sizeof(struct mpt_lan_priv));
	if (!dev)
		return NULL;

	dev->mtu = MPT_LAN_MTU;

	priv = netdev_priv(dev);

	priv->dev = dev;
	priv->mpt_dev = mpt_dev;
	priv->pnum = pnum;

	INIT_DELAYED_WORK(&priv->post_buckets_task,
			  mpt_lan_post_receive_buckets_work);
	priv->post_buckets_active = 0;

	dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
			__LINE__, dev->mtu + dev->hard_header_len + 4));

	atomic_set(&priv->buckets_out, 0);
	priv->total_posted = 0;
	priv->total_received = 0;
	priv->max_buckets_out = max_buckets_out;
	if (mpt_dev->pfacts[0].MaxLanBuckets < max_buckets_out)
		priv->max_buckets_out = mpt_dev->pfacts[0].MaxLanBuckets;

	dlprintk((KERN_INFO MYNAM "@%d: MaxLanBuckets=%d, max_buckets_out/priv=%d/%d\n",
			__LINE__,
			mpt_dev->pfacts[0].MaxLanBuckets,
			max_buckets_out,
			priv->max_buckets_out));

	priv->bucketthresh = priv->max_buckets_out * 2 / 3;
	spin_lock_init(&priv->txfidx_lock);
	spin_lock_init(&priv->rxfidx_lock);

	/*  Grab pre-fetched LANPage1 stuff. :-) */
	a = (u8 *) &mpt_dev->lan_cnfg_page1.HardwareAddressLow;

	HWaddr[0] = a[5];
	HWaddr[1] = a[4];
	HWaddr[2] = a[3];
	HWaddr[3] = a[2];
	HWaddr[4] = a[1];
	HWaddr[5] = a[0];

	dev->addr_len = FC_ALEN;
	memcpy(dev->dev_addr, HWaddr, FC_ALEN);
	memset(dev->broadcast, 0xff, FC_ALEN);

	/* The Tx queue is 127 deep on the 909.
	 * Give ourselves some breathing room.
	 */
	priv->tx_max_out = (tx_max_out_p <= MPT_TX_MAX_OUT_LIM) ?
			    tx_max_out_p : MPT_TX_MAX_OUT_LIM;

	dev->netdev_ops = &mpt_netdev_ops;
	dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT;

	dlprintk((KERN_INFO MYNAM ": Finished registering dev "
		"and setting initial values\n"));

	if (register_netdev(dev) != 0) {
		free_netdev(dev);
		dev = NULL;
	}
	return dev;
}

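/* PCI probe callback: register a LAN net_device on the first IOC port
 * that advertises the LAN protocol; returns -ENODEV if none does.
 */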
static int
mptlan_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	MPT_ADAPTER 		*ioc = pci_get_drvdata(pdev);
	struct net_device	*dev;
	int			i;

	for (i = 0; i < ioc->facts.NumberOfPorts; i++) {
		printk(KERN_INFO MYNAM ": %s: PortNum=%x, "
		       "ProtocolFlags=%02Xh (%c%c%c%c)\n",
		       ioc->name, ioc->pfacts[i].PortNumber,
		       ioc->pfacts[i].ProtocolFlags,
		       MPT_PROTOCOL_FLAGS_c_c_c_c(
			       ioc->pfacts[i].ProtocolFlags));

		if (!(ioc->pfacts[i].ProtocolFlags &
					MPI_PORTFACTS_PROTOCOL_LAN)) {
			printk(KERN_INFO MYNAM ": %s: Hmmm... LAN protocol "
			       "seems to be disabled on this adapter port!\n",
			       ioc->name);
			continue;
		}

		dev = mpt_register_lan_device(ioc, i);
		if (!dev) {
			printk(KERN_ERR MYNAM ": %s: Unable to register "
			       "port%d as a LAN device\n", ioc->name,
			       ioc->pfacts[i].PortNumber);
			continue;
		}

		printk(KERN_INFO MYNAM ": %s: Fusion MPT LAN device "
		       "registered as '%s'\n", ioc->name, dev->name);
		printk(KERN_INFO MYNAM ": %s/%s: "
		       "LanAddr = %pM\n",
		       IOC_AND_NETDEV_NAMES_s_s(dev),
		       dev->dev_addr);

		ioc->netdev = dev;

		return 0;
	}

	return -ENODEV;
}

static void
mptlan_remove(struct pci_dev *pdev)
{
	MPT_ADAPTER 		*ioc = pci_get_drvdata(pdev);
	struct net_device	*dev = ioc->netdev;

	if(dev != NULL) {
		unregister_netdev(dev);
		free_netdev(dev);
	}
}

static struct mpt_pci_driver mptlan_driver = {
	.probe		= mptlan_probe,
	.remove		= mptlan_remove,
};

static int __init mpt_lan_init (void)
{
	show_mptmod_ver(LANAME, LANVER);

	LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER,
				"lan_reply");
	if (LanCtx <= 0) {
		printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n");
		return -EBUSY;
	}

	dlprintk((KERN_INFO MYNAM ": assigned context of %d\n", LanCtx));

	if (mpt_reset_register(LanCtx, mpt_lan_ioc_reset)) {
		printk(KERN_ERR MYNAM ": Eieee! unable to register a reset "
		       "handler with mptbase! The world is at an end! "
		       "Everything is fading to black! Goodbye.\n");
		return -EBUSY;
	}

	dlprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n"));

	mpt_device_driver_register(&mptlan_driver, MPTLAN_DRIVER);
	return 0;
}

static void __exit mpt_lan_exit(void)
{
	mpt_device_driver_deregister(MPTLAN_DRIVER);
	mpt_reset_deregister(LanCtx);

	if (LanCtx) {
		mpt_deregister(LanCtx);
		LanCtx = MPT_MAX_PROTOCOL_DRIVERS;
	}
}

module_init(mpt_lan_init);
module_exit(mpt_lan_exit);

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
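/* Determine skb->protocol and pkt_type for a received FC frame: work
 * around a firmware broadcast byte-swap bug, classify the destination
 * address, and strip the LLC/SNAP header from IP and ARP packets.
 */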
static unsigned short
mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data;
	struct fcllc *fcllc;

	skb_reset_mac_header(skb);
	skb_pull(skb, sizeof(struct mpt_lan_ohdr));

	if (fch->dtype == htons(0xffff)) {
		u32 *p = (u32 *) fch;

		swab32s(p + 0);
		swab32s(p + 1);
		swab32s(p + 2);
		swab32s(p + 3);

		printk (KERN_WARNING MYNAM ": %s: WARNING - Broadcast swap F/W bug detected!\n",
				NETDEV_PTR_TO_IOC_NAME_s(dev));
		printk (KERN_WARNING MYNAM ": Please update sender @ MAC_addr = %pM\n",
				fch->saddr);
	}

	if (*fch->daddr & 1) {
		if (!memcmp(fch->daddr, dev->broadcast, FC_ALEN)) {
			skb->pkt_type = PACKET_BROADCAST;
		} else {
			skb->pkt_type = PACKET_MULTICAST;
		}
	} else {
		if (memcmp(fch->daddr, dev->dev_addr, FC_ALEN)) {
			skb->pkt_type = PACKET_OTHERHOST;
		} else {
			skb->pkt_type = PACKET_HOST;
		}
	}

	fcllc = (struct fcllc *)skb->data;

	/* Strip the SNAP header from ARP packets since we don't
	 * pass them through to the 802.2/SNAP layers.
	 */
	if (fcllc->dsap == EXTENDED_SAP &&
		(fcllc->ethertype == htons(ETH_P_IP) ||
		 fcllc->ethertype == htons(ETH_P_ARP))) {
		skb_pull(skb, sizeof(struct fcllc));
		return fcllc->ethertype;
	}

	return htons(ETH_P_802_2);
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
