/* Copyright (c) 2006 Coraid, Inc.  See COPYING for GPL terms. */
/*
 * aoecmd.c
 * Filesystem request handling methods
 */

#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/genhd.h>
#include <asm/unaligned.h>
#include "aoe.h"

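/*
 * With TIMERTICK at HZ/10 the retransmit timer fires every 100ms;
 * MINTIMER and MAXTIMER below bound the adaptive timeout between
 * 200ms and 2 seconds.
 */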
#define TIMERTICK (HZ / 10)
#define MINTIMER (2 * TIMERTICK)
#define MAXTIMER (HZ << 1)

static int aoe_deadsecs = 60 * 3;
module_param(aoe_deadsecs, int, 0644);
MODULE_PARM_DESC(aoe_deadsecs, "After aoe_deadsecs seconds, give up and fail dev.");

struct sk_buff *
new_skb(ulong len)
{
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_ATOMIC);
	if (skb) {
		skb_reset_mac_header(skb);
		skb_reset_network_header(skb);
		skb->protocol = __constant_htons(ETH_P_AOE);
		skb->priority = 0;
		skb->next = skb->prev = NULL;

		/* tell the network layer not to perform IP checksums
		 * or to get the NIC to do it
		 */
		skb->ip_summed = CHECKSUM_NONE;
	}
	return skb;
}

static struct frame *
getframe(struct aoedev *d, int tag)
{
	struct frame *f, *e;

	f = d->frames;
	e = f + d->nframes;
	for (; f<e; f++)
		if (f->tag == tag)
			return f;
	return NULL;
}

/*
 * Leave the top bit clear so we have tagspace for userland.
 * The bottom 16 bits are the xmit tick for rexmit/rttavg processing.
 * This driver reserves tag -1 to mean "unused frame."
 */
static int
newtag(struct aoedev *d)
{
	register ulong n;

	n = jiffies & 0xffff;
	return n | (++d->lasttag & 0x7fff) << 16;
}
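/*
 * Worked example: with d->lasttag at 0x1233 and the low word of
 * jiffies at 0xabcd, newtag() returns 0x1234abcd.  tsince() below
 * recovers the 0xabcd tick to estimate how long the frame has been
 * outstanding.
 */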

static int
aoehdr_atainit(struct aoedev *d, struct aoe_hdr *h)
{
	u32 host_tag = newtag(d);

	memcpy(h->src, d->ifp->dev_addr, sizeof h->src);
	memcpy(h->dst, d->addr, sizeof h->dst);
	h->type = __constant_cpu_to_be16(ETH_P_AOE);
	h->verfl = AOE_HVER;
	h->major = cpu_to_be16(d->aoemajor);
	h->minor = d->aoeminor;
	h->cmd = AOECMD_ATA;
	h->tag = cpu_to_be32(host_tag);

	return host_tag;
}

static inline void
put_lba(struct aoe_atahdr *ah, sector_t lba)
{
	ah->lba0 = lba;
	ah->lba1 = lba >>= 8;
	ah->lba2 = lba >>= 8;
	ah->lba3 = lba >>= 8;
	ah->lba4 = lba >>= 8;
	ah->lba5 = lba >>= 8;
}
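/*
 * The lba0..lba5 fields hold the 48-bit sector address least
 * significant byte first.  For LBA28 commands the caller then
 * overwrites the high nibble of lba3 with the drive/LBA select bits
 * (see aoecmd_ata_rw).
 */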

static void
aoecmd_ata_rw(struct aoedev *d, struct frame *f)
{
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct buf *buf;
	struct sk_buff *skb;
	ulong bcnt;
	register sector_t sector;
	char writebit, extbit;

	writebit = 0x10;
	extbit = 0x4;

	buf = d->inprocess;

	sector = buf->sector;
	bcnt = buf->bv_resid;
	if (bcnt > d->maxbcnt)
		bcnt = d->maxbcnt;

	/* initialize the headers & frame */
	skb = f->skb;
	h = aoe_hdr(skb);
	ah = (struct aoe_atahdr *) (h+1);
	skb_put(skb, sizeof *h + sizeof *ah);
	memset(h, 0, skb->len);
	f->tag = aoehdr_atainit(d, h);
	f->waited = 0;
	f->buf = buf;
	f->bufaddr = buf->bufaddr;
	f->bcnt = bcnt;
	f->lba = sector;

	/* set up ata header */
	ah->scnt = bcnt >> 9;
	put_lba(ah, sector);
	if (d->flags & DEVFL_EXT) {
		ah->aflags |= AOEAFL_EXT;
	} else {
		extbit = 0;
		ah->lba3 &= 0x0f;
		ah->lba3 |= 0xe0;	/* LBA bit + obsolete 0xa0 */
	}

	if (bio_data_dir(buf->bio) == WRITE) {
		skb_fill_page_desc(skb, 0, virt_to_page(f->bufaddr),
			offset_in_page(f->bufaddr), bcnt);
		ah->aflags |= AOEAFL_WRITE;
		skb->len += bcnt;
		skb->data_len = bcnt;
	} else {
		writebit = 0;
	}

	ah->cmdstat = WIN_READ | writebit | extbit;

	/* mark all tracking fields and load out */
	buf->nframesout += 1;
	buf->bufaddr += bcnt;
	buf->bv_resid -= bcnt;
/* printk(KERN_DEBUG "aoe: bv_resid=%ld\n", buf->bv_resid); */
	buf->resid -= bcnt;
	buf->sector += bcnt >> 9;
	if (buf->resid == 0) {
		d->inprocess = NULL;
	} else if (buf->bv_resid == 0) {
		buf->bv++;
		WARN_ON(buf->bv->bv_len == 0);
		buf->bv_resid = buf->bv->bv_len;
		buf->bufaddr = page_address(buf->bv->bv_page) + buf->bv->bv_offset;
	}

	skb->dev = d->ifp;
	skb = skb_clone(skb, GFP_ATOMIC);
	if (skb == NULL)
		return;
	if (d->sendq_hd)
		d->sendq_tl->next = skb;
	else
		d->sendq_hd = skb;
	d->sendq_tl = skb;
}
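/*
 * Note the pattern above: the frame's skb is cloned so that the
 * driver keeps the original for retransmission, while the clone is
 * chained on d->sendq for the caller to pass to aoenet_xmit after
 * dropping d->lock.
 */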

/* This function does not sleep, so callers that cannot sleep may use
 * it and transmit the resulting packets later, when interrupts are on.
 */
static struct sk_buff *
aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff **tail)
{
	struct aoe_hdr *h;
	struct aoe_cfghdr *ch;
	struct sk_buff *skb, *sl, *sl_tail;
	struct net_device *ifp;

	sl = sl_tail = NULL;

	read_lock(&dev_base_lock);
	for_each_netdev(ifp) {
		dev_hold(ifp);
		if (!is_aoe_netif(ifp))
			goto cont;

		skb = new_skb(sizeof *h + sizeof *ch);
		if (skb == NULL) {
			printk(KERN_INFO "aoe: skb alloc failure\n");
			goto cont;
		}
		skb_put(skb, sizeof *h + sizeof *ch);
		skb->dev = ifp;
		if (sl_tail == NULL)
			sl_tail = skb;
		h = aoe_hdr(skb);
		memset(h, 0, sizeof *h + sizeof *ch);

		memset(h->dst, 0xff, sizeof h->dst);
		memcpy(h->src, ifp->dev_addr, sizeof h->src);
		h->type = __constant_cpu_to_be16(ETH_P_AOE);
		h->verfl = AOE_HVER;
		h->major = cpu_to_be16(aoemajor);
		h->minor = aoeminor;
		h->cmd = AOECMD_CFG;

		skb->next = sl;
		sl = skb;
cont:
		dev_put(ifp);
	}
	read_unlock(&dev_base_lock);

	if (tail != NULL)
		*tail = sl_tail;
	return sl;
}

static struct frame *
freeframe(struct aoedev *d)
{
	struct frame *f, *e;
	int n = 0;

	f = d->frames;
	e = f + d->nframes;
	for (; f<e; f++) {
		if (f->tag != FREETAG)
			continue;
		if (atomic_read(&skb_shinfo(f->skb)->dataref) == 1) {
			skb_shinfo(f->skb)->nr_frags = f->skb->data_len = 0;
			skb_trim(f->skb, 0);
			return f;
		}
		n++;
	}
	if (n == d->nframes)	/* wait for network layer */
		d->flags |= DEVFL_KICKME;

	return NULL;
}
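/*
 * A frame is reusable only once the network layer has released its
 * clone of the skb, i.e. the shared-info dataref is back to one.
 * When no frame qualifies, DEVFL_KICKME asks rexmit_timer to call
 * aoecmd_work again on a later tick.
 */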

/* enters with d->lock held */
void
aoecmd_work(struct aoedev *d)
{
	struct frame *f;
	struct buf *buf;

	if (d->flags & DEVFL_PAUSE) {
		if (!aoedev_isbusy(d))
			d->sendq_hd = aoecmd_cfg_pkts(d->aoemajor,
						d->aoeminor, &d->sendq_tl);
		return;
	}

loop:
	f = freeframe(d);
	if (f == NULL)
		return;
	if (d->inprocess == NULL) {
		if (list_empty(&d->bufq))
			return;
		buf = container_of(d->bufq.next, struct buf, bufs);
		list_del(d->bufq.next);
/*printk(KERN_DEBUG "aoe: bi_size=%ld\n", buf->bio->bi_size); */
		d->inprocess = buf;
	}
	aoecmd_ata_rw(d, f);
	goto loop;
}

static void
rexmit(struct aoedev *d, struct frame *f)
{
	struct sk_buff *skb;
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	char buf[128];
	u32 n;

	n = newtag(d);

	snprintf(buf, sizeof buf,
		"%15s e%ld.%ld oldtag=%08x@%08lx newtag=%08x\n",
		"retransmit",
		d->aoemajor, d->aoeminor, f->tag, jiffies, n);
	aoechr_error(buf);

	skb = f->skb;
	h = aoe_hdr(skb);
	ah = (struct aoe_atahdr *) (h+1);
	f->tag = n;
	h->tag = cpu_to_be32(n);
	memcpy(h->dst, d->addr, sizeof h->dst);
	memcpy(h->src, d->ifp->dev_addr, sizeof h->src);

	n = DEFAULTBCNT / 512;
	if (ah->scnt > n) {
		ah->scnt = n;
		if (ah->aflags & AOEAFL_WRITE) {
			skb_fill_page_desc(skb, 0, virt_to_page(f->bufaddr),
				offset_in_page(f->bufaddr), DEFAULTBCNT);
			skb->len = sizeof *h + sizeof *ah + DEFAULTBCNT;
			skb->data_len = DEFAULTBCNT;
		}
		if (++d->lostjumbo > (d->nframes << 1)
		&& d->maxbcnt != DEFAULTBCNT) {
			printk(KERN_INFO "aoe: e%ld.%ld: too many lost jumbo on %s - using 1KB frames.\n",
				d->aoemajor, d->aoeminor, d->ifp->name);
			d->maxbcnt = DEFAULTBCNT;
			d->flags |= DEVFL_MAXBCNT;
		}
	}

	skb->dev = d->ifp;
	skb = skb_clone(skb, GFP_ATOMIC);
	if (skb == NULL)
		return;
	if (d->sendq_hd)
		d->sendq_tl->next = skb;
	else
		d->sendq_hd = skb;
	d->sendq_tl = skb;
}
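/*
 * Retransmissions are shrunk to DEFAULTBCNT of data on the theory
 * that an oversized (jumbo) frame may itself be what is being lost.
 * After more than twice nframes lost jumbos, the device falls back
 * to DEFAULTBCNT frames for good via d->maxbcnt and DEVFL_MAXBCNT.
 */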

static int
tsince(int tag)
{
	int n;

	n = jiffies & 0xffff;
	n -= tag & 0xffff;
	if (n < 0)
		n += 1<<16;
	return n;
}
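/*
 * Example: a frame stamped when the low word of jiffies was 0xfff0
 * and examined at 0x0010 gives 0x0010 - 0xfff0 = -0xffe0, which the
 * wraparound correction turns into the true age of 0x20 ticks.
 */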

static void
rexmit_timer(ulong vp)
{
	struct aoedev *d;
	struct frame *f, *e;
	struct sk_buff *sl;
	register long timeout;
	ulong flags, n;

	d = (struct aoedev *) vp;
	sl = NULL;

	/* timeout is always ~150% of the moving average */
	timeout = d->rttavg;
	timeout += timeout >> 1;

	spin_lock_irqsave(&d->lock, flags);

	if (d->flags & DEVFL_TKILL) {
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}
	f = d->frames;
	e = f + d->nframes;
	for (; f<e; f++) {
		if (f->tag != FREETAG && tsince(f->tag) >= timeout) {
			n = f->waited += timeout;
			n /= HZ;
			if (n > aoe_deadsecs) { /* waited too long for response */
				aoedev_downdev(d);
				break;
			}
			rexmit(d, f);
		}
	}
	if (d->flags & DEVFL_KICKME) {
		d->flags &= ~DEVFL_KICKME;
		aoecmd_work(d);
	}

	sl = d->sendq_hd;
	d->sendq_hd = d->sendq_tl = NULL;
	if (sl) {
		n = d->rttavg <<= 1;
		if (n > MAXTIMER)
			d->rttavg = MAXTIMER;
	}

	d->timer.expires = jiffies + TIMERTICK;
	add_timer(&d->timer);

	spin_unlock_irqrestore(&d->lock, flags);

	aoenet_xmit(sl);
}
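/*
 * Doubling rttavg whenever this tick queued packets gives a rough
 * exponential backoff, capped at MAXTIMER; calc_rttavg pulls the
 * estimate back down as responses arrive.
 */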

/* this function performs work that has been deferred until sleeping is OK
 */
void
aoecmd_sleepwork(struct work_struct *work)
{
	struct aoedev *d = container_of(work, struct aoedev, work);

	if (d->flags & DEVFL_GDALLOC)
		aoeblk_gdalloc(d);

	if (d->flags & DEVFL_NEWSIZE) {
		struct block_device *bd;
		unsigned long flags;
		u64 ssize;

		ssize = d->gd->capacity;
		bd = bdget_disk(d->gd, 0);

		if (bd) {
			mutex_lock(&bd->bd_inode->i_mutex);
			i_size_write(bd->bd_inode, (loff_t)ssize<<9);
			mutex_unlock(&bd->bd_inode->i_mutex);
			bdput(bd);
		}
		spin_lock_irqsave(&d->lock, flags);
		d->flags |= DEVFL_UP;
		d->flags &= ~DEVFL_NEWSIZE;
		spin_unlock_irqrestore(&d->lock, flags);
	}
}
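/*
 * The work above is deferred to process context because
 * aoeblk_gdalloc, bdget_disk, and i_mutex may all sleep, which is not
 * allowed in the interrupt-time paths that detect the new device or
 * new size.
 */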

static void
ataid_complete(struct aoedev *d, unsigned char *id)
{
	u64 ssize;
	u16 n;

	/* word 83: command set supported */
	n = le16_to_cpu(get_unaligned((__le16 *) &id[83<<1]));

	/* word 86: command set/feature enabled */
	n |= le16_to_cpu(get_unaligned((__le16 *) &id[86<<1]));

	if (n & (1<<10)) {	/* bit 10: LBA 48 */
		d->flags |= DEVFL_EXT;

		/* word 100: number lba48 sectors */
		ssize = le64_to_cpu(get_unaligned((__le64 *) &id[100<<1]));

		/* set as in ide-disk.c:init_idedisk_capacity */
		d->geo.cylinders = ssize;
		d->geo.cylinders /= (255 * 63);
		d->geo.heads = 255;
		d->geo.sectors = 63;
	} else {
		d->flags &= ~DEVFL_EXT;

		/* number lba28 sectors */
		ssize = le32_to_cpu(get_unaligned((__le32 *) &id[60<<1]));

		/* NOTE: obsolete in ATA 6 */
		d->geo.cylinders = le16_to_cpu(get_unaligned((__le16 *) &id[54<<1]));
		d->geo.heads = le16_to_cpu(get_unaligned((__le16 *) &id[55<<1]));
		d->geo.sectors = le16_to_cpu(get_unaligned((__le16 *) &id[56<<1]));
	}

	if (d->ssize != ssize)
		printk(KERN_INFO "aoe: %012llx e%lu.%lu v%04x has %llu sectors\n",
			(unsigned long long)mac_addr(d->addr),
			d->aoemajor, d->aoeminor,
			d->fw_ver, (unsigned long long)ssize);
	d->ssize = ssize;
	d->geo.start = 0;
	if (d->gd != NULL) {
		d->gd->capacity = ssize;
		d->flags |= DEVFL_NEWSIZE;
	} else {
		if (d->flags & DEVFL_GDALLOC) {
			printk(KERN_ERR "aoe: can't schedule work for e%lu.%lu, %s\n",
			       d->aoemajor, d->aoeminor,
			       "it's already on!  This shouldn't happen.");
			return;
		}
		d->flags |= DEVFL_GDALLOC;
	}
	schedule_work(&d->work);
}

static void
calc_rttavg(struct aoedev *d, int rtt)
{
	register long n;

	n = rtt;
	if (n < 0) {
		n = -rtt;
		if (n < MINTIMER)
			n = MINTIMER;
		else if (n > MAXTIMER)
			n = MAXTIMER;
		d->mintimer += (n - d->mintimer) >> 1;
	} else if (n < d->mintimer)
		n = d->mintimer;
	else if (n > MAXTIMER)
		n = MAXTIMER;

	/* g == .25; cf. Congestion Avoidance and Control, Jacobson & Karels; 1988 */
	n -= d->rttavg;
	d->rttavg += n >> 2;
}
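/*
 * In effect: rttavg += (rtt - rttavg) / 4, an exponentially weighted
 * moving average with gain 1/4.  A negative rtt marks a response that
 * matched no outstanding frame; it only adjusts mintimer.
 */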

void
aoecmd_ata_rsp(struct sk_buff *skb)
{
	struct aoedev *d;
	struct aoe_hdr *hin, *hout;
	struct aoe_atahdr *ahin, *ahout;
	struct frame *f;
	struct buf *buf;
	struct sk_buff *sl;
	register long n;
	ulong flags;
	char ebuf[128];
	u16 aoemajor;

	hin = aoe_hdr(skb);
	aoemajor = be16_to_cpu(get_unaligned(&hin->major));
	d = aoedev_by_aoeaddr(aoemajor, hin->minor);
	if (d == NULL) {
		snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response "
			"for unknown device %d.%d\n",
			 aoemajor, hin->minor);
		aoechr_error(ebuf);
		return;
	}

	spin_lock_irqsave(&d->lock, flags);

	n = be32_to_cpu(get_unaligned(&hin->tag));
	f = getframe(d, n);
	if (f == NULL) {
		calc_rttavg(d, -tsince(n));
		spin_unlock_irqrestore(&d->lock, flags);
		snprintf(ebuf, sizeof ebuf,
			"%15s e%d.%d    tag=%08x@%08lx\n",
			"unexpected rsp",
			be16_to_cpu(get_unaligned(&hin->major)),
			hin->minor,
			be32_to_cpu(get_unaligned(&hin->tag)),
			jiffies);
		aoechr_error(ebuf);
		return;
	}

	calc_rttavg(d, tsince(f->tag));

	ahin = (struct aoe_atahdr *) (hin+1);
	hout = aoe_hdr(f->skb);
	ahout = (struct aoe_atahdr *) (hout+1);
	buf = f->buf;

	if (ahout->cmdstat == WIN_IDENTIFY)
		d->flags &= ~DEVFL_PAUSE;
	if (ahin->cmdstat & 0xa9) {	/* these bits cleared on success */
		printk(KERN_ERR
			"aoe: ata error cmd=%2.2Xh stat=%2.2Xh from e%ld.%ld\n",
			ahout->cmdstat, ahin->cmdstat,
			d->aoemajor, d->aoeminor);
		if (buf)
			buf->flags |= BUFFL_FAIL;
	} else {
		n = ahout->scnt << 9;
		switch (ahout->cmdstat) {
		case WIN_READ:
		case WIN_READ_EXT:
			if (skb->len - sizeof *hin - sizeof *ahin < n) {
				printk(KERN_ERR
					"aoe: runt data size in read.  skb->len=%d\n",
					skb->len);
				/* fail frame f?  just returning will rexmit. */
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}
			memcpy(f->bufaddr, ahin+1, n);
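			/* fall through: the write-side code below
			 * re-issues any remaining portion of the request
			 */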
		case WIN_WRITE:
		case WIN_WRITE_EXT:
			if (f->bcnt -= n) {
				skb = f->skb;
				f->bufaddr += n;
				put_lba(ahout, f->lba += ahout->scnt);
				n = f->bcnt;
				if (n > DEFAULTBCNT)
					n = DEFAULTBCNT;
				ahout->scnt = n >> 9;
				if (ahout->aflags & AOEAFL_WRITE) {
					skb_fill_page_desc(skb, 0,
						virt_to_page(f->bufaddr),
						offset_in_page(f->bufaddr), n);
					skb->len = sizeof *hout + sizeof *ahout + n;
					skb->data_len = n;
				}
				f->tag = newtag(d);
				hout->tag = cpu_to_be32(f->tag);
				skb->dev = d->ifp;
				skb = skb_clone(skb, GFP_ATOMIC);
				spin_unlock_irqrestore(&d->lock, flags);
				if (skb)
					aoenet_xmit(skb);
				return;
			}
			if (n > DEFAULTBCNT)
				d->lostjumbo = 0;
			break;
		case WIN_IDENTIFY:
			if (skb->len - sizeof *hin - sizeof *ahin < 512) {
				printk(KERN_INFO
					"aoe: runt data size in ataid.  skb->len=%d\n",
					skb->len);
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}
			ataid_complete(d, (char *) (ahin+1));
			break;
		default:
			printk(KERN_INFO
				"aoe: unrecognized ata command %2.2Xh for %d.%d\n",
				ahout->cmdstat,
				be16_to_cpu(get_unaligned(&hin->major)),
				hin->minor);
		}
	}

	if (buf) {
		buf->nframesout -= 1;
		if (buf->nframesout == 0 && buf->resid == 0) {
			unsigned long duration = jiffies - buf->start_time;
			unsigned long n_sect = buf->bio->bi_size >> 9;
			struct gendisk *disk = d->gd;
			const int rw = bio_data_dir(buf->bio);

			disk_stat_inc(disk, ios[rw]);
			disk_stat_add(disk, ticks[rw], duration);
			disk_stat_add(disk, sectors[rw], n_sect);
			disk_stat_add(disk, io_ticks, duration);
			n = (buf->flags & BUFFL_FAIL) ? -EIO : 0;
			bio_endio(buf->bio, buf->bio->bi_size, n);
			mempool_free(buf, d->bufpool);
		}
	}

	f->buf = NULL;
	f->tag = FREETAG;

	aoecmd_work(d);
	sl = d->sendq_hd;
	d->sendq_hd = d->sendq_tl = NULL;

	spin_unlock_irqrestore(&d->lock, flags);
	aoenet_xmit(sl);
}

void
aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
{
	struct sk_buff *sl;

	sl = aoecmd_cfg_pkts(aoemajor, aoeminor, NULL);

	aoenet_xmit(sl);
}

/*
 * Since we only call this in one place (and it only prepares one frame)
 * we just return the skb.  Usually we'd chain it up to the aoedev sendq.
 */
static struct sk_buff *
aoecmd_ata_id(struct aoedev *d)
{
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct frame *f;
	struct sk_buff *skb;

	f = freeframe(d);
	if (f == NULL) {
		printk(KERN_ERR "aoe: can't get a frame. This shouldn't happen.\n");
		return NULL;
	}

	/* initialize the headers & frame */
	skb = f->skb;
	h = aoe_hdr(skb);
	ah = (struct aoe_atahdr *) (h+1);
	skb_put(skb, sizeof *h + sizeof *ah);
	memset(h, 0, skb->len);
	f->tag = aoehdr_atainit(d, h);
	f->waited = 0;

	/* set up ata header */
	ah->scnt = 1;
	ah->cmdstat = WIN_IDENTIFY;
	ah->lba3 = 0xa0;

	skb->dev = d->ifp;

	d->rttavg = MAXTIMER;
	d->timer.function = rexmit_timer;

	return skb_clone(skb, GFP_ATOMIC);
}

void
aoecmd_cfg_rsp(struct sk_buff *skb)
{
	struct aoedev *d;
	struct aoe_hdr *h;
	struct aoe_cfghdr *ch;
	ulong flags, sysminor, aoemajor;
	struct sk_buff *sl;
	enum { MAXFRAMES = 16 };
	u16 n;

	h = aoe_hdr(skb);
	ch = (struct aoe_cfghdr *) (h+1);

	/*
	 * Enough people have their dip switches set backwards to
	 * warrant a loud message for this special case.
	 */
	aoemajor = be16_to_cpu(get_unaligned(&h->major));
	if (aoemajor == 0xfff) {
		printk(KERN_ERR "aoe: Warning: shelf address is all ones.  "
			"Check shelf dip switches.\n");
		return;
	}

	sysminor = SYSMINOR(aoemajor, h->minor);
	if (sysminor * AOE_PARTITIONS + AOE_PARTITIONS > MINORMASK) {
		printk(KERN_INFO "aoe: e%ld.%d: minor number too large\n",
			aoemajor, (int) h->minor);
		return;
	}

	n = be16_to_cpu(ch->bufcnt);
	if (n > MAXFRAMES)	/* keep it reasonable */
		n = MAXFRAMES;

	d = aoedev_by_sysminor_m(sysminor, n);
	if (d == NULL) {
		printk(KERN_INFO "aoe: device sysminor_m failure\n");
		return;
	}

	spin_lock_irqsave(&d->lock, flags);

	/* permit device to migrate mac and network interface */
	d->ifp = skb->dev;
	memcpy(d->addr, h->src, sizeof d->addr);
	if (!(d->flags & DEVFL_MAXBCNT)) {
		n = d->ifp->mtu;
		n -= sizeof (struct aoe_hdr) + sizeof (struct aoe_atahdr);
		n /= 512;
		if (n > ch->scnt)
			n = ch->scnt;
		n = n ? n * 512 : DEFAULTBCNT;
		if (n != d->maxbcnt) {
			printk(KERN_INFO
				"aoe: e%ld.%ld: setting %d byte data frames on %s\n",
				d->aoemajor, d->aoeminor, n, d->ifp->name);
			d->maxbcnt = n;
		}
	}
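	/*
	 * Example, assuming the 24-byte AoE header and 12-byte ATA
	 * header of the AoE spec: a 9000-byte jumbo MTU yields
	 * (9000 - 36) / 512 = 17 sectors, so maxbcnt becomes 8704,
	 * while a standard 1500-byte MTU yields 2 sectors, i.e. the
	 * 1KB DEFAULTBCNT.
	 */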

	/* don't change users' perspective */
	if (d->nopen && !(d->flags & DEVFL_PAUSE)) {
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}
	d->flags |= DEVFL_PAUSE;	/* force pause */
	d->mintimer = MINTIMER;
	d->fw_ver = be16_to_cpu(ch->fwver);

	/* check for already outstanding ataid */
	sl = aoedev_isbusy(d) == 0 ? aoecmd_ata_id(d) : NULL;

	spin_unlock_irqrestore(&d->lock, flags);

	aoenet_xmit(sl);
}