1/*
2 * linux/drivers/s390/net/qeth_main.c
3 *
4 * Linux on zSeries OSA Express and HiperSockets support
5 *
6 * Copyright 2000,2003 IBM Corporation
7 *
8 *    Author(s): Original Code written by
9 *			  Utz Bacher (utz.bacher@de.ibm.com)
10 *		 Rewritten by
11 *			  Frank Pavlic (fpavlic@de.ibm.com) and
12 *		 	  Thomas Spatzier <tspat@de.ibm.com>
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2, or (at your option)
17 * any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */
28
29
30#include <linux/module.h>
31#include <linux/moduleparam.h>
32#include <linux/string.h>
33#include <linux/errno.h>
34#include <linux/mm.h>
35#include <linux/ip.h>
36#include <linux/inetdevice.h>
37#include <linux/netdevice.h>
38#include <linux/sched.h>
39#include <linux/workqueue.h>
40#include <linux/kernel.h>
41#include <linux/slab.h>
42#include <linux/interrupt.h>
43#include <linux/tcp.h>
44#include <linux/icmp.h>
45#include <linux/skbuff.h>
46#include <linux/in.h>
47#include <linux/igmp.h>
48#include <linux/init.h>
49#include <linux/reboot.h>
50#include <linux/mii.h>
51#include <linux/rcupdate.h>
52#include <linux/ethtool.h>
53
54#include <net/arp.h>
55#include <net/ip.h>
56#include <net/route.h>
57
58#include <asm/ebcdic.h>
59#include <asm/io.h>
60#include <asm/qeth.h>
61#include <asm/timex.h>
62#include <asm/semaphore.h>
63#include <asm/uaccess.h>
64#include <asm/s390_rdev.h>
65
66#include "qeth.h"
67#include "qeth_mpc.h"
68#include "qeth_fs.h"
69#include "qeth_eddp.h"
70#include "qeth_tso.h"
71
72static const char *version = "qeth S/390 OSA-Express driver";
73
74/**
75 * Debug Facility Stuff
76 */
77static debug_info_t *qeth_dbf_setup = NULL;
78static debug_info_t *qeth_dbf_data = NULL;
79static debug_info_t *qeth_dbf_misc = NULL;
80static debug_info_t *qeth_dbf_control = NULL;
81debug_info_t *qeth_dbf_trace = NULL;
82static debug_info_t *qeth_dbf_sense = NULL;
83static debug_info_t *qeth_dbf_qerr = NULL;
84
85DEFINE_PER_CPU(char[256], qeth_dbf_txt_buf);
86
87static struct lock_class_key qdio_out_skb_queue_key;
88
89/**
90 * some more definitions and declarations
91 */
92static unsigned int known_devices[][10] = QETH_MODELLIST_ARRAY;
93
94/* list of our cards */
95struct qeth_card_list_struct qeth_card_list;
96/*process list want to be notified*/
97spinlock_t qeth_notify_lock;
98struct list_head qeth_notify_list;
99
100static void qeth_send_control_data_cb(struct qeth_channel *,
101				      struct qeth_cmd_buffer *);
102
103/**
104 * here we go with function implementation
105 */
106static void
107qeth_init_qdio_info(struct qeth_card *card);
108
109static int
110qeth_init_qdio_queues(struct qeth_card *card);
111
112static int
113qeth_alloc_qdio_buffers(struct qeth_card *card);
114
115static void
116qeth_free_qdio_buffers(struct qeth_card *);
117
118static void
119qeth_clear_qdio_buffers(struct qeth_card *);
120
121static void
122qeth_clear_ip_list(struct qeth_card *, int, int);
123
124static void
125qeth_clear_ipacmd_list(struct qeth_card *);
126
127static int
128qeth_qdio_clear_card(struct qeth_card *, int);
129
130static void
131qeth_clear_working_pool_list(struct qeth_card *);
132
133static void
134qeth_clear_cmd_buffers(struct qeth_channel *);
135
136static int
137qeth_stop(struct net_device *);
138
139static void
140qeth_clear_ipato_list(struct qeth_card *);
141
142static int
143qeth_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *);
144
145static void
146qeth_irq_tasklet(unsigned long);
147
148static int
149qeth_set_online(struct ccwgroup_device *);
150
151static int
152__qeth_set_online(struct ccwgroup_device *gdev, int recovery_mode);
153
154static struct qeth_ipaddr *
155qeth_get_addr_buffer(enum qeth_prot_versions);
156
157static void
158qeth_set_multicast_list(struct net_device *);
159
160static void
161qeth_setadp_promisc_mode(struct qeth_card *);
162
163static void
164qeth_notify_processes(void)
165{
166	/*notify all  registered processes */
167	struct qeth_notify_list_struct *n_entry;
168
169	QETH_DBF_TEXT(trace,3,"procnoti");
170	spin_lock(&qeth_notify_lock);
171	list_for_each_entry(n_entry, &qeth_notify_list, list) {
172		send_sig(n_entry->signum, n_entry->task, 1);
173	}
174	spin_unlock(&qeth_notify_lock);
175
176}
177int
178qeth_notifier_unregister(struct task_struct *p)
179{
180	struct qeth_notify_list_struct *n_entry, *tmp;
181
182	QETH_DBF_TEXT(trace, 2, "notunreg");
183	spin_lock(&qeth_notify_lock);
184	list_for_each_entry_safe(n_entry, tmp, &qeth_notify_list, list) {
185		if (n_entry->task == p) {
186			list_del(&n_entry->list);
187			kfree(n_entry);
188			goto out;
189		}
190	}
191out:
192	spin_unlock(&qeth_notify_lock);
193	return 0;
194}
195int
196qeth_notifier_register(struct task_struct *p, int signum)
197{
198	struct qeth_notify_list_struct *n_entry;
199
200	/*check first if entry already exists*/
201	spin_lock(&qeth_notify_lock);
202	list_for_each_entry(n_entry, &qeth_notify_list, list) {
203		if (n_entry->task == p) {
204			n_entry->signum = signum;
205			spin_unlock(&qeth_notify_lock);
206			return 0;
207		}
208	}
209	spin_unlock(&qeth_notify_lock);
210
211	n_entry = (struct qeth_notify_list_struct *)
212		kmalloc(sizeof(struct qeth_notify_list_struct),GFP_KERNEL);
213	if (!n_entry)
214		return -ENOMEM;
215	n_entry->task = p;
216	n_entry->signum = signum;
217	spin_lock(&qeth_notify_lock);
218	list_add(&n_entry->list,&qeth_notify_list);
219	spin_unlock(&qeth_notify_lock);
220	return 0;
221}
222
223
224/**
225 * free channel command buffers
226 */
227static void
228qeth_clean_channel(struct qeth_channel *channel)
229{
230	int cnt;
231
232	QETH_DBF_TEXT(setup, 2, "freech");
233	for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
234		kfree(channel->iob[cnt].data);
235}
236
237/**
238 * free card
239 */
240static void
241qeth_free_card(struct qeth_card *card)
242{
243
244	QETH_DBF_TEXT(setup, 2, "freecrd");
245	QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
246	qeth_clean_channel(&card->read);
247	qeth_clean_channel(&card->write);
248	if (card->dev)
249		free_netdev(card->dev);
250	qeth_clear_ip_list(card, 0, 0);
251	qeth_clear_ipato_list(card);
252	kfree(card->ip_tbd_list);
253	qeth_free_qdio_buffers(card);
254	kfree(card);
255}
256
257/**
258 * alloc memory for command buffer per channel
259 */
260static int
261qeth_setup_channel(struct qeth_channel *channel)
262{
263	int cnt;
264
265	QETH_DBF_TEXT(setup, 2, "setupch");
266	for (cnt=0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
267		channel->iob[cnt].data = (char *)
268			kmalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL);
269		if (channel->iob[cnt].data == NULL)
270			break;
271		channel->iob[cnt].state = BUF_STATE_FREE;
272		channel->iob[cnt].channel = channel;
273		channel->iob[cnt].callback = qeth_send_control_data_cb;
274		channel->iob[cnt].rc = 0;
275	}
276	if (cnt < QETH_CMD_BUFFER_NO) {
277		while (cnt-- > 0)
278			kfree(channel->iob[cnt].data);
279		return -ENOMEM;
280	}
281	channel->buf_no = 0;
282	channel->io_buf_no = 0;
283	atomic_set(&channel->irq_pending, 0);
284	spin_lock_init(&channel->iob_lock);
285
286	init_waitqueue_head(&channel->wait_q);
287	channel->irq_tasklet.data = (unsigned long) channel;
288	channel->irq_tasklet.func = qeth_irq_tasklet;
289	return 0;
290}
291
292/**
293 * alloc memory for card structure
294 */
295static struct qeth_card *
296qeth_alloc_card(void)
297{
298	struct qeth_card *card;
299
300	QETH_DBF_TEXT(setup, 2, "alloccrd");
301	card = kzalloc(sizeof(struct qeth_card), GFP_DMA|GFP_KERNEL);
302	if (!card)
303		return NULL;
304	QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
305	if (qeth_setup_channel(&card->read)) {
306		kfree(card);
307		return NULL;
308	}
309	if (qeth_setup_channel(&card->write)) {
310		qeth_clean_channel(&card->read);
311		kfree(card);
312		return NULL;
313	}
314	return card;
315}
316
317static long
318__qeth_check_irb_error(struct ccw_device *cdev, unsigned long intparm,
319		       struct irb *irb)
320{
321	if (!IS_ERR(irb))
322		return 0;
323
324	switch (PTR_ERR(irb)) {
325	case -EIO:
326		PRINT_WARN("i/o-error on device %s\n", cdev->dev.bus_id);
327		QETH_DBF_TEXT(trace, 2, "ckirberr");
328		QETH_DBF_TEXT_(trace, 2, "  rc%d", -EIO);
329		break;
330	case -ETIMEDOUT:
331		PRINT_WARN("timeout on device %s\n", cdev->dev.bus_id);
332		QETH_DBF_TEXT(trace, 2, "ckirberr");
333		QETH_DBF_TEXT_(trace, 2, "  rc%d", -ETIMEDOUT);
334		if (intparm == QETH_RCD_PARM) {
335			struct qeth_card *card = CARD_FROM_CDEV(cdev);
336
337			if (card && (card->data.ccwdev == cdev)) {
338				card->data.state = CH_STATE_DOWN;
339				wake_up(&card->wait_q);
340			}
341		}
342		break;
343	default:
344		PRINT_WARN("unknown error %ld on device %s\n", PTR_ERR(irb),
345			   cdev->dev.bus_id);
346		QETH_DBF_TEXT(trace, 2, "ckirberr");
347		QETH_DBF_TEXT(trace, 2, "  rc???");
348	}
349	return PTR_ERR(irb);
350}
351
352static int
353qeth_get_problem(struct ccw_device *cdev, struct irb *irb)
354{
355	int dstat,cstat;
356	char *sense;
357
358	sense = (char *) irb->ecw;
359	cstat = irb->scsw.cstat;
360	dstat = irb->scsw.dstat;
361
362	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
363		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
364		     SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
365		QETH_DBF_TEXT(trace,2, "CGENCHK");
366		PRINT_WARN("check on device %s, dstat=x%x, cstat=x%x ",
367			   cdev->dev.bus_id, dstat, cstat);
368		HEXDUMP16(WARN, "irb: ", irb);
369		HEXDUMP16(WARN, "irb: ", ((char *) irb) + 32);
370		return 1;
371	}
372
373	if (dstat & DEV_STAT_UNIT_CHECK) {
374		if (sense[SENSE_RESETTING_EVENT_BYTE] &
375		    SENSE_RESETTING_EVENT_FLAG) {
376			QETH_DBF_TEXT(trace,2,"REVIND");
377			return 1;
378		}
379		if (sense[SENSE_COMMAND_REJECT_BYTE] &
380		    SENSE_COMMAND_REJECT_FLAG) {
381			QETH_DBF_TEXT(trace,2,"CMDREJi");
382			return 0;
383		}
384		if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
385			QETH_DBF_TEXT(trace,2,"AFFE");
386			return 1;
387		}
388		if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
389			QETH_DBF_TEXT(trace,2,"ZEROSEN");
390			return 0;
391		}
392		QETH_DBF_TEXT(trace,2,"DGENCHK");
393			return 1;
394	}
395	return 0;
396}
397static int qeth_issue_next_read(struct qeth_card *);
398
399/**
400 * interrupt handler
401 */
402static void
403qeth_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
404{
405	int rc;
406	int cstat,dstat;
407	struct qeth_cmd_buffer *buffer;
408	struct qeth_channel *channel;
409	struct qeth_card *card;
410
411	QETH_DBF_TEXT(trace,5,"irq");
412
413	if (__qeth_check_irb_error(cdev, intparm, irb))
414		return;
415	cstat = irb->scsw.cstat;
416	dstat = irb->scsw.dstat;
417
418	card = CARD_FROM_CDEV(cdev);
419	if (!card)
420		return;
421
422	if (card->read.ccwdev == cdev){
423		channel = &card->read;
424		QETH_DBF_TEXT(trace,5,"read");
425	} else if (card->write.ccwdev == cdev) {
426		channel = &card->write;
427		QETH_DBF_TEXT(trace,5,"write");
428	} else {
429		channel = &card->data;
430		QETH_DBF_TEXT(trace,5,"data");
431	}
432	atomic_set(&channel->irq_pending, 0);
433
434	if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC))
435		channel->state = CH_STATE_STOPPED;
436
437	if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC))
438		channel->state = CH_STATE_HALTED;
439
440	/*let's wake up immediately on data channel*/
441	if ((channel == &card->data) && (intparm != 0) &&
442	    (intparm != QETH_RCD_PARM))
443		goto out;
444
445	if (intparm == QETH_CLEAR_CHANNEL_PARM) {
446		QETH_DBF_TEXT(trace, 6, "clrchpar");
447		/* we don't have to handle this further */
448		intparm = 0;
449	}
450	if (intparm == QETH_HALT_CHANNEL_PARM) {
451		QETH_DBF_TEXT(trace, 6, "hltchpar");
452		/* we don't have to handle this further */
453		intparm = 0;
454	}
455	if ((dstat & DEV_STAT_UNIT_EXCEP) ||
456	    (dstat & DEV_STAT_UNIT_CHECK) ||
457	    (cstat)) {
458		if (irb->esw.esw0.erw.cons) {
459			/* TODO: we should make this s390dbf */
460			PRINT_WARN("sense data available on channel %s.\n",
461				   CHANNEL_ID(channel));
462			PRINT_WARN(" cstat 0x%X\n dstat 0x%X\n", cstat, dstat);
463			HEXDUMP16(WARN,"irb: ",irb);
464			HEXDUMP16(WARN,"sense data: ",irb->ecw);
465		}
466		if (intparm == QETH_RCD_PARM) {
467			channel->state = CH_STATE_DOWN;
468			goto out;
469		}
470		rc = qeth_get_problem(cdev,irb);
471		if (rc) {
472			qeth_schedule_recovery(card);
473			goto out;
474		}
475	}
476
477	if (intparm == QETH_RCD_PARM) {
478		channel->state = CH_STATE_RCD_DONE;
479		goto out;
480	}
481	if (intparm) {
482		buffer = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
483		buffer->state = BUF_STATE_PROCESSED;
484	}
485	if (channel == &card->data)
486		return;
487
488	if (channel == &card->read &&
489	    channel->state == CH_STATE_UP)
490		qeth_issue_next_read(card);
491
492	qeth_irq_tasklet((unsigned long)channel);
493	return;
494out:
495	wake_up(&card->wait_q);
496}
497
498/**
499 * tasklet function scheduled from irq handler
500 */
501static void
502qeth_irq_tasklet(unsigned long data)
503{
504	struct qeth_card *card;
505	struct qeth_channel *channel;
506	struct qeth_cmd_buffer *iob;
507	__u8 index;
508
509	QETH_DBF_TEXT(trace,5,"irqtlet");
510	channel = (struct qeth_channel *) data;
511	iob = channel->iob;
512	index = channel->buf_no;
513	card = CARD_FROM_CDEV(channel->ccwdev);
514	while (iob[index].state == BUF_STATE_PROCESSED) {
515		if (iob[index].callback !=NULL) {
516			iob[index].callback(channel,iob + index);
517		}
518		index = (index + 1) % QETH_CMD_BUFFER_NO;
519	}
520	channel->buf_no = index;
521	wake_up(&card->wait_q);
522}
523
524static int qeth_stop_card(struct qeth_card *, int);
525
526static int
527__qeth_set_offline(struct ccwgroup_device *cgdev, int recovery_mode)
528{
529	struct qeth_card *card = (struct qeth_card *) cgdev->dev.driver_data;
530	int rc = 0, rc2 = 0, rc3 = 0;
531	enum qeth_card_states recover_flag;
532
533	QETH_DBF_TEXT(setup, 3, "setoffl");
534	QETH_DBF_HEX(setup, 3, &card, sizeof(void *));
535
536	if (card->dev && netif_carrier_ok(card->dev))
537		netif_carrier_off(card->dev);
538	recover_flag = card->state;
539	if (qeth_stop_card(card, recovery_mode) == -ERESTARTSYS){
540		PRINT_WARN("Stopping card %s interrupted by user!\n",
541			   CARD_BUS_ID(card));
542		return -ERESTARTSYS;
543	}
544	rc  = ccw_device_set_offline(CARD_DDEV(card));
545	rc2 = ccw_device_set_offline(CARD_WDEV(card));
546	rc3 = ccw_device_set_offline(CARD_RDEV(card));
547	if (!rc)
548		rc = (rc2) ? rc2 : rc3;
549	if (rc)
550		QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
551	if (recover_flag == CARD_STATE_UP)
552		card->state = CARD_STATE_RECOVER;
553	qeth_notify_processes();
554	return 0;
555}
556
557static int
558qeth_set_offline(struct ccwgroup_device *cgdev)
559{
560	return  __qeth_set_offline(cgdev, 0);
561}
562
563static int
564qeth_wait_for_threads(struct qeth_card *card, unsigned long threads);
565
566
567static void
568qeth_remove_device(struct ccwgroup_device *cgdev)
569{
570	struct qeth_card *card = (struct qeth_card *) cgdev->dev.driver_data;
571	unsigned long flags;
572
573	QETH_DBF_TEXT(setup, 3, "rmdev");
574	QETH_DBF_HEX(setup, 3, &card, sizeof(void *));
575
576	if (!card)
577		return;
578
579	if (qeth_wait_for_threads(card, 0xffffffff))
580		return;
581
582	if (cgdev->state == CCWGROUP_ONLINE){
583		card->use_hard_stop = 1;
584		qeth_set_offline(cgdev);
585	}
586	/* remove form our internal list */
587	write_lock_irqsave(&qeth_card_list.rwlock, flags);
588	list_del(&card->list);
589	write_unlock_irqrestore(&qeth_card_list.rwlock, flags);
590	if (card->dev)
591		unregister_netdev(card->dev);
592	qeth_remove_device_attributes(&cgdev->dev);
593	qeth_free_card(card);
594	cgdev->dev.driver_data = NULL;
595	put_device(&cgdev->dev);
596}
597
598static int
599qeth_register_addr_entry(struct qeth_card *, struct qeth_ipaddr *);
600static int
601qeth_deregister_addr_entry(struct qeth_card *, struct qeth_ipaddr *);
602
603/**
604 * Add/remove address to/from card's ip list, i.e. try to add or remove
605 * reference to/from an IP address that is already registered on the card.
606 * Returns:
607 * 	0  address was on card and its reference count has been adjusted,
608 * 	   but is still > 0, so nothing has to be done
609 * 	   also returns 0 if card was not on card and the todo was to delete
610 * 	   the address -> there is also nothing to be done
611 * 	1  address was not on card and the todo is to add it to the card's ip
612 * 	   list
613 * 	-1 address was on card and its reference count has been decremented
614 * 	   to <= 0 by the todo -> address must be removed from card
615 */
616static int
617__qeth_ref_ip_on_card(struct qeth_card *card, struct qeth_ipaddr *todo,
618		      struct qeth_ipaddr **__addr)
619{
620	struct qeth_ipaddr *addr;
621	int found = 0;
622
623	list_for_each_entry(addr, &card->ip_list, entry) {
624		if (card->options.layer2) {
625			if ((addr->type == todo->type) &&
626			    (memcmp(&addr->mac, &todo->mac,
627				    OSA_ADDR_LEN) == 0)) {
628				found = 1;
629				break;
630			}
631			continue;
632		}
633		if ((addr->proto     == QETH_PROT_IPV4)  &&
634		    (todo->proto     == QETH_PROT_IPV4)  &&
635		    (addr->type      == todo->type)      &&
636		    (addr->u.a4.addr == todo->u.a4.addr) &&
637		    (addr->u.a4.mask == todo->u.a4.mask)) {
638			found = 1;
639			break;
640		}
641		if ((addr->proto       == QETH_PROT_IPV6)     &&
642		    (todo->proto       == QETH_PROT_IPV6)     &&
643		    (addr->type        == todo->type)         &&
644		    (addr->u.a6.pfxlen == todo->u.a6.pfxlen)  &&
645		    (memcmp(&addr->u.a6.addr, &todo->u.a6.addr,
646			    sizeof(struct in6_addr)) == 0)) {
647			found = 1;
648			break;
649		}
650	}
651	if (found) {
652		addr->users += todo->users;
653		if (addr->users <= 0){
654			*__addr = addr;
655			return -1;
656		} else {
657			/* for VIPA and RXIP limit refcount to 1 */
658			if (addr->type != QETH_IP_TYPE_NORMAL)
659				addr->users = 1;
660			return 0;
661		}
662	}
663	if (todo->users > 0) {
664		/* for VIPA and RXIP limit refcount to 1 */
665		if (todo->type != QETH_IP_TYPE_NORMAL)
666			todo->users = 1;
667		return 1;
668	} else
669		return 0;
670}
671
672static int
673__qeth_address_exists_in_list(struct list_head *list, struct qeth_ipaddr *addr,
674		              int same_type)
675{
676	struct qeth_ipaddr *tmp;
677
678	list_for_each_entry(tmp, list, entry) {
679		if ((tmp->proto     == QETH_PROT_IPV4)            &&
680		    (addr->proto    == QETH_PROT_IPV4)            &&
681		    ((same_type && (tmp->type == addr->type)) ||
682		     (!same_type && (tmp->type != addr->type))  ) &&
683		    (tmp->u.a4.addr == addr->u.a4.addr)             ){
684			return 1;
685		}
686		if ((tmp->proto  == QETH_PROT_IPV6)               &&
687		    (addr->proto == QETH_PROT_IPV6)               &&
688		    ((same_type && (tmp->type == addr->type)) ||
689		     (!same_type && (tmp->type != addr->type))  ) &&
690		    (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
691			    sizeof(struct in6_addr)) == 0)          ) {
692			return 1;
693		}
694	}
695	return 0;
696}
697
698/*
699 * Add IP to be added to todo list. If there is already an "add todo"
700 * in this list we just incremenent the reference count.
701 * Returns 0 if we  just incremented reference count.
702 */
703static int
704__qeth_insert_ip_todo(struct qeth_card *card, struct qeth_ipaddr *addr, int add)
705{
706	struct qeth_ipaddr *tmp, *t;
707	int found = 0;
708
709	list_for_each_entry_safe(tmp, t, card->ip_tbd_list, entry) {
710		if ((addr->type == QETH_IP_TYPE_DEL_ALL_MC) &&
711		    (tmp->type == QETH_IP_TYPE_DEL_ALL_MC))
712			return 0;
713		if (card->options.layer2) {
714			if ((tmp->type	== addr->type)	&&
715			    (tmp->is_multicast == addr->is_multicast) &&
716			    (memcmp(&tmp->mac, &addr->mac,
717				    OSA_ADDR_LEN) == 0)) {
718				found = 1;
719				break;
720			}
721			continue;
722		}
723		if ((tmp->proto        == QETH_PROT_IPV4)     &&
724		    (addr->proto       == QETH_PROT_IPV4)     &&
725		    (tmp->type         == addr->type)         &&
726		    (tmp->is_multicast == addr->is_multicast) &&
727		    (tmp->u.a4.addr    == addr->u.a4.addr)    &&
728		    (tmp->u.a4.mask    == addr->u.a4.mask)) {
729			found = 1;
730			break;
731		}
732		if ((tmp->proto        == QETH_PROT_IPV6)      &&
733		    (addr->proto       == QETH_PROT_IPV6)      &&
734		    (tmp->type         == addr->type)          &&
735		    (tmp->is_multicast == addr->is_multicast)  &&
736		    (tmp->u.a6.pfxlen  == addr->u.a6.pfxlen)   &&
737		    (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
738			    sizeof(struct in6_addr)) == 0)) {
739			found = 1;
740			break;
741		}
742	}
743	if (found){
744		if (addr->users != 0)
745			tmp->users += addr->users;
746		else
747			tmp->users += add? 1:-1;
748		if (tmp->users == 0) {
749			list_del(&tmp->entry);
750			kfree(tmp);
751		}
752		return 0;
753	} else {
754		if (addr->type == QETH_IP_TYPE_DEL_ALL_MC)
755			list_add(&addr->entry, card->ip_tbd_list);
756		else {
757			if (addr->users == 0)
758				addr->users += add? 1:-1;
759			if (add && (addr->type == QETH_IP_TYPE_NORMAL) &&
760			    qeth_is_addr_covered_by_ipato(card, addr)){
761				QETH_DBF_TEXT(trace, 2, "tkovaddr");
762				addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
763			}
764			list_add_tail(&addr->entry, card->ip_tbd_list);
765		}
766		return 1;
767	}
768}
769
770/**
771 * Remove IP address from list
772 */
773static int
774qeth_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
775{
776	unsigned long flags;
777	int rc = 0;
778
779	QETH_DBF_TEXT(trace, 4, "delip");
780
781	if (card->options.layer2)
782		QETH_DBF_HEX(trace, 4, &addr->mac, 6);
783	else if (addr->proto == QETH_PROT_IPV4)
784		QETH_DBF_HEX(trace, 4, &addr->u.a4.addr, 4);
785	else {
786		QETH_DBF_HEX(trace, 4, &addr->u.a6.addr, 8);
787		QETH_DBF_HEX(trace, 4, ((char *)&addr->u.a6.addr) + 8, 8);
788	}
789	spin_lock_irqsave(&card->ip_lock, flags);
790	rc = __qeth_insert_ip_todo(card, addr, 0);
791	spin_unlock_irqrestore(&card->ip_lock, flags);
792	return rc;
793}
794
795static int
796qeth_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
797{
798	unsigned long flags;
799	int rc = 0;
800
801	QETH_DBF_TEXT(trace, 4, "addip");
802	if (card->options.layer2)
803		QETH_DBF_HEX(trace, 4, &addr->mac, 6);
804	else if (addr->proto == QETH_PROT_IPV4)
805		QETH_DBF_HEX(trace, 4, &addr->u.a4.addr, 4);
806	else {
807		QETH_DBF_HEX(trace, 4, &addr->u.a6.addr, 8);
808		QETH_DBF_HEX(trace, 4, ((char *)&addr->u.a6.addr) + 8, 8);
809	}
810	spin_lock_irqsave(&card->ip_lock, flags);
811	rc = __qeth_insert_ip_todo(card, addr, 1);
812	spin_unlock_irqrestore(&card->ip_lock, flags);
813	return rc;
814}
815
816static void
817__qeth_delete_all_mc(struct qeth_card *card, unsigned long *flags)
818{
819	struct qeth_ipaddr *addr, *tmp;
820	int rc;
821again:
822	list_for_each_entry_safe(addr, tmp, &card->ip_list, entry) {
823		if (addr->is_multicast) {
824			spin_unlock_irqrestore(&card->ip_lock, *flags);
825			rc = qeth_deregister_addr_entry(card, addr);
826			spin_lock_irqsave(&card->ip_lock, *flags);
827			if (!rc) {
828				list_del(&addr->entry);
829				kfree(addr);
830				goto again;
831			}
832		}
833	}
834}
835
836static void
837qeth_set_ip_addr_list(struct qeth_card *card)
838{
839	struct list_head *tbd_list;
840	struct qeth_ipaddr *todo, *addr;
841	unsigned long flags;
842	int rc;
843
844	QETH_DBF_TEXT(trace, 2, "sdiplist");
845	QETH_DBF_HEX(trace, 2, &card, sizeof(void *));
846
847	spin_lock_irqsave(&card->ip_lock, flags);
848	tbd_list = card->ip_tbd_list;
849	card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC);
850	if (!card->ip_tbd_list) {
851		QETH_DBF_TEXT(trace, 0, "silnomem");
852		card->ip_tbd_list = tbd_list;
853		spin_unlock_irqrestore(&card->ip_lock, flags);
854		return;
855	} else
856		INIT_LIST_HEAD(card->ip_tbd_list);
857
858	while (!list_empty(tbd_list)){
859		todo = list_entry(tbd_list->next, struct qeth_ipaddr, entry);
860		list_del(&todo->entry);
861		if (todo->type == QETH_IP_TYPE_DEL_ALL_MC){
862			__qeth_delete_all_mc(card, &flags);
863			kfree(todo);
864			continue;
865		}
866		rc = __qeth_ref_ip_on_card(card, todo, &addr);
867		if (rc == 0) {
868			/* nothing to be done; only adjusted refcount */
869			kfree(todo);
870		} else if (rc == 1) {
871			/* new entry to be added to on-card list */
872			spin_unlock_irqrestore(&card->ip_lock, flags);
873			rc = qeth_register_addr_entry(card, todo);
874			spin_lock_irqsave(&card->ip_lock, flags);
875			if (!rc)
876				list_add_tail(&todo->entry, &card->ip_list);
877			else
878				kfree(todo);
879		} else if (rc == -1) {
880			/* on-card entry to be removed */
881			list_del_init(&addr->entry);
882			spin_unlock_irqrestore(&card->ip_lock, flags);
883			rc = qeth_deregister_addr_entry(card, addr);
884			spin_lock_irqsave(&card->ip_lock, flags);
885			if (!rc)
886				kfree(addr);
887			else
888				list_add_tail(&addr->entry, &card->ip_list);
889			kfree(todo);
890		}
891	}
892	spin_unlock_irqrestore(&card->ip_lock, flags);
893	kfree(tbd_list);
894}
895
896static void qeth_delete_mc_addresses(struct qeth_card *);
897static void qeth_add_multicast_ipv4(struct qeth_card *);
898static void qeth_layer2_add_multicast(struct qeth_card *);
899#ifdef CONFIG_QETH_IPV6
900static void qeth_add_multicast_ipv6(struct qeth_card *);
901#endif
902
903static int
904qeth_set_thread_start_bit(struct qeth_card *card, unsigned long thread)
905{
906	unsigned long flags;
907
908	spin_lock_irqsave(&card->thread_mask_lock, flags);
909	if ( !(card->thread_allowed_mask & thread) ||
910	      (card->thread_start_mask & thread) ) {
911		spin_unlock_irqrestore(&card->thread_mask_lock, flags);
912		return -EPERM;
913	}
914	card->thread_start_mask |= thread;
915	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
916	return 0;
917}
918
919static void
920qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread)
921{
922	unsigned long flags;
923
924	spin_lock_irqsave(&card->thread_mask_lock, flags);
925	card->thread_start_mask &= ~thread;
926	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
927	wake_up(&card->wait_q);
928}
929
930static void
931qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
932{
933	unsigned long flags;
934
935	spin_lock_irqsave(&card->thread_mask_lock, flags);
936	card->thread_running_mask &= ~thread;
937	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
938	wake_up(&card->wait_q);
939}
940
941static int
942__qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
943{
944	unsigned long flags;
945	int rc = 0;
946
947	spin_lock_irqsave(&card->thread_mask_lock, flags);
948	if (card->thread_start_mask & thread){
949		if ((card->thread_allowed_mask & thread) &&
950		    !(card->thread_running_mask & thread)){
951			rc = 1;
952			card->thread_start_mask &= ~thread;
953			card->thread_running_mask |= thread;
954		} else
955			rc = -EPERM;
956	}
957	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
958	return rc;
959}
960
961static int
962qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
963{
964	int rc = 0;
965
966	wait_event(card->wait_q,
967		   (rc = __qeth_do_run_thread(card, thread)) >= 0);
968	return rc;
969}
970
971static int
972qeth_recover(void *ptr)
973{
974	struct qeth_card *card;
975	int rc = 0;
976
977	card = (struct qeth_card *) ptr;
978	daemonize("qeth_recover");
979	QETH_DBF_TEXT(trace,2,"recover1");
980	QETH_DBF_HEX(trace, 2, &card, sizeof(void *));
981	if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
982		return 0;
983	QETH_DBF_TEXT(trace,2,"recover2");
984	PRINT_WARN("Recovery of device %s started ...\n",
985		   CARD_BUS_ID(card));
986	card->use_hard_stop = 1;
987	__qeth_set_offline(card->gdev,1);
988	rc = __qeth_set_online(card->gdev,1);
989	/* don't run another scheduled recovery */
990	qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
991	qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
992	if (!rc)
993		PRINT_INFO("Device %s successfully recovered!\n",
994			   CARD_BUS_ID(card));
995	else
996		PRINT_INFO("Device %s could not be recovered!\n",
997			   CARD_BUS_ID(card));
998	return 0;
999}
1000
1001void
1002qeth_schedule_recovery(struct qeth_card *card)
1003{
1004	QETH_DBF_TEXT(trace,2,"startrec");
1005	if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
1006		schedule_work(&card->kernel_thread_starter);
1007}
1008
1009static int
1010qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
1011{
1012	unsigned long flags;
1013	int rc = 0;
1014
1015	spin_lock_irqsave(&card->thread_mask_lock, flags);
1016	QETH_DBF_TEXT_(trace, 4, "  %02x%02x%02x",
1017			(u8) card->thread_start_mask,
1018			(u8) card->thread_allowed_mask,
1019			(u8) card->thread_running_mask);
1020	rc = (card->thread_start_mask & thread);
1021	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
1022	return rc;
1023}
1024
1025static void
1026qeth_start_kernel_thread(struct work_struct *work)
1027{
1028	struct qeth_card *card = container_of(work, struct qeth_card, kernel_thread_starter);
1029	QETH_DBF_TEXT(trace , 2, "strthrd");
1030
1031	if (card->read.state != CH_STATE_UP &&
1032	    card->write.state != CH_STATE_UP)
1033		return;
1034	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD))
1035		kernel_thread(qeth_recover, (void *) card, SIGCHLD);
1036}
1037
1038
1039static void
1040qeth_set_intial_options(struct qeth_card *card)
1041{
1042	card->options.route4.type = NO_ROUTER;
1043#ifdef CONFIG_QETH_IPV6
1044	card->options.route6.type = NO_ROUTER;
1045#endif /* QETH_IPV6 */
1046	card->options.checksum_type = QETH_CHECKSUM_DEFAULT;
1047	card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS;
1048	card->options.macaddr_mode = QETH_TR_MACADDR_NONCANONICAL;
1049	card->options.fake_broadcast = 0;
1050	card->options.add_hhlen = DEFAULT_ADD_HHLEN;
1051	card->options.fake_ll = 0;
1052	if (card->info.type == QETH_CARD_TYPE_OSN)
1053		card->options.layer2 = 1;
1054	else
1055		card->options.layer2 = 0;
1056	card->options.performance_stats = 0;
1057}
1058
1059/**
1060 * initialize channels ,card and all state machines
1061 */
1062static int
1063qeth_setup_card(struct qeth_card *card)
1064{
1065
1066	QETH_DBF_TEXT(setup, 2, "setupcrd");
1067	QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
1068
1069	card->read.state  = CH_STATE_DOWN;
1070	card->write.state = CH_STATE_DOWN;
1071	card->data.state  = CH_STATE_DOWN;
1072	card->state = CARD_STATE_DOWN;
1073	card->lan_online = 0;
1074	card->use_hard_stop = 0;
1075	card->dev = NULL;
1076#ifdef CONFIG_QETH_VLAN
1077	spin_lock_init(&card->vlanlock);
1078	card->vlangrp = NULL;
1079#endif
1080	spin_lock_init(&card->lock);
1081	spin_lock_init(&card->ip_lock);
1082	spin_lock_init(&card->thread_mask_lock);
1083	card->thread_start_mask = 0;
1084	card->thread_allowed_mask = 0;
1085	card->thread_running_mask = 0;
1086	INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
1087	INIT_LIST_HEAD(&card->ip_list);
1088	card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
1089	if (!card->ip_tbd_list) {
1090		QETH_DBF_TEXT(setup, 0, "iptbdnom");
1091		return -ENOMEM;
1092	}
1093	INIT_LIST_HEAD(card->ip_tbd_list);
1094	INIT_LIST_HEAD(&card->cmd_waiter_list);
1095	init_waitqueue_head(&card->wait_q);
1096	/* intial options */
1097	qeth_set_intial_options(card);
1098	/* IP address takeover */
1099	INIT_LIST_HEAD(&card->ipato.entries);
1100	card->ipato.enabled = 0;
1101	card->ipato.invert4 = 0;
1102	card->ipato.invert6 = 0;
1103	/* init QDIO stuff */
1104	qeth_init_qdio_info(card);
1105	return 0;
1106}
1107
1108static int
1109is_1920_device (struct qeth_card *card)
1110{
1111	int single_queue = 0;
1112	struct ccw_device *ccwdev;
1113	struct channelPath_dsc {
1114		u8 flags;
1115		u8 lsn;
1116		u8 desc;
1117		u8 chpid;
1118		u8 swla;
1119		u8 zeroes;
1120		u8 chla;
1121		u8 chpp;
1122	} *chp_dsc;
1123
1124	QETH_DBF_TEXT(setup, 2, "chk_1920");
1125
1126	ccwdev = card->data.ccwdev;
1127	chp_dsc = (struct channelPath_dsc *)ccw_device_get_chp_desc(ccwdev, 0);
1128	if (chp_dsc != NULL) {
1129		/* CHPP field bit 6 == 1 -> single queue */
1130		single_queue = ((chp_dsc->chpp & 0x02) == 0x02);
1131		kfree(chp_dsc);
1132	}
1133	QETH_DBF_TEXT_(setup, 2, "rc:%x", single_queue);
1134	return single_queue;
1135}
1136
1137static int
1138qeth_determine_card_type(struct qeth_card *card)
1139{
1140	int i = 0;
1141
1142	QETH_DBF_TEXT(setup, 2, "detcdtyp");
1143
1144	card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
1145	card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
1146	while (known_devices[i][4]) {
1147		if ((CARD_RDEV(card)->id.dev_type == known_devices[i][2]) &&
1148		    (CARD_RDEV(card)->id.dev_model == known_devices[i][3])) {
1149			card->info.type = known_devices[i][4];
1150			card->qdio.no_out_queues = known_devices[i][8];
1151			card->info.is_multicast_different = known_devices[i][9];
1152			if (is_1920_device(card)) {
1153				PRINT_INFO("Priority Queueing not able "
1154					   "due to hardware limitations!\n");
1155				card->qdio.no_out_queues = 1;
1156				card->qdio.default_out_queue = 0;
1157			}
1158			return 0;
1159		}
1160		i++;
1161	}
1162	card->info.type = QETH_CARD_TYPE_UNKNOWN;
1163	PRINT_ERR("unknown card type on device %s\n", CARD_BUS_ID(card));
1164	return -ENOENT;
1165}
1166
1167static int
1168qeth_probe_device(struct ccwgroup_device *gdev)
1169{
1170	struct qeth_card *card;
1171	struct device *dev;
1172	unsigned long flags;
1173	int rc;
1174
1175	QETH_DBF_TEXT(setup, 2, "probedev");
1176
1177	dev = &gdev->dev;
1178	if (!get_device(dev))
1179		return -ENODEV;
1180
1181	QETH_DBF_TEXT_(setup, 2, "%s", gdev->dev.bus_id);
1182
1183	card = qeth_alloc_card();
1184	if (!card) {
1185		put_device(dev);
1186		QETH_DBF_TEXT_(setup, 2, "1err%d", -ENOMEM);
1187		return -ENOMEM;
1188	}
1189	card->read.ccwdev  = gdev->cdev[0];
1190	card->write.ccwdev = gdev->cdev[1];
1191	card->data.ccwdev  = gdev->cdev[2];
1192	gdev->dev.driver_data = card;
1193	card->gdev = gdev;
1194	gdev->cdev[0]->handler = qeth_irq;
1195	gdev->cdev[1]->handler = qeth_irq;
1196	gdev->cdev[2]->handler = qeth_irq;
1197
1198	if ((rc = qeth_determine_card_type(card))){
1199		PRINT_WARN("%s: not a valid card type\n", __func__);
1200		QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
1201		put_device(dev);
1202		qeth_free_card(card);
1203		return rc;
1204	}
1205	if ((rc = qeth_setup_card(card))){
1206		QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
1207		put_device(dev);
1208		qeth_free_card(card);
1209		return rc;
1210	}
1211	rc = qeth_create_device_attributes(dev);
1212	if (rc) {
1213		put_device(dev);
1214		qeth_free_card(card);
1215		return rc;
1216	}
1217	/* insert into our internal list */
1218	write_lock_irqsave(&qeth_card_list.rwlock, flags);
1219	list_add_tail(&card->list, &qeth_card_list.list);
1220	write_unlock_irqrestore(&qeth_card_list.rwlock, flags);
1221	return rc;
1222}
1223
1224
1225static int qeth_read_conf_data(struct qeth_card *card, void **buffer,
1226			       int *length)
1227{
1228	struct ciw *ciw;
1229	char *rcd_buf;
1230	int ret;
1231	struct qeth_channel *channel = &card->data;
1232	unsigned long flags;
1233
1234	/*
1235	 * scan for RCD command in extended SenseID data
1236	 */
1237	ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
1238	if (!ciw || ciw->cmd == 0)
1239		return -EOPNOTSUPP;
1240	rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
1241	if (!rcd_buf)
1242		return -ENOMEM;
1243
1244	channel->ccw.cmd_code = ciw->cmd;
1245	channel->ccw.cda = (__u32) __pa (rcd_buf);
1246	channel->ccw.count = ciw->count;
1247	channel->ccw.flags = CCW_FLAG_SLI;
1248	channel->state = CH_STATE_RCD;
1249	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1250	ret = ccw_device_start_timeout(channel->ccwdev, &channel->ccw,
1251				       QETH_RCD_PARM, LPM_ANYPATH, 0,
1252				       QETH_RCD_TIMEOUT);
1253	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
1254	if (!ret)
1255		wait_event(card->wait_q,
1256			   (channel->state == CH_STATE_RCD_DONE ||
1257			    channel->state == CH_STATE_DOWN));
1258	if (channel->state == CH_STATE_DOWN)
1259		ret = -EIO;
1260	else
1261		channel->state = CH_STATE_DOWN;
1262	if (ret) {
1263		kfree(rcd_buf);
1264		*buffer = NULL;
1265		*length = 0;
1266	} else {
1267		*length = ciw->count;
1268		*buffer = rcd_buf;
1269	}
1270	return ret;
1271}
1272
1273static int
1274qeth_get_unitaddr(struct qeth_card *card)
1275{
1276 	int length;
1277	char *prcd;
1278	int rc;
1279
1280	QETH_DBF_TEXT(setup, 2, "getunit");
1281	rc = qeth_read_conf_data(card, (void **) &prcd, &length);
1282	if (rc) {
1283		PRINT_ERR("qeth_read_conf_data for device %s returned %i\n",
1284			  CARD_DDEV_ID(card), rc);
1285		return rc;
1286	}
1287	card->info.chpid = prcd[30];
1288	card->info.unit_addr2 = prcd[31];
1289	card->info.cula = prcd[63];
1290	card->info.guestlan = ((prcd[0x10] == _ascebc['V']) &&
1291			       (prcd[0x11] == _ascebc['M']));
1292	kfree(prcd);
1293	return 0;
1294}
1295
1296static void
1297qeth_init_tokens(struct qeth_card *card)
1298{
1299	card->token.issuer_rm_w = 0x00010103UL;
1300	card->token.cm_filter_w = 0x00010108UL;
1301	card->token.cm_connection_w = 0x0001010aUL;
1302	card->token.ulp_filter_w = 0x0001010bUL;
1303	card->token.ulp_connection_w = 0x0001010dUL;
1304}
1305
1306static inline __u16
1307raw_devno_from_bus_id(char *id)
1308{
1309        id += (strlen(id) - 4);
1310        return (__u16) simple_strtoul(id, &id, 16);
1311}
1312/**
1313 * setup channel
1314 */
1315static void
1316qeth_setup_ccw(struct qeth_channel *channel,unsigned char *iob, __u32 len)
1317{
1318	struct qeth_card *card;
1319
1320	QETH_DBF_TEXT(trace, 4, "setupccw");
1321	card = CARD_FROM_CDEV(channel->ccwdev);
1322	if (channel == &card->read)
1323		memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
1324	else
1325		memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
1326	channel->ccw.count = len;
1327	channel->ccw.cda = (__u32) __pa(iob);
1328}
1329
1330/**
1331 * get free buffer for ccws (IDX activation, lancmds,ipassists...)
1332 */
1333static struct qeth_cmd_buffer *
1334__qeth_get_buffer(struct qeth_channel *channel)
1335{
1336	__u8 index;
1337
1338	QETH_DBF_TEXT(trace, 6, "getbuff");
1339	index = channel->io_buf_no;
1340	do {
1341		if (channel->iob[index].state == BUF_STATE_FREE) {
1342			channel->iob[index].state = BUF_STATE_LOCKED;
1343			channel->io_buf_no = (channel->io_buf_no + 1) %
1344				QETH_CMD_BUFFER_NO;
1345			memset(channel->iob[index].data, 0, QETH_BUFSIZE);
1346			return channel->iob + index;
1347		}
1348		index = (index + 1) % QETH_CMD_BUFFER_NO;
1349	} while(index != channel->io_buf_no);
1350
1351	return NULL;
1352}
1353
1354/**
1355 * release command buffer
1356 */
1357static void
1358qeth_release_buffer(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
1359{
1360	unsigned long flags;
1361
1362	QETH_DBF_TEXT(trace, 6, "relbuff");
1363	spin_lock_irqsave(&channel->iob_lock, flags);
1364	memset(iob->data, 0, QETH_BUFSIZE);
1365	iob->state = BUF_STATE_FREE;
1366	iob->callback = qeth_send_control_data_cb;
1367	iob->rc = 0;
1368	spin_unlock_irqrestore(&channel->iob_lock, flags);
1369}
1370
1371static struct qeth_cmd_buffer *
1372qeth_get_buffer(struct qeth_channel *channel)
1373{
1374	struct qeth_cmd_buffer *buffer = NULL;
1375	unsigned long flags;
1376
1377	spin_lock_irqsave(&channel->iob_lock, flags);
1378	buffer = __qeth_get_buffer(channel);
1379	spin_unlock_irqrestore(&channel->iob_lock, flags);
1380	return buffer;
1381}
1382
1383static struct qeth_cmd_buffer *
1384qeth_wait_for_buffer(struct qeth_channel *channel)
1385{
1386	struct qeth_cmd_buffer *buffer;
1387	wait_event(channel->wait_q,
1388		   ((buffer = qeth_get_buffer(channel)) != NULL));
1389	return buffer;
1390}
1391
1392static void
1393qeth_clear_cmd_buffers(struct qeth_channel *channel)
1394{
1395	int cnt;
1396
1397	for (cnt=0; cnt < QETH_CMD_BUFFER_NO; cnt++)
1398		qeth_release_buffer(channel,&channel->iob[cnt]);
1399	channel->buf_no = 0;
1400	channel->io_buf_no = 0;
1401}
1402
1403/**
1404 * start IDX for read and write channel
1405 */
1406static int
1407qeth_idx_activate_get_answer(struct qeth_channel *channel,
1408			      void (*idx_reply_cb)(struct qeth_channel *,
1409						   struct qeth_cmd_buffer *))
1410{
1411	struct qeth_cmd_buffer *iob;
1412	unsigned long flags;
1413	int rc;
1414	struct qeth_card *card;
1415
1416	QETH_DBF_TEXT(setup, 2, "idxanswr");
1417	card = CARD_FROM_CDEV(channel->ccwdev);
1418	iob = qeth_get_buffer(channel);
1419	iob->callback = idx_reply_cb;
1420	memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
1421	channel->ccw.count = QETH_BUFSIZE;
1422	channel->ccw.cda = (__u32) __pa(iob->data);
1423
1424	wait_event(card->wait_q,
1425		   atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
1426	QETH_DBF_TEXT(setup, 6, "noirqpnd");
1427	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1428	rc = ccw_device_start(channel->ccwdev,
1429			      &channel->ccw,(addr_t) iob, 0, 0);
1430	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
1431
1432	if (rc) {
1433		PRINT_ERR("qeth: Error2 in activating channel rc=%d\n",rc);
1434		QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
1435		atomic_set(&channel->irq_pending, 0);
1436		wake_up(&card->wait_q);
1437		return rc;
1438	}
1439	rc = wait_event_interruptible_timeout(card->wait_q,
1440			 channel->state == CH_STATE_UP, QETH_TIMEOUT);
1441	if (rc == -ERESTARTSYS)
1442		return rc;
1443	if (channel->state != CH_STATE_UP){
1444		rc = -ETIME;
1445		QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
1446		qeth_clear_cmd_buffers(channel);
1447	} else
1448		rc = 0;
1449	return rc;
1450}
1451
1452static int
1453qeth_idx_activate_channel(struct qeth_channel *channel,
1454			   void (*idx_reply_cb)(struct qeth_channel *,
1455						struct qeth_cmd_buffer *))
1456{
1457	struct qeth_card *card;
1458	struct qeth_cmd_buffer *iob;
1459	unsigned long flags;
1460	__u16 temp;
1461	int rc;
1462
1463	card = CARD_FROM_CDEV(channel->ccwdev);
1464
1465	QETH_DBF_TEXT(setup, 2, "idxactch");
1466
1467	iob = qeth_get_buffer(channel);
1468	iob->callback = idx_reply_cb;
1469	memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
1470	channel->ccw.count = IDX_ACTIVATE_SIZE;
1471	channel->ccw.cda = (__u32) __pa(iob->data);
1472	if (channel == &card->write) {
1473		memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
1474		memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
1475		       &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
1476		card->seqno.trans_hdr++;
1477	} else {
1478		memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
1479		memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
1480		       &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
1481	}
1482	memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
1483	       &card->token.issuer_rm_w,QETH_MPC_TOKEN_LENGTH);
1484	memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
1485	       &card->info.func_level,sizeof(__u16));
1486	temp = raw_devno_from_bus_id(CARD_DDEV_ID(card));
1487	memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &temp, 2);
1488	temp = (card->info.cula << 8) + card->info.unit_addr2;
1489	memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2);
1490
1491	wait_event(card->wait_q,
1492		   atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
1493	QETH_DBF_TEXT(setup, 6, "noirqpnd");
1494	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1495	rc = ccw_device_start(channel->ccwdev,
1496			      &channel->ccw,(addr_t) iob, 0, 0);
1497	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
1498
1499	if (rc) {
1500		PRINT_ERR("qeth: Error1 in activating channel. rc=%d\n",rc);
1501		QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
1502		atomic_set(&channel->irq_pending, 0);
1503		wake_up(&card->wait_q);
1504		return rc;
1505	}
1506	rc = wait_event_interruptible_timeout(card->wait_q,
1507			channel->state == CH_STATE_ACTIVATING, QETH_TIMEOUT);
1508	if (rc == -ERESTARTSYS)
1509		return rc;
1510	if (channel->state != CH_STATE_ACTIVATING) {
1511		PRINT_WARN("qeth: IDX activate timed out!\n");
1512		QETH_DBF_TEXT_(setup, 2, "2err%d", -ETIME);
1513		qeth_clear_cmd_buffers(channel);
1514		return -ETIME;
1515	}
1516	return qeth_idx_activate_get_answer(channel,idx_reply_cb);
1517}
1518
1519static int
1520qeth_peer_func_level(int level)
1521{
1522	if ((level & 0xff) == 8)
1523		return (level & 0xff) + 0x400;
1524	if (((level >> 8) & 3) == 1)
1525		return (level & 0xff) + 0x200;
1526	return level;
1527}
1528
1529static void
1530qeth_idx_write_cb(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
1531{
1532	struct qeth_card *card;
1533	__u16 temp;
1534
1535	QETH_DBF_TEXT(setup ,2, "idxwrcb");
1536
1537	if (channel->state == CH_STATE_DOWN) {
1538		channel->state = CH_STATE_ACTIVATING;
1539		goto out;
1540	}
1541	card = CARD_FROM_CDEV(channel->ccwdev);
1542
1543	if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
1544		PRINT_ERR("IDX_ACTIVATE on write channel device %s: negative "
1545			  "reply\n", CARD_WDEV_ID(card));
1546		goto out;
1547	}
1548	memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
1549	if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) {
1550		PRINT_WARN("IDX_ACTIVATE on write channel device %s: "
1551			   "function level mismatch "
1552			   "(sent: 0x%x, received: 0x%x)\n",
1553			   CARD_WDEV_ID(card), card->info.func_level, temp);
1554		goto out;
1555	}
1556	channel->state = CH_STATE_UP;
1557out:
1558	qeth_release_buffer(channel, iob);
1559}
1560
1561static int
1562qeth_check_idx_response(unsigned char *buffer)
1563{
1564	if (!buffer)
1565		return 0;
1566
1567	QETH_DBF_HEX(control, 2, buffer, QETH_DBF_CONTROL_LEN);
1568	if ((buffer[2] & 0xc0) == 0xc0) {
1569		PRINT_WARN("received an IDX TERMINATE "
1570			   "with cause code 0x%02x%s\n",
1571			   buffer[4],
1572			   ((buffer[4] == 0x22) ?
1573			    " -- try another portname" : ""));
1574		QETH_DBF_TEXT(trace, 2, "ckidxres");
1575		QETH_DBF_TEXT(trace, 2, " idxterm");
1576		QETH_DBF_TEXT_(trace, 2, "  rc%d", -EIO);
1577		return -EIO;
1578	}
1579	return 0;
1580}
1581
1582static void
1583qeth_idx_read_cb(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
1584{
1585	struct qeth_card *card;
1586	__u16 temp;
1587
1588	QETH_DBF_TEXT(setup , 2, "idxrdcb");
1589	if (channel->state == CH_STATE_DOWN) {
1590		channel->state = CH_STATE_ACTIVATING;
1591		goto out;
1592	}
1593
1594	card = CARD_FROM_CDEV(channel->ccwdev);
1595	if (qeth_check_idx_response(iob->data)) {
1596			goto out;
1597	}
1598	if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
1599		PRINT_ERR("IDX_ACTIVATE on read channel device %s: negative "
1600			  "reply\n", CARD_RDEV_ID(card));
1601		goto out;
1602	}
1603
1604/**
1605 * temporary fix for microcode bug
1606 * to revert it,replace OR by AND
1607 */
1608	if ( (!QETH_IDX_NO_PORTNAME_REQUIRED(iob->data)) ||
1609	     (card->info.type == QETH_CARD_TYPE_OSAE) )
1610		card->info.portname_required = 1;
1611
1612	memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
1613	if (temp != qeth_peer_func_level(card->info.func_level)) {
1614		PRINT_WARN("IDX_ACTIVATE on read channel device %s: function "
1615			   "level mismatch (sent: 0x%x, received: 0x%x)\n",
1616			   CARD_RDEV_ID(card), card->info.func_level, temp);
1617		goto out;
1618	}
1619	memcpy(&card->token.issuer_rm_r,
1620	       QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
1621	       QETH_MPC_TOKEN_LENGTH);
1622	memcpy(&card->info.mcl_level[0],
1623	       QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
1624	channel->state = CH_STATE_UP;
1625out:
1626	qeth_release_buffer(channel,iob);
1627}
1628
1629static int
1630qeth_issue_next_read(struct qeth_card *card)
1631{
1632	int rc;
1633	struct qeth_cmd_buffer *iob;
1634
1635	QETH_DBF_TEXT(trace,5,"issnxrd");
1636	if (card->read.state != CH_STATE_UP)
1637		return -EIO;
1638	iob = qeth_get_buffer(&card->read);
1639	if (!iob) {
1640		PRINT_WARN("issue_next_read failed: no iob available!\n");
1641		return -ENOMEM;
1642	}
1643	qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE);
1644	QETH_DBF_TEXT(trace, 6, "noirqpnd");
1645	rc = ccw_device_start(card->read.ccwdev, &card->read.ccw,
1646			      (addr_t) iob, 0, 0);
1647	if (rc) {
1648		PRINT_ERR("Error in starting next read ccw! rc=%i\n", rc);
1649		atomic_set(&card->read.irq_pending, 0);
1650		qeth_schedule_recovery(card);
1651		wake_up(&card->wait_q);
1652	}
1653	return rc;
1654}
1655
1656static struct qeth_reply *
1657qeth_alloc_reply(struct qeth_card *card)
1658{
1659	struct qeth_reply *reply;
1660
1661	reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC);
1662	if (reply){
1663		atomic_set(&reply->refcnt, 1);
1664		atomic_set(&reply->received, 0);
1665		reply->card = card;
1666	};
1667	return reply;
1668}
1669
1670static void
1671qeth_get_reply(struct qeth_reply *reply)
1672{
1673	WARN_ON(atomic_read(&reply->refcnt) <= 0);
1674	atomic_inc(&reply->refcnt);
1675}
1676
1677static void
1678qeth_put_reply(struct qeth_reply *reply)
1679{
1680	WARN_ON(atomic_read(&reply->refcnt) <= 0);
1681	if (atomic_dec_and_test(&reply->refcnt))
1682		kfree(reply);
1683}
1684
1685static void
1686qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, struct qeth_card *card)
1687{
1688	int rc;
1689	int com;
1690	char * ipa_name;
1691
1692	com = cmd->hdr.command;
1693	rc  = cmd->hdr.return_code;
1694	ipa_name = qeth_get_ipa_cmd_name(com);
1695
1696	PRINT_ERR("%s(x%X) for %s returned x%X \"%s\"\n", ipa_name, com,
1697		   QETH_CARD_IFNAME(card), rc, qeth_get_ipa_msg(rc));
1698}
1699
1700static struct qeth_ipa_cmd *
1701qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob)
1702{
1703	struct qeth_ipa_cmd *cmd = NULL;
1704
1705	QETH_DBF_TEXT(trace,5,"chkipad");
1706	if (IS_IPA(iob->data)){
1707		cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
1708		if (IS_IPA_REPLY(cmd)) {
1709			if (cmd->hdr.return_code)
1710				qeth_issue_ipa_msg(cmd, card);
1711			return cmd;
1712		}
1713		else {
1714			switch (cmd->hdr.command) {
1715			case IPA_CMD_STOPLAN:
1716				PRINT_WARN("Link failure on %s (CHPID 0x%X) - "
1717					   "there is a network problem or "
1718					   "someone pulled the cable or "
1719					   "disabled the port.\n",
1720					   QETH_CARD_IFNAME(card),
1721					   card->info.chpid);
1722				card->lan_online = 0;
1723				if (card->dev && netif_carrier_ok(card->dev))
1724					netif_carrier_off(card->dev);
1725				return NULL;
1726			case IPA_CMD_STARTLAN:
1727				PRINT_INFO("Link reestablished on %s "
1728					   "(CHPID 0x%X). Scheduling "
1729					   "IP address reset.\n",
1730					   QETH_CARD_IFNAME(card),
1731					   card->info.chpid);
1732				netif_carrier_on(card->dev);
1733				qeth_schedule_recovery(card);
1734				return NULL;
1735			case IPA_CMD_MODCCID:
1736				return cmd;
1737			case IPA_CMD_REGISTER_LOCAL_ADDR:
1738				QETH_DBF_TEXT(trace,3, "irla");
1739				break;
1740			case IPA_CMD_UNREGISTER_LOCAL_ADDR:
1741				QETH_DBF_TEXT(trace,3, "urla");
1742				break;
1743			default:
1744				PRINT_WARN("Received data is IPA "
1745					   "but not a reply!\n");
1746				break;
1747			}
1748		}
1749	}
1750	return cmd;
1751}
1752
1753/**
1754 * wake all waiting ipa commands
1755 */
1756static void
1757qeth_clear_ipacmd_list(struct qeth_card *card)
1758{
1759	struct qeth_reply *reply, *r;
1760	unsigned long flags;
1761
1762	QETH_DBF_TEXT(trace, 4, "clipalst");
1763
1764	spin_lock_irqsave(&card->lock, flags);
1765	list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
1766		qeth_get_reply(reply);
1767		reply->rc = -EIO;
1768		atomic_inc(&reply->received);
1769		list_del_init(&reply->list);
1770		wake_up(&reply->wait_q);
1771		qeth_put_reply(reply);
1772	}
1773	spin_unlock_irqrestore(&card->lock, flags);
1774}
1775
1776static void
1777qeth_send_control_data_cb(struct qeth_channel *channel,
1778			  struct qeth_cmd_buffer *iob)
1779{
1780	struct qeth_card *card;
1781	struct qeth_reply *reply, *r;
1782	struct qeth_ipa_cmd *cmd;
1783	unsigned long flags;
1784	int keep_reply;
1785
1786	QETH_DBF_TEXT(trace,4,"sndctlcb");
1787
1788	card = CARD_FROM_CDEV(channel->ccwdev);
1789	if (qeth_check_idx_response(iob->data)) {
1790		qeth_clear_ipacmd_list(card);
1791		qeth_schedule_recovery(card);
1792		goto out;
1793	}
1794
1795	cmd = qeth_check_ipa_data(card, iob);
1796	if ((cmd == NULL) && (card->state != CARD_STATE_DOWN))
1797		goto out;
1798	/*in case of OSN : check if cmd is set */
1799	if (card->info.type == QETH_CARD_TYPE_OSN &&
1800	    cmd &&
1801	    cmd->hdr.command != IPA_CMD_STARTLAN &&
1802	    card->osn_info.assist_cb != NULL) {
1803		card->osn_info.assist_cb(card->dev, cmd);
1804		goto out;
1805	}
1806
1807	spin_lock_irqsave(&card->lock, flags);
1808	list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
1809		if ((reply->seqno == QETH_IDX_COMMAND_SEQNO) ||
1810		    ((cmd) && (reply->seqno == cmd->hdr.seqno))) {
1811			qeth_get_reply(reply);
1812			list_del_init(&reply->list);
1813			spin_unlock_irqrestore(&card->lock, flags);
1814			keep_reply = 0;
1815			if (reply->callback != NULL) {
1816				if (cmd) {
1817					reply->offset = (__u16)((char*)cmd -
1818								(char *)iob->data);
1819					keep_reply = reply->callback(card,
1820							reply,
1821							(unsigned long)cmd);
1822				} else
1823					keep_reply = reply->callback(card,
1824							reply,
1825							(unsigned long)iob);
1826			}
1827			if (cmd)
1828				reply->rc = (u16) cmd->hdr.return_code;
1829			else if (iob->rc)
1830				reply->rc = iob->rc;
1831			if (keep_reply) {
1832				spin_lock_irqsave(&card->lock, flags);
1833				list_add_tail(&reply->list,
1834					      &card->cmd_waiter_list);
1835				spin_unlock_irqrestore(&card->lock, flags);
1836			} else {
1837				atomic_inc(&reply->received);
1838				wake_up(&reply->wait_q);
1839			}
1840			qeth_put_reply(reply);
1841			goto out;
1842		}
1843	}
1844	spin_unlock_irqrestore(&card->lock, flags);
1845out:
1846	memcpy(&card->seqno.pdu_hdr_ack,
1847		QETH_PDU_HEADER_SEQ_NO(iob->data),
1848		QETH_SEQ_NO_LENGTH);
1849	qeth_release_buffer(channel,iob);
1850}
1851
1852static void
1853qeth_prepare_control_data(struct qeth_card *card, int len,
1854			  struct qeth_cmd_buffer *iob)
1855{
1856	qeth_setup_ccw(&card->write,iob->data,len);
1857	iob->callback = qeth_release_buffer;
1858
1859	memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
1860	       &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
1861	card->seqno.trans_hdr++;
1862	memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
1863	       &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
1864	card->seqno.pdu_hdr++;
1865	memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
1866	       &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
1867	QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN);
1868}
1869
1870static int
1871qeth_send_control_data(struct qeth_card *card, int len,
1872		       struct qeth_cmd_buffer *iob,
1873		       int (*reply_cb)
1874		       (struct qeth_card *, struct qeth_reply*, unsigned long),
1875		       void *reply_param)
1876
1877{
1878	int rc;
1879	unsigned long flags;
1880	struct qeth_reply *reply = NULL;
1881	unsigned long timeout;
1882
1883	QETH_DBF_TEXT(trace, 2, "sendctl");
1884
1885	reply = qeth_alloc_reply(card);
1886	if (!reply) {
1887		PRINT_WARN("Could no alloc qeth_reply!\n");
1888		return -ENOMEM;
1889	}
1890	reply->callback = reply_cb;
1891	reply->param = reply_param;
1892	if (card->state == CARD_STATE_DOWN)
1893		reply->seqno = QETH_IDX_COMMAND_SEQNO;
1894	else
1895		reply->seqno = card->seqno.ipa++;
1896	init_waitqueue_head(&reply->wait_q);
1897	spin_lock_irqsave(&card->lock, flags);
1898	list_add_tail(&reply->list, &card->cmd_waiter_list);
1899	spin_unlock_irqrestore(&card->lock, flags);
1900	QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN);
1901
1902	while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ;
1903	qeth_prepare_control_data(card, len, iob);
1904
1905	if (IS_IPA(iob->data))
1906		timeout = jiffies + QETH_IPA_TIMEOUT;
1907	else
1908		timeout = jiffies + QETH_TIMEOUT;
1909
1910	QETH_DBF_TEXT(trace, 6, "noirqpnd");
1911	spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
1912	rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
1913			      (addr_t) iob, 0, 0);
1914	spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
1915	if (rc){
1916		PRINT_WARN("qeth_send_control_data: "
1917			   "ccw_device_start rc = %i\n", rc);
1918		QETH_DBF_TEXT_(trace, 2, " err%d", rc);
1919		spin_lock_irqsave(&card->lock, flags);
1920		list_del_init(&reply->list);
1921		qeth_put_reply(reply);
1922		spin_unlock_irqrestore(&card->lock, flags);
1923		qeth_release_buffer(iob->channel, iob);
1924		atomic_set(&card->write.irq_pending, 0);
1925		wake_up(&card->wait_q);
1926		return rc;
1927	}
1928	while (!atomic_read(&reply->received)) {
1929		if (time_after(jiffies, timeout)) {
1930			spin_lock_irqsave(&reply->card->lock, flags);
1931			list_del_init(&reply->list);
1932			spin_unlock_irqrestore(&reply->card->lock, flags);
1933			reply->rc = -ETIME;
1934			atomic_inc(&reply->received);
1935			wake_up(&reply->wait_q);
1936		}
1937	}
1938	rc = reply->rc;
1939	qeth_put_reply(reply);
1940	return rc;
1941}
1942
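/*
 * OSN variant of qeth_send_control_data: the command is started on the
 * write channel without registering a reply waiter, so no answer is
 * matched or waited for here.
 */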
1943static int
1944qeth_osn_send_control_data(struct qeth_card *card, int len,
1945			   struct qeth_cmd_buffer *iob)
1946{
1947	unsigned long flags;
1948	int rc = 0;
1949
1950	QETH_DBF_TEXT(trace, 5, "osndctrd");
1951
1952	wait_event(card->wait_q,
1953		   atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0);
1954	qeth_prepare_control_data(card, len, iob);
1955	QETH_DBF_TEXT(trace, 6, "osnoirqp");
1956	spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
1957	rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
1958			      (addr_t) iob, 0, 0);
1959	spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
1960	if (rc){
1961		PRINT_WARN("qeth_osn_send_control_data: "
1962			   "ccw_device_start rc = %i\n", rc);
1963		QETH_DBF_TEXT_(trace, 2, " err%d", rc);
1964		qeth_release_buffer(iob->channel, iob);
1965		atomic_set(&card->write.irq_pending, 0);
1966		wake_up(&card->wait_q);
1967	}
1968	return rc;
1969}
1970
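/*
 * Copies the IPA PDU header template into the command buffer and patches
 * in the protocol type and the ULP connection token of this card.
 */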
1971static inline void
1972qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
1973		     char prot_type)
1974{
1975	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
1976	memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data),&prot_type,1);
1977	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
1978	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
1979}
1980
1981static int
1982qeth_osn_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
1983		      int data_len)
1984{
1985	u16 s1, s2;
1986
1987	QETH_DBF_TEXT(trace,4,"osndipa");
1988
1989	qeth_prepare_ipa_cmd(card, iob, QETH_PROT_OSN2);
1990	s1 = (u16)(IPA_PDU_HEADER_SIZE + data_len);
1991	s2 = (u16)data_len;
1992	memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
1993	memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
1994	memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
1995	memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
1996	return qeth_osn_send_control_data(card, s1, iob);
1997}
1998
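/*
 * Wraps qeth_send_control_data for IPA commands: the protocol type is
 * derived from the card mode (OSN, layer 2 or TCP/IP) before the command
 * is prepared and sent.
 */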
1999static int
2000qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
2001		  int (*reply_cb)
2002		  (struct qeth_card *,struct qeth_reply*, unsigned long),
2003		  void *reply_param)
2004{
2005	int rc;
2006	char prot_type;
2007
2008	QETH_DBF_TEXT(trace,4,"sendipa");
2009
2010	if (card->options.layer2)
2011		if (card->info.type == QETH_CARD_TYPE_OSN)
2012			prot_type = QETH_PROT_OSN2;
2013		else
2014			prot_type = QETH_PROT_LAYER2;
2015	else
2016		prot_type = QETH_PROT_TCPIP;
2017	qeth_prepare_ipa_cmd(card,iob,prot_type);
2018	rc = qeth_send_control_data(card, IPA_CMD_LENGTH, iob,
2019				    reply_cb, reply_param);
2020	return rc;
2021}
2022
2023
2024static int
2025qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
2026		  unsigned long data)
2027{
2028	struct qeth_cmd_buffer *iob;
2029
2030	QETH_DBF_TEXT(setup, 2, "cmenblcb");
2031
2032	iob = (struct qeth_cmd_buffer *) data;
2033	memcpy(&card->token.cm_filter_r,
2034	       QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
2035	       QETH_MPC_TOKEN_LENGTH);
2036	QETH_DBF_TEXT_(setup, 2, "  rc%d", iob->rc);
2037	return 0;
2038}
2039
2040static int
2041qeth_cm_enable(struct qeth_card *card)
2042{
2043	int rc;
2044	struct qeth_cmd_buffer *iob;
2045
2046	QETH_DBF_TEXT(setup,2,"cmenable");
2047
2048	iob = qeth_wait_for_buffer(&card->write);
2049	memcpy(iob->data, CM_ENABLE, CM_ENABLE_SIZE);
2050	memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
2051	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
2052	memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
2053	       &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);
2054
2055	rc = qeth_send_control_data(card, CM_ENABLE_SIZE, iob,
2056				    qeth_cm_enable_cb, NULL);
2057	return rc;
2058}
2059
2060static int
2061qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2062		 unsigned long data)
2063{
2064
2065	struct qeth_cmd_buffer *iob;
2066
2067	QETH_DBF_TEXT(setup, 2, "cmsetpcb");
2068
2069	iob = (struct qeth_cmd_buffer *) data;
2070	memcpy(&card->token.cm_connection_r,
2071	       QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
2072	       QETH_MPC_TOKEN_LENGTH);
2073	QETH_DBF_TEXT_(setup, 2, "  rc%d", iob->rc);
2074	return 0;
2075}
2076
2077static int
2078qeth_cm_setup(struct qeth_card *card)
2079{
2080	int rc;
2081	struct qeth_cmd_buffer *iob;
2082
2083	QETH_DBF_TEXT(setup,2,"cmsetup");
2084
2085	iob = qeth_wait_for_buffer(&card->write);
2086	memcpy(iob->data, CM_SETUP, CM_SETUP_SIZE);
2087	memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
2088	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
2089	memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
2090	       &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
2091	memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
2092	       &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
2093	rc = qeth_send_control_data(card, CM_SETUP_SIZE, iob,
2094				    qeth_cm_setup_cb, NULL);
2095	return rc;
2096
2097}
2098
2099static int
2100qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
2101		   unsigned long data)
2102{
2103
2104	__u16 mtu, framesize;
2105	__u16 len;
2106	__u8 link_type;
2107	struct qeth_cmd_buffer *iob;
2108
2109	QETH_DBF_TEXT(setup, 2, "ulpenacb");
2110
2111	iob = (struct qeth_cmd_buffer *) data;
2112	memcpy(&card->token.ulp_filter_r,
2113	       QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
2114	       QETH_MPC_TOKEN_LENGTH);
2115	if (qeth_get_mtu_out_of_mpc(card->info.type)) {
2116		memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
2117		mtu = qeth_get_mtu_outof_framesize(framesize);
2118		if (!mtu) {
2119			iob->rc = -EINVAL;
2120			QETH_DBF_TEXT_(setup, 2, "  rc%d", iob->rc);
2121			return 0;
2122		}
2123		card->info.max_mtu = mtu;
2124		card->info.initial_mtu = mtu;
2125		card->qdio.in_buf_size = mtu + 2 * PAGE_SIZE;
2126	} else {
2127		card->info.initial_mtu = qeth_get_initial_mtu_for_card(card);
2128		card->info.max_mtu = qeth_get_max_mtu_for_card(card->info.type);
2129		card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
2130	}
2131
2132	memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
2133	if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
2134		memcpy(&link_type,
2135		       QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
2136		card->info.link_type = link_type;
2137	} else
2138		card->info.link_type = 0;
2139	QETH_DBF_TEXT_(setup, 2, "  rc%d", iob->rc);
2140	return 0;
2141}
2142
2143static int
2144qeth_ulp_enable(struct qeth_card *card)
2145{
2146	int rc;
2147	char prot_type;
2148	struct qeth_cmd_buffer *iob;
2149
2150	QETH_DBF_TEXT(setup,2,"ulpenabl");
2151
2152	iob = qeth_wait_for_buffer(&card->write);
2153	memcpy(iob->data, ULP_ENABLE, ULP_ENABLE_SIZE);
2154
2155	*(QETH_ULP_ENABLE_LINKNUM(iob->data)) =
2156		(__u8) card->info.portno;
2157	if (card->options.layer2)
2158		if (card->info.type == QETH_CARD_TYPE_OSN)
2159			prot_type = QETH_PROT_OSN2;
2160		else
2161			prot_type = QETH_PROT_LAYER2;
2162	else
2163		prot_type = QETH_PROT_TCPIP;
2164
2165	memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data),&prot_type,1);
2166	memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
2167	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2168	memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
2169	       &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
2170	memcpy(QETH_ULP_ENABLE_PORTNAME_AND_LL(iob->data),
2171	       card->info.portname, 9);
2172	rc = qeth_send_control_data(card, ULP_ENABLE_SIZE, iob,
2173				    qeth_ulp_enable_cb, NULL);
2174	return rc;
2175
2176}
2177
2178static int
2179qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2180		  unsigned long data)
2181{
2182	struct qeth_cmd_buffer *iob;
2183
2184	QETH_DBF_TEXT(setup, 2, "ulpstpcb");
2185
2186	iob = (struct qeth_cmd_buffer *) data;
2187	memcpy(&card->token.ulp_connection_r,
2188	       QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
2189	       QETH_MPC_TOKEN_LENGTH);
2190	QETH_DBF_TEXT_(setup, 2, "  rc%d", iob->rc);
2191	return 0;
2192}
2193
2194static int
2195qeth_ulp_setup(struct qeth_card *card)
2196{
2197	int rc;
2198	__u16 temp;
2199	struct qeth_cmd_buffer *iob;
2200	struct ccw_dev_id dev_id;
2201
2202	QETH_DBF_TEXT(setup,2,"ulpsetup");
2203
2204	iob = qeth_wait_for_buffer(&card->write);
2205	memcpy(iob->data, ULP_SETUP, ULP_SETUP_SIZE);
2206
2207	memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
2208	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2209	memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
2210	       &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
2211	memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
2212	       &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);
2213
2214	ccw_device_get_id(CARD_DDEV(card), &dev_id);
2215	memcpy(QETH_ULP_SETUP_CUA(iob->data), &dev_id.devno, 2);
2216	temp = (card->info.cula << 8) + card->info.unit_addr2;
2217	memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
2218	rc = qeth_send_control_data(card, ULP_SETUP_SIZE, iob,
2219				    qeth_ulp_setup_cb, NULL);
2220	return rc;
2221}
2222
2223static inline int
2224qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error,
2225		       unsigned int siga_error, const char *dbftext)
2226{
2227	if (qdio_error || siga_error) {
2228		QETH_DBF_TEXT(trace, 2, dbftext);
2229		QETH_DBF_TEXT(qerr, 2, dbftext);
2230		QETH_DBF_TEXT_(qerr, 2, " F15=%02X",
2231			       buf->element[15].flags & 0xff);
2232		QETH_DBF_TEXT_(qerr, 2, " F14=%02X",
2233			       buf->element[14].flags & 0xff);
2234		QETH_DBF_TEXT_(qerr, 2, " qerr=%X", qdio_error);
2235		QETH_DBF_TEXT_(qerr, 2, " serr=%X", siga_error);
2236		return 1;
2237	}
2238	return 0;
2239}
2240
2241static struct sk_buff *
2242qeth_get_skb(unsigned int length, struct qeth_hdr *hdr)
2243{
2244	struct sk_buff* skb;
2245	int add_len;
2246
2247	add_len = 0;
2248	if (hdr->hdr.osn.id == QETH_HEADER_TYPE_OSN)
2249		add_len = sizeof(struct qeth_hdr);
2250#ifdef CONFIG_QETH_VLAN
2251	else
2252		add_len = VLAN_HLEN;
2253#endif
2254	skb = dev_alloc_skb(length + add_len);
2255	if (skb && add_len)
2256		skb_reserve(skb, add_len);
2257	return skb;
2258}
2259
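/*
 * Extracts the next packet from an inbound QDIO buffer. The qeth_hdr is
 * located first (it must not cross an SBAL element boundary), then the
 * packet data is copied element by element into a freshly allocated skb.
 * *__element and *__offset are advanced so that the caller can iterate
 * over all packets in the buffer.
 */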
2260static struct sk_buff *
2261qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer,
2262		  struct qdio_buffer_element **__element, int *__offset,
2263		  struct qeth_hdr **hdr)
2264{
2265	struct qdio_buffer_element *element = *__element;
2266	int offset = *__offset;
2267	struct sk_buff *skb = NULL;
2268	int skb_len;
2269	void *data_ptr;
2270	int data_len;
2271
2272	QETH_DBF_TEXT(trace,6,"nextskb");
2273	/* qeth_hdr must not cross element boundaries */
2274	if (element->length < offset + sizeof(struct qeth_hdr)){
2275		if (qeth_is_last_sbale(element))
2276			return NULL;
2277		element++;
2278		offset = 0;
2279		if (element->length < sizeof(struct qeth_hdr))
2280			return NULL;
2281	}
2282	*hdr = element->addr + offset;
2283
2284	offset += sizeof(struct qeth_hdr);
2285	if (card->options.layer2)
2286		if (card->info.type == QETH_CARD_TYPE_OSN)
2287			skb_len = (*hdr)->hdr.osn.pdu_length;
2288		else
2289			skb_len = (*hdr)->hdr.l2.pkt_length;
2290	else
2291		skb_len = (*hdr)->hdr.l3.length;
2292
2293	if (!skb_len)
2294		return NULL;
2295	if (card->options.fake_ll){
2296		if(card->dev->type == ARPHRD_IEEE802_TR){
2297			if (!(skb = qeth_get_skb(skb_len+QETH_FAKE_LL_LEN_TR, *hdr)))
2298				goto no_mem;
2299			skb_reserve(skb,QETH_FAKE_LL_LEN_TR);
2300		} else {
2301			if (!(skb = qeth_get_skb(skb_len+QETH_FAKE_LL_LEN_ETH, *hdr)))
2302				goto no_mem;
2303			skb_reserve(skb,QETH_FAKE_LL_LEN_ETH);
2304		}
2305	} else if (!(skb = qeth_get_skb(skb_len, *hdr)))
2306		goto no_mem;
2307	data_ptr = element->addr + offset;
2308	while (skb_len) {
2309		data_len = min(skb_len, (int)(element->length - offset));
2310		if (data_len)
2311			memcpy(skb_put(skb, data_len), data_ptr, data_len);
2312		skb_len -= data_len;
2313		if (skb_len){
2314			if (qeth_is_last_sbale(element)){
2315				QETH_DBF_TEXT(trace,4,"unexeob");
2316				QETH_DBF_TEXT_(trace,4,"%s",CARD_BUS_ID(card));
2317				QETH_DBF_TEXT(qerr,2,"unexeob");
2318				QETH_DBF_TEXT_(qerr,2,"%s",CARD_BUS_ID(card));
2319				QETH_DBF_HEX(misc,4,buffer,sizeof(*buffer));
2320				dev_kfree_skb_any(skb);
2321				card->stats.rx_errors++;
2322				return NULL;
2323			}
2324			element++;
2325			offset = 0;
2326			data_ptr = element->addr;
2327		} else {
2328			offset += data_len;
2329		}
2330	}
2331	*__element = element;
2332	*__offset = offset;
2333	return skb;
2334no_mem:
2335	if (net_ratelimit()){
2336		PRINT_WARN("No memory for packet received on %s.\n",
2337			   QETH_CARD_IFNAME(card));
2338		QETH_DBF_TEXT(trace,2,"noskbmem");
2339		QETH_DBF_TEXT_(trace,2,"%s",CARD_BUS_ID(card));
2340	}
2341	card->stats.rx_dropped++;
2342	return NULL;
2343}
2344
2345static __be16
2346qeth_type_trans(struct sk_buff *skb, struct net_device *dev)
2347{
2348	struct qeth_card *card;
2349	struct ethhdr *eth;
2350
2351	QETH_DBF_TEXT(trace,6,"typtrans");
2352
2353	card = (struct qeth_card *)dev->priv;
2354#ifdef CONFIG_TR
2355	if ((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
2356	    (card->info.link_type == QETH_LINK_TYPE_LANE_TR))
2357	 	return tr_type_trans(skb,dev);
2358#endif /* CONFIG_TR */
2359	skb_reset_mac_header(skb);
2360	skb_pull(skb, ETH_HLEN );
2361	eth = eth_hdr(skb);
2362
2363	if (*eth->h_dest & 1) {
2364		if (memcmp(eth->h_dest, dev->broadcast, ETH_ALEN) == 0)
2365			skb->pkt_type = PACKET_BROADCAST;
2366		else
2367			skb->pkt_type = PACKET_MULTICAST;
2368	} else if (memcmp(eth->h_dest, dev->dev_addr, ETH_ALEN))
2369		skb->pkt_type = PACKET_OTHERHOST;
2370
2371	if (ntohs(eth->h_proto) >= 1536)
2372		return eth->h_proto;
2373	if (*(unsigned short *) (skb->data) == 0xFFFF)
2374		return htons(ETH_P_802_3);
2375	return htons(ETH_P_802_2);
2376}
2377
2378static void
2379qeth_rebuild_skb_fake_ll_tr(struct qeth_card *card, struct sk_buff *skb,
2380			 struct qeth_hdr *hdr)
2381{
2382	struct trh_hdr *fake_hdr;
2383	struct trllc *fake_llc;
2384	struct iphdr *ip_hdr;
2385
2386	QETH_DBF_TEXT(trace,5,"skbfktr");
2387	skb_set_mac_header(skb, -QETH_FAKE_LL_LEN_TR);
2388	/* this is a fake token ring header */
2389	fake_hdr = tr_hdr(skb);
2390
2391	/* the destination MAC address */
2392	switch (skb->pkt_type){
2393	case PACKET_MULTICAST:
2394		switch (skb->protocol){
2395#ifdef CONFIG_QETH_IPV6
2396		case __constant_htons(ETH_P_IPV6):
2397			ndisc_mc_map((struct in6_addr *)
2398				     skb->data + QETH_FAKE_LL_V6_ADDR_POS,
2399				     fake_hdr->daddr, card->dev, 0);
2400			break;
2401#endif /* CONFIG_QETH_IPV6 */
2402		case __constant_htons(ETH_P_IP):
2403			ip_hdr = (struct iphdr *)skb->data;
2404			ip_tr_mc_map(ip_hdr->daddr, fake_hdr->daddr);
2405			break;
2406		default:
2407			memcpy(fake_hdr->daddr, card->dev->dev_addr, TR_ALEN);
2408		}
2409		break;
2410	case PACKET_BROADCAST:
2411		memset(fake_hdr->daddr, 0xff, TR_ALEN);
2412		break;
2413	default:
2414		memcpy(fake_hdr->daddr, card->dev->dev_addr, TR_ALEN);
2415	}
2416	/* the source MAC address */
2417	if (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
2418		memcpy(fake_hdr->saddr, &hdr->hdr.l3.dest_addr[2], TR_ALEN);
2419	else
2420		memset(fake_hdr->saddr, 0, TR_ALEN);
2421	fake_hdr->rcf=0;
2422	fake_llc = (struct trllc*)&(fake_hdr->rcf);
2423	fake_llc->dsap = EXTENDED_SAP;
2424	fake_llc->ssap = EXTENDED_SAP;
2425	fake_llc->llc  = UI_CMD;
2426	fake_llc->protid[0] = 0;
2427	fake_llc->protid[1] = 0;
2428	fake_llc->protid[2] = 0;
2429	fake_llc->ethertype = ETH_P_IP;
2430}
2431
2432static void
2433qeth_rebuild_skb_fake_ll_eth(struct qeth_card *card, struct sk_buff *skb,
2434			 struct qeth_hdr *hdr)
2435{
2436	struct ethhdr *fake_hdr;
2437	struct iphdr *ip_hdr;
2438
2439	QETH_DBF_TEXT(trace,5,"skbfketh");
2440	skb_set_mac_header(skb, -QETH_FAKE_LL_LEN_ETH);
2441	/* this is a fake ethernet header */
2442	fake_hdr = eth_hdr(skb);
2443
2444	/* the destination MAC address */
2445	switch (skb->pkt_type){
2446	case PACKET_MULTICAST:
2447		switch (skb->protocol){
2448#ifdef CONFIG_QETH_IPV6
2449		case __constant_htons(ETH_P_IPV6):
2450			ndisc_mc_map((struct in6_addr *)
2451				     skb->data + QETH_FAKE_LL_V6_ADDR_POS,
2452				     fake_hdr->h_dest, card->dev, 0);
2453			break;
2454#endif /* CONFIG_QETH_IPV6 */
2455		case __constant_htons(ETH_P_IP):
2456			ip_hdr = (struct iphdr *)skb->data;
2457			ip_eth_mc_map(ip_hdr->daddr, fake_hdr->h_dest);
2458			break;
2459		default:
2460			memcpy(fake_hdr->h_dest, card->dev->dev_addr, ETH_ALEN);
2461		}
2462		break;
2463	case PACKET_BROADCAST:
2464		memset(fake_hdr->h_dest, 0xff, ETH_ALEN);
2465		break;
2466	default:
2467		memcpy(fake_hdr->h_dest, card->dev->dev_addr, ETH_ALEN);
2468	}
2469	/* the source MAC address */
2470	if (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
2471		memcpy(fake_hdr->h_source, &hdr->hdr.l3.dest_addr[2], ETH_ALEN);
2472	else
2473		memset(fake_hdr->h_source, 0, ETH_ALEN);
2474	/* the protocol */
2475	fake_hdr->h_proto = skb->protocol;
2476}
2477
2478static inline void
2479qeth_rebuild_skb_fake_ll(struct qeth_card *card, struct sk_buff *skb,
2480			struct qeth_hdr *hdr)
2481{
2482	if (card->dev->type == ARPHRD_IEEE802_TR)
2483		qeth_rebuild_skb_fake_ll_tr(card, skb, hdr);
2484	else
2485		qeth_rebuild_skb_fake_ll_eth(card, skb, hdr);
2486}
2487
2488static inline void
2489qeth_layer2_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
2490			struct qeth_hdr *hdr)
2491{
2492	skb->pkt_type = PACKET_HOST;
2493	skb->protocol = qeth_type_trans(skb, skb->dev);
2494	if (card->options.checksum_type == NO_CHECKSUMMING)
2495		skb->ip_summed = CHECKSUM_UNNECESSARY;
2496	else
2497		skb->ip_summed = CHECKSUM_NONE;
2498	*((__u32 *)skb->cb) = ++card->seqno.pkt_seqno;
2499}
2500
2501static __u16
2502qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
2503		 struct qeth_hdr *hdr)
2504{
2505	unsigned short vlan_id = 0;
2506#ifdef CONFIG_QETH_IPV6
2507	if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) {
2508		skb->pkt_type = PACKET_HOST;
2509		skb->protocol = qeth_type_trans(skb, card->dev);
2510		return 0;
2511	}
2512#endif /* CONFIG_QETH_IPV6 */
2513	skb->protocol = htons((hdr->hdr.l3.flags & QETH_HDR_IPV6)? ETH_P_IPV6 :
2514			      ETH_P_IP);
2515	switch (hdr->hdr.l3.flags & QETH_HDR_CAST_MASK){
2516	case QETH_CAST_UNICAST:
2517		skb->pkt_type = PACKET_HOST;
2518		break;
2519	case QETH_CAST_MULTICAST:
2520		skb->pkt_type = PACKET_MULTICAST;
2521		card->stats.multicast++;
2522		break;
2523	case QETH_CAST_BROADCAST:
2524		skb->pkt_type = PACKET_BROADCAST;
2525		card->stats.multicast++;
2526		break;
2527	case QETH_CAST_ANYCAST:
2528	case QETH_CAST_NOCAST:
2529	default:
2530		skb->pkt_type = PACKET_HOST;
2531	}
2532
2533	if (hdr->hdr.l3.ext_flags &
2534	    (QETH_HDR_EXT_VLAN_FRAME | QETH_HDR_EXT_INCLUDE_VLAN_TAG)) {
2535		vlan_id = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME)?
2536			hdr->hdr.l3.vlan_id : *((u16 *)&hdr->hdr.l3.dest_addr[12]);
2537	}
2538
2539	if (card->options.fake_ll)
2540		qeth_rebuild_skb_fake_ll(card, skb, hdr);
2541	else
2542		skb_reset_mac_header(skb);
2543	skb->ip_summed = card->options.checksum_type;
2544	if (card->options.checksum_type == HW_CHECKSUMMING){
2545		if ( (hdr->hdr.l3.ext_flags &
2546		      (QETH_HDR_EXT_CSUM_HDR_REQ |
2547		       QETH_HDR_EXT_CSUM_TRANSP_REQ)) ==
2548		     (QETH_HDR_EXT_CSUM_HDR_REQ |
2549		      QETH_HDR_EXT_CSUM_TRANSP_REQ) )
2550			skb->ip_summed = CHECKSUM_UNNECESSARY;
2551		else
2552			skb->ip_summed = SW_CHECKSUMMING;
2553	}
2554	return vlan_id;
2555}
2556
2557static void
2558qeth_process_inbound_buffer(struct qeth_card *card,
2559			    struct qeth_qdio_buffer *buf, int index)
2560{
2561	struct qdio_buffer_element *element;
2562	struct sk_buff *skb;
2563	struct qeth_hdr *hdr;
2564	int offset;
2565	int rxrc;
2566	__u16 vlan_tag = 0;
2567
2568	/* get first element of current buffer */
2569	element = (struct qdio_buffer_element *)&buf->buffer->element[0];
2570	offset = 0;
2571	if (card->options.performance_stats)
2572		card->perf_stats.bufs_rec++;
2573	while((skb = qeth_get_next_skb(card, buf->buffer, &element,
2574				       &offset, &hdr))) {
2575		skb->dev = card->dev;
2576		if (hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
2577			qeth_layer2_rebuild_skb(card, skb, hdr);
2578		else if (hdr->hdr.l3.id == QETH_HEADER_TYPE_LAYER3)
2579			vlan_tag = qeth_rebuild_skb(card, skb, hdr);
2580		else { /*in case of OSN*/
2581			skb_push(skb, sizeof(struct qeth_hdr));
2582			skb_copy_to_linear_data(skb, hdr,
2583						sizeof(struct qeth_hdr));
2584		}
2585		/* is device UP ? */
2586		if (!(card->dev->flags & IFF_UP)){
2587			dev_kfree_skb_any(skb);
2588			continue;
2589		}
2590		if (card->info.type == QETH_CARD_TYPE_OSN)
2591			rxrc = card->osn_info.data_cb(skb);
2592		else
2593#ifdef CONFIG_QETH_VLAN
2594		if (vlan_tag)
2595			if (card->vlangrp)
2596				vlan_hwaccel_rx(skb, card->vlangrp, vlan_tag);
2597			else {
2598				dev_kfree_skb_any(skb);
2599				continue;
2600			}
2601		else
2602#endif
2603			rxrc = netif_rx(skb);
2604		card->dev->last_rx = jiffies;
2605		card->stats.rx_packets++;
2606		card->stats.rx_bytes += skb->len;
2607	}
2608}
2609
2610static struct qeth_buffer_pool_entry *
2611qeth_get_buffer_pool_entry(struct qeth_card *card)
2612{
2613	struct qeth_buffer_pool_entry *entry;
2614
2615	QETH_DBF_TEXT(trace, 6, "gtbfplen");
2616	if (!list_empty(&card->qdio.in_buf_pool.entry_list)) {
2617		entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
2618				struct qeth_buffer_pool_entry, list);
2619		list_del_init(&entry->list);
2620		return entry;
2621	}
2622	return NULL;
2623}
2624
2625static void
2626qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf)
2627{
2628	struct qeth_buffer_pool_entry *pool_entry;
2629	int i;
2630
2631	pool_entry = qeth_get_buffer_pool_entry(card);
2632	/*
2633	 * since the buffer is accessed only from the input_tasklet
2634	 * there shouldn't be a need to synchronize; also, since we use
2635	 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
2636	 * buffers
2637	 */
2638	BUG_ON(!pool_entry);
2639
2640	buf->pool_entry = pool_entry;
2641	for(i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i){
2642		buf->buffer->element[i].length = PAGE_SIZE;
2643		buf->buffer->element[i].addr =  pool_entry->elements[i];
2644		if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
2645			buf->buffer->element[i].flags = SBAL_FLAGS_LAST_ENTRY;
2646		else
2647			buf->buffer->element[i].flags = 0;
2648	}
2649	buf->state = QETH_QDIO_BUF_EMPTY;
2650}
2651
2652static void
2653qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
2654			 struct qeth_qdio_out_buffer *buf)
2655{
2656	int i;
2657	struct sk_buff *skb;
2658
2659	/* is PCI flag set on buffer? */
2660	if (buf->buffer->element[0].flags & 0x40)
2661		atomic_dec(&queue->set_pci_flags_count);
2662
2663	while ((skb = skb_dequeue(&buf->skb_list))){
2664		atomic_dec(&skb->users);
2665		dev_kfree_skb_any(skb);
2666	}
2667	qeth_eddp_buf_release_contexts(buf);
2668	for(i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i){
2669		buf->buffer->element[i].length = 0;
2670		buf->buffer->element[i].addr = NULL;
2671		buf->buffer->element[i].flags = 0;
2672	}
2673	buf->next_element_to_fill = 0;
2674	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
2675}
2676
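/*
 * Gives processed inbound buffers back to the hardware. Buffers are
 * re-initialized from the buffer pool and handed to do_QDIO only once
 * QETH_IN_BUF_REQUEUE_THRESHOLD of them have accumulated, in order to
 * save SIGA instructions.
 */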
2677static void
2678qeth_queue_input_buffer(struct qeth_card *card, int index)
2679{
2680	struct qeth_qdio_q *queue = card->qdio.in_q;
2681	int count;
2682	int i;
2683	int rc;
2684
2685	QETH_DBF_TEXT(trace,6,"queinbuf");
2686	count = (index < queue->next_buf_to_init)?
2687		card->qdio.in_buf_pool.buf_count -
2688		(queue->next_buf_to_init - index) :
2689		card->qdio.in_buf_pool.buf_count -
2690		(queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index);
2691	/* only requeue at a certain threshold to avoid SIGAs */
2692	if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)){
2693		for (i = queue->next_buf_to_init;
2694		     i < queue->next_buf_to_init + count; ++i)
2695			qeth_init_input_buffer(card,
2696				&queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q]);
2697		/*
2698		 * according to the old code, requeueing all 128 buffers should
2699		 * be avoided in order to benefit from PCI avoidance.
2700		 * this function keeps at least one buffer (the buffer at
2701		 * 'index') un-requeued -> this buffer is the first buffer that
2702		 * will be requeued the next time
2703		 */
2704		if (card->options.performance_stats) {
2705			card->perf_stats.inbound_do_qdio_cnt++;
2706			card->perf_stats.inbound_do_qdio_start_time =
2707				qeth_get_micros();
2708		}
2709		rc = do_QDIO(CARD_DDEV(card),
2710			     QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
2711			     0, queue->next_buf_to_init, count, NULL);
2712		if (card->options.performance_stats)
2713			card->perf_stats.inbound_do_qdio_time +=
2714				qeth_get_micros() -
2715				card->perf_stats.inbound_do_qdio_start_time;
2716		if (rc){
2717			PRINT_WARN("qeth_queue_input_buffer's do_QDIO "
2718				   "returned %i (device %s).\n",
2719				   rc, CARD_DDEV_ID(card));
2720			QETH_DBF_TEXT(trace,2,"qinberr");
2721			QETH_DBF_TEXT_(trace,2,"%s",CARD_BUS_ID(card));
2722		}
2723		queue->next_buf_to_init = (queue->next_buf_to_init + count) %
2724					  QDIO_MAX_BUFFERS_PER_Q;
2725	}
2726}
2727
2728static inline void
2729qeth_put_buffer_pool_entry(struct qeth_card *card,
2730			   struct qeth_buffer_pool_entry *entry)
2731{
2732	QETH_DBF_TEXT(trace, 6, "ptbfplen");
2733	list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list);
2734}
2735
2736static void
2737qeth_qdio_input_handler(struct ccw_device * ccwdev, unsigned int status,
2738		        unsigned int qdio_err, unsigned int siga_err,
2739			unsigned int queue, int first_element, int count,
2740			unsigned long card_ptr)
2741{
2742	struct net_device *net_dev;
2743	struct qeth_card *card;
2744	struct qeth_qdio_buffer *buffer;
2745	int index;
2746	int i;
2747
2748	QETH_DBF_TEXT(trace, 6, "qdinput");
2749	card = (struct qeth_card *) card_ptr;
2750	net_dev = card->dev;
2751	if (card->options.performance_stats) {
2752		card->perf_stats.inbound_cnt++;
2753		card->perf_stats.inbound_start_time = qeth_get_micros();
2754	}
2755	if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
2756		if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION){
2757			QETH_DBF_TEXT(trace, 1,"qdinchk");
2758			QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2759			QETH_DBF_TEXT_(trace,1,"%04X%04X",first_element,count);
2760			QETH_DBF_TEXT_(trace,1,"%04X%04X", queue, status);
2761			qeth_schedule_recovery(card);
2762			return;
2763		}
2764	}
2765	for (i = first_element; i < (first_element + count); ++i) {
2766		index = i % QDIO_MAX_BUFFERS_PER_Q;
2767		buffer = &card->qdio.in_q->bufs[index];
2768		if (!((status & QDIO_STATUS_LOOK_FOR_ERROR) &&
2769		      qeth_check_qdio_errors(buffer->buffer,
2770					     qdio_err, siga_err,"qinerr")))
2771			qeth_process_inbound_buffer(card, buffer, index);
2772		/* clear buffer and give back to hardware */
2773		qeth_put_buffer_pool_entry(card, buffer->pool_entry);
2774		qeth_queue_input_buffer(card, index);
2775	}
2776	if (card->options.performance_stats)
2777		card->perf_stats.inbound_time += qeth_get_micros() -
2778			card->perf_stats.inbound_start_time;
2779}
2780
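/*
 * Maps the QDIO/SIGA error information of a sent buffer to a
 * QETH_SEND_ERROR_* code. The lower two bits of siga_err carry the SIGA
 * condition code; cc 2 with the B bit set and cc 3 are treated as fatal
 * (KICK_IT), which makes the caller trigger a recovery.
 */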
2781static int
2782qeth_handle_send_error(struct qeth_card *card,
2783		       struct qeth_qdio_out_buffer *buffer,
2784		       unsigned int qdio_err, unsigned int siga_err)
2785{
2786	int sbalf15 = buffer->buffer->element[15].flags & 0xff;
2787	int cc = siga_err & 3;
2788
2789	QETH_DBF_TEXT(trace, 6, "hdsnderr");
2790	qeth_check_qdio_errors(buffer->buffer, qdio_err, siga_err, "qouterr");
2791	switch (cc) {
2792	case 0:
2793		if (qdio_err){
2794			QETH_DBF_TEXT(trace, 1,"lnkfail");
2795			QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2796			QETH_DBF_TEXT_(trace,1,"%04x %02x",
2797				       (u16)qdio_err, (u8)sbalf15);
2798			return QETH_SEND_ERROR_LINK_FAILURE;
2799		}
2800		return QETH_SEND_ERROR_NONE;
2801	case 2:
2802		if (siga_err & QDIO_SIGA_ERROR_B_BIT_SET) {
2803			QETH_DBF_TEXT(trace, 1, "SIGAcc2B");
2804			QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2805			return QETH_SEND_ERROR_KICK_IT;
2806		}
2807		if ((sbalf15 >= 15) && (sbalf15 <= 31))
2808			return QETH_SEND_ERROR_RETRY;
2809		return QETH_SEND_ERROR_LINK_FAILURE;
2810		/* look at qdio_error and sbalf 15 */
2811	case 1:
2812		QETH_DBF_TEXT(trace, 1, "SIGAcc1");
2813		QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2814		return QETH_SEND_ERROR_LINK_FAILURE;
2815	case 3:
2816	default:
2817		QETH_DBF_TEXT(trace, 1, "SIGAcc3");
2818		QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2819		return QETH_SEND_ERROR_KICK_IT;
2820	}
2821}
2822
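/*
 * Hands 'count' primed buffers starting at 'index' to the hardware via
 * do_QDIO. For non-IQD devices a PCI request flag is set on a buffer
 * when it looks like packing mode is about to start, or when no PCI
 * request is outstanding in packing mode, so that a later interrupt is
 * guaranteed to flush any half-filled packing buffers.
 */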
2823void
2824qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int,
2825		   int index, int count)
2826{
2827	struct qeth_qdio_out_buffer *buf;
2828	int rc;
2829	int i;
2830	unsigned int qdio_flags;
2831
2832	QETH_DBF_TEXT(trace, 6, "flushbuf");
2833
2834	for (i = index; i < index + count; ++i) {
2835		buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
2836		buf->buffer->element[buf->next_element_to_fill - 1].flags |=
2837				SBAL_FLAGS_LAST_ENTRY;
2838
2839		if (queue->card->info.type == QETH_CARD_TYPE_IQD)
2840			continue;
2841
2842		if (!queue->do_pack){
2843			if ((atomic_read(&queue->used_buffers) >=
2844		    		(QETH_HIGH_WATERMARK_PACK -
2845				 QETH_WATERMARK_PACK_FUZZ)) &&
2846		    	    !atomic_read(&queue->set_pci_flags_count)){
2847				/* it's likely that we'll go to packing
2848				 * mode soon */
2849				atomic_inc(&queue->set_pci_flags_count);
2850				buf->buffer->element[0].flags |= 0x40;
2851			}
2852		} else {
2853			if (!atomic_read(&queue->set_pci_flags_count)){
2854				/*
2855				 * there's no outstanding PCI any more, so we
2856				 * have to request a PCI to be sure that the PCI
2857				 * interrupt will fire at some time in the future;
2858				 * then we can flush packed buffers that might
2859				 * still be hanging around, which can happen if
2860				 * no further send was requested by the stack
2861				 */
2862				atomic_inc(&queue->set_pci_flags_count);
2863				buf->buffer->element[0].flags |= 0x40;
2864			}
2865		}
2866	}
2867
2868	queue->card->dev->trans_start = jiffies;
2869	if (queue->card->options.performance_stats) {
2870		queue->card->perf_stats.outbound_do_qdio_cnt++;
2871		queue->card->perf_stats.outbound_do_qdio_start_time =
2872			qeth_get_micros();
2873	}
2874	qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
2875	if (under_int)
2876		qdio_flags |= QDIO_FLAG_UNDER_INTERRUPT;
2877	if (atomic_read(&queue->set_pci_flags_count))
2878		qdio_flags |= QDIO_FLAG_PCI_OUT;
2879	rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
2880		     queue->queue_no, index, count, NULL);
2881	if (queue->card->options.performance_stats)
2882		queue->card->perf_stats.outbound_do_qdio_time +=
2883			qeth_get_micros() -
2884			queue->card->perf_stats.outbound_do_qdio_start_time;
2885	if (rc){
2886		QETH_DBF_TEXT(trace, 2, "flushbuf");
2887		QETH_DBF_TEXT_(trace, 2, " err%d", rc);
2888		QETH_DBF_TEXT_(trace, 2, "%s", CARD_DDEV_ID(queue->card));
2889		queue->card->stats.tx_errors += count;
2890		/* this must not happen under normal circumstances; if it
2891		 * does, something is really wrong -> recover */
2892		qeth_schedule_recovery(queue->card);
2893		return;
2894	}
2895	atomic_add(count, &queue->used_buffers);
2896	if (queue->card->options.performance_stats)
2897		queue->card->perf_stats.bufs_sent += count;
2898}
2899
2900/*
2901 * Switches to packing state if the number of used buffers on a queue
2902 * reaches a certain limit.
2903 */
2904static void
2905qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
2906{
2907	if (!queue->do_pack) {
2908		if (atomic_read(&queue->used_buffers)
2909		    >= QETH_HIGH_WATERMARK_PACK){
2910			/* switch non-PACKING -> PACKING */
2911			QETH_DBF_TEXT(trace, 6, "np->pack");
2912			if (queue->card->options.performance_stats)
2913				queue->card->perf_stats.sc_dp_p++;
2914			queue->do_pack = 1;
2915		}
2916	}
2917}
2918
2919/*
2920 * Switches from packing to non-packing mode. If there is a packing
2921 * buffer on the queue this buffer will be prepared to be flushed.
2922 * In that case 1 is returned to inform the caller. If no buffer
2923 * has to be flushed, zero is returned.
2924 */
2925static int
2926qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
2927{
2928	struct qeth_qdio_out_buffer *buffer;
2929	int flush_count = 0;
2930
2931	if (queue->do_pack) {
2932		if (atomic_read(&queue->used_buffers)
2933		    <= QETH_LOW_WATERMARK_PACK) {
2934			/* switch PACKING -> non-PACKING */
2935			QETH_DBF_TEXT(trace, 6, "pack->np");
2936			if (queue->card->options.performance_stats)
2937				queue->card->perf_stats.sc_p_dp++;
2938			queue->do_pack = 0;
2939			/* flush packing buffers */
2940			buffer = &queue->bufs[queue->next_buf_to_fill];
2941			if ((atomic_read(&buffer->state) ==
2942						QETH_QDIO_BUF_EMPTY) &&
2943			    (buffer->next_element_to_fill > 0)) {
2944				atomic_set(&buffer->state,QETH_QDIO_BUF_PRIMED);
2945				flush_count++;
2946				queue->next_buf_to_fill =
2947					(queue->next_buf_to_fill + 1) %
2948					QDIO_MAX_BUFFERS_PER_Q;
2949			}
2950		}
2951	}
2952	return flush_count;
2953}
2954
2955/*
2956 * Called to flush a packing buffer if no more pci flags are on the queue.
2957 * Checks if there is a packing buffer and prepares it to be flushed.
2958 * In that case returns 1, otherwise zero.
2959 */
2960static int
2961qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
2962{
2963	struct qeth_qdio_out_buffer *buffer;
2964
2965	buffer = &queue->bufs[queue->next_buf_to_fill];
2966	if((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
2967	   (buffer->next_element_to_fill > 0)){
2968		/* it's a packing buffer */
2969		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
2970		queue->next_buf_to_fill =
2971			(queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q;
2972		return 1;
2973	}
2974	return 0;
2975}
2976
2977static void
2978qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
2979{
2980	int index;
2981	int flush_cnt = 0;
2982	int q_was_packing = 0;
2983
2984	/*
2985	 * check if we have to switch to non-packing mode or if
2986	 * we have to get a PCI flag out on the queue
2987	 */
2988	if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
2989	    !atomic_read(&queue->set_pci_flags_count)){
2990		if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
2991				QETH_OUT_Q_UNLOCKED) {
2992			/*
2993			 * If we get in here, there was no action in
2994			 * do_send_packet. So, we check if there is a
2995			 * packing buffer to be flushed here.
2996			 */
2997			netif_stop_queue(queue->card->dev);
2998			index = queue->next_buf_to_fill;
2999			q_was_packing = queue->do_pack;
3000			flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
3001			if (!flush_cnt &&
3002			    !atomic_read(&queue->set_pci_flags_count))
3003				flush_cnt +=
3004					qeth_flush_buffers_on_no_pci(queue);
3005			if (queue->card->options.performance_stats &&
3006			    q_was_packing)
3007				queue->card->perf_stats.bufs_sent_pack +=
3008					flush_cnt;
3009			if (flush_cnt)
3010				qeth_flush_buffers(queue, 1, index, flush_cnt);
3011			atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
3012		}
3013	}
3014}
3015
3016static void
3017qeth_qdio_output_handler(struct ccw_device * ccwdev, unsigned int status,
3018		        unsigned int qdio_error, unsigned int siga_error,
3019			unsigned int __queue, int first_element, int count,
3020			unsigned long card_ptr)
3021{
3022	struct qeth_card *card        = (struct qeth_card *) card_ptr;
3023	struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
3024	struct qeth_qdio_out_buffer *buffer;
3025	int i;
3026
3027	QETH_DBF_TEXT(trace, 6, "qdouhdl");
3028	if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
3029		if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION){
3030			QETH_DBF_TEXT(trace, 2, "achkcond");
3031			QETH_DBF_TEXT_(trace, 2, "%s", CARD_BUS_ID(card));
3032			QETH_DBF_TEXT_(trace, 2, "%08x", status);
3033			netif_stop_queue(card->dev);
3034			qeth_schedule_recovery(card);
3035			return;
3036		}
3037	}
3038	if (card->options.performance_stats) {
3039		card->perf_stats.outbound_handler_cnt++;
3040		card->perf_stats.outbound_handler_start_time =
3041			qeth_get_micros();
3042	}
3043	for(i = first_element; i < (first_element + count); ++i){
3044		buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
3045		/* we only handle the KICK_IT error by doing a recovery */
3046		if (qeth_handle_send_error(card, buffer,
3047					   qdio_error, siga_error)
3048				== QETH_SEND_ERROR_KICK_IT){
3049			netif_stop_queue(card->dev);
3050			qeth_schedule_recovery(card);
3051			return;
3052		}
3053		qeth_clear_output_buffer(queue, buffer);
3054	}
3055	atomic_sub(count, &queue->used_buffers);
3056	/* check if we need to do something on this outbound queue */
3057	if (card->info.type != QETH_CARD_TYPE_IQD)
3058		qeth_check_outbound_queue(queue);
3059
3060	netif_wake_queue(queue->card->dev);
3061	if (card->options.performance_stats)
3062		card->perf_stats.outbound_handler_time += qeth_get_micros() -
3063			card->perf_stats.outbound_handler_start_time;
3064}
3065
3066static void
3067qeth_create_qib_param_field(struct qeth_card *card, char *param_field)
3068{
3069
3070	param_field[0] = _ascebc['P'];
3071	param_field[1] = _ascebc['C'];
3072	param_field[2] = _ascebc['I'];
3073	param_field[3] = _ascebc['T'];
3074	*((unsigned int *) (&param_field[4])) = QETH_PCI_THRESHOLD_A(card);
3075	*((unsigned int *) (&param_field[8])) = QETH_PCI_THRESHOLD_B(card);
3076	*((unsigned int *) (&param_field[12])) = QETH_PCI_TIMER_VALUE(card);
3077}
3078
3079static void
3080qeth_create_qib_param_field_blkt(struct qeth_card *card, char *param_field)
3081{
3082	param_field[16] = _ascebc['B'];
3083	param_field[17] = _ascebc['L'];
3084	param_field[18] = _ascebc['K'];
3085	param_field[19] = _ascebc['T'];
3086	*((unsigned int *) (&param_field[20])) = card->info.blkt.time_total;
3087	*((unsigned int *) (&param_field[24])) = card->info.blkt.inter_packet;
3088	*((unsigned int *) (&param_field[28])) = card->info.blkt.inter_packet_jumbo;
3089}
3090
3091static void
3092qeth_initialize_working_pool_list(struct qeth_card *card)
3093{
3094	struct qeth_buffer_pool_entry *entry;
3095
3096	QETH_DBF_TEXT(trace,5,"inwrklst");
3097
3098	list_for_each_entry(entry,
3099			    &card->qdio.init_pool.entry_list, init_list) {
3100		qeth_put_buffer_pool_entry(card,entry);
3101	}
3102}
3103
3104static void
3105qeth_clear_working_pool_list(struct qeth_card *card)
3106{
3107	struct qeth_buffer_pool_entry *pool_entry, *tmp;
3108
3109	QETH_DBF_TEXT(trace,5,"clwrklst");
3110	list_for_each_entry_safe(pool_entry, tmp,
3111			    &card->qdio.in_buf_pool.entry_list, list){
3112			list_del(&pool_entry->list);
3113	}
3114}
3115
3116static void
3117qeth_free_buffer_pool(struct qeth_card *card)
3118{
3119	struct qeth_buffer_pool_entry *pool_entry, *tmp;
3120	int i=0;
3121	QETH_DBF_TEXT(trace,5,"freepool");
3122	list_for_each_entry_safe(pool_entry, tmp,
3123				 &card->qdio.init_pool.entry_list, init_list){
3124		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
3125			free_page((unsigned long)pool_entry->elements[i]);
3126		list_del(&pool_entry->init_list);
3127		kfree(pool_entry);
3128	}
3129}
3130
3131static int
3132qeth_alloc_buffer_pool(struct qeth_card *card)
3133{
3134	struct qeth_buffer_pool_entry *pool_entry;
3135	void *ptr;
3136	int i, j;
3137
3138	QETH_DBF_TEXT(trace,5,"alocpool");
3139	for (i = 0; i < card->qdio.init_pool.buf_count; ++i){
3140	 	pool_entry = kmalloc(sizeof(*pool_entry), GFP_KERNEL);
3141		if (!pool_entry){
3142			qeth_free_buffer_pool(card);
3143			return -ENOMEM;
3144		}
3145		for(j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j){
3146			ptr = (void *) __get_free_page(GFP_KERNEL|GFP_DMA);
3147			if (!ptr) {
3148				while (j > 0)
3149					free_page((unsigned long)
3150						  pool_entry->elements[--j]);
3151				kfree(pool_entry);
3152				qeth_free_buffer_pool(card);
3153				return -ENOMEM;
3154			}
3155			pool_entry->elements[j] = ptr;
3156		}
3157		list_add(&pool_entry->init_list,
3158			 &card->qdio.init_pool.entry_list);
3159	}
3160	return 0;
3161}
3162
3163int
3164qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
3165{
3166	QETH_DBF_TEXT(trace, 2, "realcbp");
3167
3168	if ((card->state != CARD_STATE_DOWN) &&
3169	    (card->state != CARD_STATE_RECOVER))
3170		return -EPERM;
3171
3172	/* TODO: steal/add buffers from/to a running card's buffer pool (?) */
3173	qeth_clear_working_pool_list(card);
3174	qeth_free_buffer_pool(card);
3175	card->qdio.in_buf_pool.buf_count = bufcnt;
3176	card->qdio.init_pool.buf_count = bufcnt;
3177	return qeth_alloc_buffer_pool(card);
3178}
3179
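/*
 * Allocates the inbound queue, the inbound buffer pool and the outbound
 * queues of a card and moves qdio.state from UNINITIALIZED to ALLOCATED.
 * Returns 0 if the structures already exist, -ENOMEM on allocation
 * failure (everything allocated so far is freed again).
 */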
3180static int
3181qeth_alloc_qdio_buffers(struct qeth_card *card)
3182{
3183	int i, j;
3184
3185	QETH_DBF_TEXT(setup, 2, "allcqdbf");
3186
3187	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
3188		QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
3189		return 0;
3190
3191	card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q),
3192				  GFP_KERNEL|GFP_DMA);
3193	if (!card->qdio.in_q)
3194		goto out_nomem;
3195	QETH_DBF_TEXT(setup, 2, "inq");
3196	QETH_DBF_HEX(setup, 2, &card->qdio.in_q, sizeof(void *));
3197	memset(card->qdio.in_q, 0, sizeof(struct qeth_qdio_q));
3198	/* give inbound qeth_qdio_buffers their qdio_buffers */
3199	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
3200		card->qdio.in_q->bufs[i].buffer =
3201			&card->qdio.in_q->qdio_bufs[i];
3202	/* inbound buffer pool */
3203	if (qeth_alloc_buffer_pool(card))
3204		goto out_freeinq;
3205	/* outbound */
3206	card->qdio.out_qs =
3207		kmalloc(card->qdio.no_out_queues *
3208			sizeof(struct qeth_qdio_out_q *), GFP_KERNEL);
3209	if (!card->qdio.out_qs)
3210		goto out_freepool;
3211	for (i = 0; i < card->qdio.no_out_queues; ++i) {
3212		card->qdio.out_qs[i] = kmalloc(sizeof(struct qeth_qdio_out_q),
3213					       GFP_KERNEL|GFP_DMA);
3214		if (!card->qdio.out_qs[i])
3215			goto out_freeoutq;
3216		QETH_DBF_TEXT_(setup, 2, "outq %i", i);
3217		QETH_DBF_HEX(setup, 2, &card->qdio.out_qs[i], sizeof(void *));
3218		memset(card->qdio.out_qs[i], 0, sizeof(struct qeth_qdio_out_q));
3219		card->qdio.out_qs[i]->queue_no = i;
3220		/* give outbound qeth_qdio_buffers their qdio_buffers */
3221		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j){
3222			card->qdio.out_qs[i]->bufs[j].buffer =
3223				&card->qdio.out_qs[i]->qdio_bufs[j];
3224			skb_queue_head_init(&card->qdio.out_qs[i]->bufs[j].
3225					    skb_list);
3226			lockdep_set_class(
3227				&card->qdio.out_qs[i]->bufs[j].skb_list.lock,
3228				&qdio_out_skb_queue_key);
3229			INIT_LIST_HEAD(&card->qdio.out_qs[i]->bufs[j].ctx_list);
3230		}
3231	}
3232	return 0;
3233
3234out_freeoutq:
3235	while (i > 0)
3236		kfree(card->qdio.out_qs[--i]);
3237	kfree(card->qdio.out_qs);
3238out_freepool:
3239	qeth_free_buffer_pool(card);
3240out_freeinq:
3241	kfree(card->qdio.in_q);
3242out_nomem:
3243	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
3244	return -ENOMEM;
3245}
3246
3247static void
3248qeth_free_qdio_buffers(struct qeth_card *card)
3249{
3250	int i, j;
3251
3252	QETH_DBF_TEXT(trace, 2, "freeqdbf");
3253	if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
3254		QETH_QDIO_UNINITIALIZED)
3255		return;
3256	kfree(card->qdio.in_q);
3257	/* inbound buffer pool */
3258	qeth_free_buffer_pool(card);
3259	/* free outbound qdio_qs */
3260	for (i = 0; i < card->qdio.no_out_queues; ++i){
3261		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
3262			qeth_clear_output_buffer(card->qdio.out_qs[i],
3263					&card->qdio.out_qs[i]->bufs[j]);
3264		kfree(card->qdio.out_qs[i]);
3265	}
3266	kfree(card->qdio.out_qs);
3267}
3268
3269static void
3270qeth_clear_qdio_buffers(struct qeth_card *card)
3271{
3272	int i, j;
3273
3274	QETH_DBF_TEXT(trace, 2, "clearqdbf");
3275	/* clear outbound buffers to free skbs */
3276	for (i = 0; i < card->qdio.no_out_queues; ++i)
3277		if (card->qdio.out_qs[i]){
3278			for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
3279				qeth_clear_output_buffer(card->qdio.out_qs[i],
3280						&card->qdio.out_qs[i]->bufs[j]);
3281		}
3282}
3283
3284static void
3285qeth_init_qdio_info(struct qeth_card *card)
3286{
3287	QETH_DBF_TEXT(setup, 4, "intqdinf");
3288	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
3289	/* inbound */
3290	card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
3291	card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
3292	card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
3293	INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
3294	INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
3295}
3296
3297static int
3298qeth_init_qdio_queues(struct qeth_card *card)
3299{
3300	int i, j;
3301	int rc;
3302
3303	QETH_DBF_TEXT(setup, 2, "initqdqs");
3304
3305	/* inbound queue */
3306	memset(card->qdio.in_q->qdio_bufs, 0,
3307	       QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
3308	qeth_initialize_working_pool_list(card);
3309	/* give only as many buffers to hardware as we have buffer pool entries */
3310	for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i)
3311		qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
3312	card->qdio.in_q->next_buf_to_init = card->qdio.in_buf_pool.buf_count - 1;
3313	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
3314		     card->qdio.in_buf_pool.buf_count - 1, NULL);
3315	if (rc) {
3316		QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
3317		return rc;
3318	}
3319	rc = qdio_synchronize(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0);
3320	if (rc) {
3321		QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
3322		return rc;
3323	}
3324	/* outbound queue */
3325	for (i = 0; i < card->qdio.no_out_queues; ++i){
3326		memset(card->qdio.out_qs[i]->qdio_bufs, 0,
3327		       QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
3328		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j){
3329			qeth_clear_output_buffer(card->qdio.out_qs[i],
3330					&card->qdio.out_qs[i]->bufs[j]);
3331		}
3332		card->qdio.out_qs[i]->card = card;
3333		card->qdio.out_qs[i]->next_buf_to_fill = 0;
3334		card->qdio.out_qs[i]->do_pack = 0;
3335		atomic_set(&card->qdio.out_qs[i]->used_buffers,0);
3336		atomic_set(&card->qdio.out_qs[i]->set_pci_flags_count, 0);
3337		atomic_set(&card->qdio.out_qs[i]->state,
3338			   QETH_OUT_Q_UNLOCKED);
3339	}
3340	return 0;
3341}
3342
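/*
 * Builds the qdio_initialize description (QIB parameter field with the
 * PCIT and BLKT entries, SBAL address arrays for the input and output
 * queues) and calls qdio_initialize; on success qdio.state moves from
 * ALLOCATED to ESTABLISHED, on failure it is reset to ALLOCATED.
 */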
3343static int
3344qeth_qdio_establish(struct qeth_card *card)
3345{
3346	struct qdio_initialize init_data;
3347	char *qib_param_field;
3348	struct qdio_buffer **in_sbal_ptrs;
3349	struct qdio_buffer **out_sbal_ptrs;
3350	int i, j, k;
3351	int rc = 0;
3352
3353	QETH_DBF_TEXT(setup, 2, "qdioest");
3354
3355	qib_param_field = kzalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(char),
3356			      GFP_KERNEL);
3357 	if (!qib_param_field)
3358		return -ENOMEM;
3359
3360	qeth_create_qib_param_field(card, qib_param_field);
3361	qeth_create_qib_param_field_blkt(card, qib_param_field);
3362
3363	in_sbal_ptrs = kmalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(void *),
3364			       GFP_KERNEL);
3365	if (!in_sbal_ptrs) {
3366		kfree(qib_param_field);
3367		return -ENOMEM;
3368	}
3369	for(i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
3370		in_sbal_ptrs[i] = (struct qdio_buffer *)
3371			virt_to_phys(card->qdio.in_q->bufs[i].buffer);
3372
3373	out_sbal_ptrs =
3374		kmalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q *
3375			sizeof(void *), GFP_KERNEL);
3376	if (!out_sbal_ptrs) {
3377		kfree(in_sbal_ptrs);
3378		kfree(qib_param_field);
3379		return -ENOMEM;
3380	}
3381	for(i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
3382		for(j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k){
3383			out_sbal_ptrs[k] = (struct qdio_buffer *)
3384				virt_to_phys(card->qdio.out_qs[i]->
3385					     bufs[j].buffer);
3386		}
3387
3388	memset(&init_data, 0, sizeof(struct qdio_initialize));
3389	init_data.cdev                   = CARD_DDEV(card);
3390	init_data.q_format               = qeth_get_qdio_q_format(card);
3391	init_data.qib_param_field_format = 0;
3392	init_data.qib_param_field        = qib_param_field;
3393	init_data.min_input_threshold    = QETH_MIN_INPUT_THRESHOLD;
3394	init_data.max_input_threshold    = QETH_MAX_INPUT_THRESHOLD;
3395	init_data.min_output_threshold   = QETH_MIN_OUTPUT_THRESHOLD;
3396	init_data.max_output_threshold   = QETH_MAX_OUTPUT_THRESHOLD;
3397	init_data.no_input_qs            = 1;
3398	init_data.no_output_qs           = card->qdio.no_out_queues;
3399	init_data.input_handler          = (qdio_handler_t *)
3400					   qeth_qdio_input_handler;
3401	init_data.output_handler         = (qdio_handler_t *)
3402					   qeth_qdio_output_handler;
3403	init_data.int_parm               = (unsigned long) card;
3404	init_data.flags                  = QDIO_INBOUND_0COPY_SBALS |
3405					   QDIO_OUTBOUND_0COPY_SBALS |
3406					   QDIO_USE_OUTBOUND_PCIS;
3407	init_data.input_sbal_addr_array  = (void **) in_sbal_ptrs;
3408	init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
3409
3410	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
3411		QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED)
3412		if ((rc = qdio_initialize(&init_data)))
3413			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
3414
3415	kfree(out_sbal_ptrs);
3416	kfree(in_sbal_ptrs);
3417	kfree(qib_param_field);
3418	return rc;
3419}
3420
3421static int
3422qeth_qdio_activate(struct qeth_card *card)
3423{
3424	QETH_DBF_TEXT(setup,3,"qdioact");
3425	return qdio_activate(CARD_DDEV(card), 0);
3426}
3427
3428static int
3429qeth_clear_channel(struct qeth_channel *channel)
3430{
3431	unsigned long flags;
3432	struct qeth_card *card;
3433	int rc;
3434
3435	QETH_DBF_TEXT(trace,3,"clearch");
3436	card = CARD_FROM_CDEV(channel->ccwdev);
3437	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
3438	rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM);
3439	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
3440
3441	if (rc)
3442		return rc;
3443	rc = wait_event_interruptible_timeout(card->wait_q,
3444			channel->state==CH_STATE_STOPPED, QETH_TIMEOUT);
3445	if (rc == -ERESTARTSYS)
3446		return rc;
3447	if (channel->state != CH_STATE_STOPPED)
3448		return -ETIME;
3449	channel->state = CH_STATE_DOWN;
3450	return 0;
3451}
3452
3453static int
3454qeth_halt_channel(struct qeth_channel *channel)
3455{
3456	unsigned long flags;
3457	struct qeth_card *card;
3458	int rc;
3459
3460	QETH_DBF_TEXT(trace,3,"haltch");
3461	card = CARD_FROM_CDEV(channel->ccwdev);
3462	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
3463	rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM);
3464	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
3465
3466	if (rc)
3467		return rc;
3468	rc = wait_event_interruptible_timeout(card->wait_q,
3469			channel->state==CH_STATE_HALTED, QETH_TIMEOUT);
3470	if (rc == -ERESTARTSYS)
3471		return rc;
3472	if (channel->state != CH_STATE_HALTED)
3473		return -ETIME;
3474	return 0;
3475}
3476
3477static int
3478qeth_halt_channels(struct qeth_card *card)
3479{
3480	int rc1 = 0, rc2=0, rc3 = 0;
3481
3482	QETH_DBF_TEXT(trace,3,"haltchs");
3483	rc1 = qeth_halt_channel(&card->read);
3484	rc2 = qeth_halt_channel(&card->write);
3485	rc3 = qeth_halt_channel(&card->data);
3486	if (rc1)
3487		return rc1;
3488	if (rc2)
3489		return rc2;
3490	return rc3;
3491}
3492static int
3493qeth_clear_channels(struct qeth_card *card)
3494{
3495	int rc1 = 0, rc2=0, rc3 = 0;
3496
3497	QETH_DBF_TEXT(trace,3,"clearchs");
3498	rc1 = qeth_clear_channel(&card->read);
3499	rc2 = qeth_clear_channel(&card->write);
3500	rc3 = qeth_clear_channel(&card->data);
3501	if (rc1)
3502		return rc1;
3503	if (rc2)
3504		return rc2;
3505	return rc3;
3506}
3507
3508static int
3509qeth_clear_halt_card(struct qeth_card *card, int halt)
3510{
3511	int rc = 0;
3512
3513	QETH_DBF_TEXT(trace,3,"clhacrd");
3514	QETH_DBF_HEX(trace, 3, &card, sizeof(void *));
3515
3516	if (halt)
3517		rc = qeth_halt_channels(card);
3518	if (rc)
3519		return rc;
3520	return qeth_clear_channels(card);
3521}
3522
3523static int
3524qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
3525{
3526	int rc = 0;
3527
3528	QETH_DBF_TEXT(trace,3,"qdioclr");
3529	switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
3530		QETH_QDIO_CLEANING)) {
3531	case QETH_QDIO_ESTABLISHED:
3532		if ((rc = qdio_cleanup(CARD_DDEV(card),
3533				(card->info.type == QETH_CARD_TYPE_IQD) ?
3534				QDIO_FLAG_CLEANUP_USING_HALT :
3535				QDIO_FLAG_CLEANUP_USING_CLEAR)))
3536			QETH_DBF_TEXT_(trace, 3, "1err%d", rc);
3537		atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
3538		break;
3539	case QETH_QDIO_CLEANING:
3540		return rc;
3541	default:
3542		break;
3543	}
3544	if ((rc = qeth_clear_halt_card(card, use_halt)))
3545		QETH_DBF_TEXT_(trace, 3, "2err%d", rc);
3546	card->state = CARD_STATE_DOWN;
3547	return rc;
3548}
3549
3550static int
3551qeth_dm_act(struct qeth_card *card)
3552{
3553	int rc;
3554	struct qeth_cmd_buffer *iob;
3555
3556	QETH_DBF_TEXT(setup,2,"dmact");
3557
3558	iob = qeth_wait_for_buffer(&card->write);
3559	memcpy(iob->data, DM_ACT, DM_ACT_SIZE);
3560
3561	memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
3562	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
3563	memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
3564	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
3565	rc = qeth_send_control_data(card, DM_ACT_SIZE, iob, NULL, NULL);
3566	return rc;
3567}
3568
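/*
 * Runs the MPC initialization sequence on the control channels:
 * issue the first read, CM enable/setup, ULP enable/setup, then allocate
 * and establish the QDIO queues, activate them and send DM_ACT. Any
 * failure after the first step clears the QDIO setup again.
 */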
3569static int
3570qeth_mpc_initialize(struct qeth_card *card)
3571{
3572	int rc;
3573
3574	QETH_DBF_TEXT(setup,2,"mpcinit");
3575
3576	if ((rc = qeth_issue_next_read(card))){
3577		QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
3578		return rc;
3579	}
3580	if ((rc = qeth_cm_enable(card))){
3581		QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
3582		goto out_qdio;
3583	}
3584	if ((rc = qeth_cm_setup(card))){
3585		QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
3586		goto out_qdio;
3587	}
3588	if ((rc = qeth_ulp_enable(card))){
3589		QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
3590		goto out_qdio;
3591	}
3592	if ((rc = qeth_ulp_setup(card))){
3593		QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
3594		goto out_qdio;
3595	}
3596	if ((rc = qeth_alloc_qdio_buffers(card))){
3597		QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
3598		goto out_qdio;
3599	}
3600	if ((rc = qeth_qdio_establish(card))){
3601		QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
3602		qeth_free_qdio_buffers(card);
3603		goto out_qdio;
3604	}
3605 	if ((rc = qeth_qdio_activate(card))){
3606		QETH_DBF_TEXT_(setup, 2, "7err%d", rc);
3607		goto out_qdio;
3608	}
3609	if ((rc = qeth_dm_act(card))){
3610		QETH_DBF_TEXT_(setup, 2, "8err%d", rc);
3611		goto out_qdio;
3612	}
3613
3614	return 0;
3615out_qdio:
3616	qeth_qdio_clear_card(card, card->info.type!=QETH_CARD_TYPE_IQD);
3617	return rc;
3618}
3619
3620static struct net_device *
3621qeth_get_netdevice(enum qeth_card_types type, enum qeth_link_types linktype)
3622{
3623	struct net_device *dev = NULL;
3624
3625	switch (type) {
3626	case QETH_CARD_TYPE_OSAE:
3627		switch (linktype) {
3628		case QETH_LINK_TYPE_LANE_TR:
3629		case QETH_LINK_TYPE_HSTR:
3630#ifdef CONFIG_TR
3631			dev = alloc_trdev(0);
3632#endif /* CONFIG_TR */
3633			break;
3634		default:
3635			dev = alloc_etherdev(0);
3636		}
3637		break;
3638	case QETH_CARD_TYPE_IQD:
3639		dev = alloc_netdev(0, "hsi%d", ether_setup);
3640		break;
3641	case QETH_CARD_TYPE_OSN:
3642		dev = alloc_netdev(0, "osn%d", ether_setup);
3643		break;
3644	default:
3645		dev = alloc_etherdev(0);
3646	}
3647	return dev;
3648}
3649
3650/* hard_header fake function; used in case fake_ll is set */
3651static int
3652qeth_fake_header(struct sk_buff *skb, struct net_device *dev,
3653		     unsigned short type, void *daddr, void *saddr,
3654		     unsigned len)
3655{
3656	if (dev->type == ARPHRD_IEEE802_TR) {
3657		struct trh_hdr *hdr;
3658		hdr = (struct trh_hdr *)skb_push(skb, QETH_FAKE_LL_LEN_TR);
3659		memcpy(hdr->saddr, dev->dev_addr, TR_ALEN);
3660		memcpy(hdr->daddr, "FAKELL", TR_ALEN);
3661		return QETH_FAKE_LL_LEN_TR;
3662
3663	} else {
3664		struct ethhdr *hdr;
3665		hdr = (struct ethhdr *)skb_push(skb, QETH_FAKE_LL_LEN_ETH);
3666		memcpy(hdr->h_source, dev->dev_addr, ETH_ALEN);
3667		memcpy(hdr->h_dest, "FAKELL", ETH_ALEN);
3668		if (type != ETH_P_802_3)
3669			hdr->h_proto = htons(type);
3670		else
3671			hdr->h_proto = htons(len);
3672		return QETH_FAKE_LL_LEN_ETH;
3673
3674	}
3675}
3676
3677static int
3678qeth_send_packet(struct qeth_card *, struct sk_buff *);
3679
3680static int
3681qeth_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
3682{
3683	int rc;
3684	struct qeth_card *card;
3685
3686	QETH_DBF_TEXT(trace, 6, "hrdstxmi");
3687	card = (struct qeth_card *)dev->priv;
3688	if (skb==NULL) {
3689		card->stats.tx_dropped++;
3690		card->stats.tx_errors++;
3691		/* return OK; otherwise ksoftirqd goes to 100% */
3692		return NETDEV_TX_OK;
3693	}
3694	if ((card->state != CARD_STATE_UP) || !card->lan_online) {
3695		card->stats.tx_dropped++;
3696		card->stats.tx_errors++;
3697		card->stats.tx_carrier_errors++;
3698		dev_kfree_skb_any(skb);
3699		/* return OK; otherwise ksoftirqd goes to 100% */
3700		return NETDEV_TX_OK;
3701	}
3702	if (card->options.performance_stats) {
3703		card->perf_stats.outbound_cnt++;
3704		card->perf_stats.outbound_start_time = qeth_get_micros();
3705	}
3706	netif_stop_queue(dev);
3707	if ((rc = qeth_send_packet(card, skb))) {
3708		if (rc == -EBUSY) {
3709			return NETDEV_TX_BUSY;
3710		} else {
3711			card->stats.tx_errors++;
3712			card->stats.tx_dropped++;
3713			dev_kfree_skb_any(skb);
3714			/*set to OK; otherwise ksoftirqd goes to 100% */
3715			rc = NETDEV_TX_OK;
3716		}
3717	}
3718	netif_wake_queue(dev);
3719	if (card->options.performance_stats)
3720		card->perf_stats.outbound_time += qeth_get_micros() -
3721			card->perf_stats.outbound_start_time;
3722	return rc;
3723}
3724
3725static int
3726qeth_verify_vlan_dev(struct net_device *dev, struct qeth_card *card)
3727{
3728	int rc = 0;
3729#ifdef CONFIG_QETH_VLAN
3730	struct vlan_group *vg;
3731	int i;
3732
3733	if (!(vg = card->vlangrp))
3734		return rc;
3735
3736	for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++){
3737		if (vlan_group_get_device(vg, i) == dev){
3738			rc = QETH_VLAN_CARD;
3739			break;
3740		}
3741	}
3742	if (rc && !(VLAN_DEV_INFO(dev)->real_dev->priv == (void *)card))
3743		return 0;
3744
3745#endif
3746	return rc;
3747}
3748
3749static int
3750qeth_verify_dev(struct net_device *dev)
3751{
3752	struct qeth_card *card;
3753	unsigned long flags;
3754	int rc = 0;
3755
3756	read_lock_irqsave(&qeth_card_list.rwlock, flags);
3757	list_for_each_entry(card, &qeth_card_list.list, list){
3758		if (card->dev == dev){
3759			rc = QETH_REAL_CARD;
3760			break;
3761		}
3762		rc = qeth_verify_vlan_dev(dev, card);
3763		if (rc)
3764			break;
3765	}
3766	read_unlock_irqrestore(&qeth_card_list.rwlock, flags);
3767
3768	return rc;
3769}
3770
3771static struct qeth_card *
3772qeth_get_card_from_dev(struct net_device *dev)
3773{
3774	struct qeth_card *card = NULL;
3775	int rc;
3776
3777	rc = qeth_verify_dev(dev);
3778	if (rc == QETH_REAL_CARD)
3779		card = (struct qeth_card *)dev->priv;
3780	else if (rc == QETH_VLAN_CARD)
3781		card = (struct qeth_card *)
3782			VLAN_DEV_INFO(dev)->real_dev->priv;
3783
3784	QETH_DBF_TEXT_(trace, 4, "%d", rc);
3785	return card;
3786}
3787
3788static void
3789qeth_tx_timeout(struct net_device *dev)
3790{
3791	struct qeth_card *card;
3792
3793	card = (struct qeth_card *) dev->priv;
3794	card->stats.tx_errors++;
3795	qeth_schedule_recovery(card);
3796}
3797
3798static int
3799qeth_open(struct net_device *dev)
3800{
3801	struct qeth_card *card;
3802
3803	QETH_DBF_TEXT(trace, 4, "qethopen");
3804
3805	card = (struct qeth_card *) dev->priv;
3806
3807	if (card->state != CARD_STATE_SOFTSETUP)
3808		return -ENODEV;
3809
3810	if ( (card->info.type != QETH_CARD_TYPE_OSN) &&
3811	     (card->options.layer2) &&
3812	     (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))) {
3813		QETH_DBF_TEXT(trace,4,"nomacadr");
3814		return -EPERM;
3815	}
3816	card->data.state = CH_STATE_UP;
3817	card->state = CARD_STATE_UP;
3818	card->dev->flags |= IFF_UP;
3819	netif_start_queue(dev);
3820
3821	if (!card->lan_online && netif_carrier_ok(dev))
3822		netif_carrier_off(dev);
3823	return 0;
3824}
3825
3826static int
3827qeth_stop(struct net_device *dev)
3828{
3829	struct qeth_card *card;
3830
3831	QETH_DBF_TEXT(trace, 4, "qethstop");
3832
3833	card = (struct qeth_card *) dev->priv;
3834
3835	netif_tx_disable(dev);
3836	card->dev->flags &= ~IFF_UP;
3837	if (card->state == CARD_STATE_UP)
3838		card->state = CARD_STATE_SOFTSETUP;
3839	return 0;
3840}
3841
3842static int
3843qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
3844{
3845	int cast_type = RTN_UNSPEC;
3846
3847	if (card->info.type == QETH_CARD_TYPE_OSN)
3848		return cast_type;
3849
3850	if (skb->dst && skb->dst->neighbour){
3851		cast_type = skb->dst->neighbour->type;
3852		if ((cast_type == RTN_BROADCAST) ||
3853		    (cast_type == RTN_MULTICAST) ||
3854		    (cast_type == RTN_ANYCAST))
3855			return cast_type;
3856		else
3857			return RTN_UNSPEC;
3858	}
3859	/* try something else: inspect the IP destination address */
3860	if (skb->protocol == htons(ETH_P_IPV6))
3861		return (skb_network_header(skb)[24] == 0xff) ?
3862				RTN_MULTICAST : 0;
3863	else if (skb->protocol == htons(ETH_P_IP))
3864		return ((skb_network_header(skb)[16] & 0xf0) == 0xe0) ?
3865				RTN_MULTICAST : 0;
3866	/* not an IP frame; classify by the link-layer destination */
3867	if (!memcmp(skb->data, skb->dev->broadcast, 6))
3868		return RTN_BROADCAST;
3869	else {
3870		u16 hdr_mac;
3871
3872		hdr_mac = *((u16 *)skb->data);
3873		/* tr multicast? */
3874		switch (card->info.link_type) {
3875		case QETH_LINK_TYPE_HSTR:
3876		case QETH_LINK_TYPE_LANE_TR:
3877			if ((hdr_mac == QETH_TR_MAC_NC) ||
3878			    (hdr_mac == QETH_TR_MAC_C))
3879				return RTN_MULTICAST;
3880			break;
3881		/* eth or so multicast? */
3882		default:
3883			if ((hdr_mac == QETH_ETH_MAC_V4) ||
3884			    (hdr_mac == QETH_ETH_MAC_V6))
3885				return RTN_MULTICAST;
3886		}
3887	}
3888	return cast_type;
3889}
3890
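/*
 * Select the outbound queue for a frame.  On a four-queue device with
 * priority queueing enabled, the IPv4 TOS byte decides:
 *
 *   QETH_PRIO_Q_ING_PREC: queue = 3 - (tos >> 6)  (precedence bits)
 *   QETH_PRIO_Q_ING_TOS:  lowdelay -> 0, highthroughput -> 1,
 *                         highreliability -> 2, notimportant -> 3
 *
 * If the adapter reports a separate multicast queue
 * (is_multicast_different), non-unicast frames go there.  Everything
 * else (layer 2, IPv6, single-queue devices) uses the default queue.
 */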
3891static int
3892qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
3893		        int ipv, int cast_type)
3894{
3895	if (!ipv && (card->info.type == QETH_CARD_TYPE_OSAE))
3896		return card->qdio.default_out_queue;
3897	switch (card->qdio.no_out_queues) {
3898	case 4:
3899		if (cast_type && card->info.is_multicast_different)
3900			return card->info.is_multicast_different &
3901				(card->qdio.no_out_queues - 1);
3902		if (card->qdio.do_prio_queueing && (ipv == 4)) {
3903			const u8 tos = ip_hdr(skb)->tos;
3904
3905			if (card->qdio.do_prio_queueing==QETH_PRIO_Q_ING_TOS){
3906				if (tos & IP_TOS_NOTIMPORTANT)
3907					return 3;
3908				if (tos & IP_TOS_HIGHRELIABILITY)
3909					return 2;
3910				if (tos & IP_TOS_HIGHTHROUGHPUT)
3911					return 1;
3912				if (tos & IP_TOS_LOWDELAY)
3913					return 0;
3914			}
3915			if (card->qdio.do_prio_queueing==QETH_PRIO_Q_ING_PREC)
3916				return 3 - (tos >> 6);
3917		} else if (card->qdio.do_prio_queueing && (ipv == 6)) {
3918			/* TODO: IPv6!!! */
3919		}
3920		return card->qdio.default_out_queue;
3921	case 1: /* fallthrough for single-out-queue 1920-device */
3922	default:
3923		return card->qdio.default_out_queue;
3924	}
3925}
3926
3927static inline int
3928qeth_get_ip_version(struct sk_buff *skb)
3929{
3930	switch (skb->protocol) {
3931	case __constant_htons(ETH_P_IPV6):
3932		return 6;
3933	case __constant_htons(ETH_P_IP):
3934		return 4;
3935	default:
3936		return 0;
3937	}
3938}
3939
3940static struct qeth_hdr *
3941__qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, int ipv)
3942{
3943#ifdef CONFIG_QETH_VLAN
3944	u16 *tag;
3945	if (card->vlangrp && vlan_tx_tag_present(skb) &&
3946	    ((ipv == 6) || card->options.layer2) ) {
3947		/*
3948		 * Move the mac addresses (6 bytes src, 6 bytes dest)
3949		 * to the beginning of the new header.  We are using three
3950		 * memcpys instead of one memmove to save cycles.
3951		 */
3952		skb_push(skb, VLAN_HLEN);
3953		skb_copy_to_linear_data(skb, skb->data + 4, 4);
3954		skb_copy_to_linear_data_offset(skb, 4, skb->data + 8, 4);
3955		skb_copy_to_linear_data_offset(skb, 8, skb->data + 12, 4);
3956		tag = (u16 *)(skb->data + 12);
3957		/*
3958		 * first two bytes  = ETH_P_8021Q (0x8100)
3959		 * second two bytes = VLANID
3960		 */
3961		*tag = __constant_htons(ETH_P_8021Q);
3962		*(tag + 1) = htons(vlan_tx_tag_get(skb));
3963	}
3964#endif
3965	return ((struct qeth_hdr *)
3966		qeth_push_skb(card, skb, sizeof(struct qeth_hdr)));
3967}
3968
3969static void
3970__qeth_free_new_skb(struct sk_buff *orig_skb, struct sk_buff *new_skb)
3971{
3972	if (orig_skb != new_skb)
3973		dev_kfree_skb_any(new_skb);
3974}
3975
3976static struct sk_buff *
3977qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb,
3978		 struct qeth_hdr **hdr, int ipv)
3979{
3980	struct sk_buff *new_skb, *new_skb2;
3981
3982	QETH_DBF_TEXT(trace, 6, "prepskb");
3983	new_skb = qeth_pskb_unshare(skb, GFP_ATOMIC);
3985	if (!new_skb)
3986		return NULL;
3987	new_skb2 = qeth_realloc_headroom(card, new_skb,
3988					 sizeof(struct qeth_hdr));
3989	if (!new_skb2) {
3990		__qeth_free_new_skb(skb, new_skb);
3991		return NULL;
3992	}
3993	if (new_skb != skb)
3994		__qeth_free_new_skb(new_skb2, new_skb);
3995	new_skb = new_skb2;
3996	*hdr = __qeth_prepare_skb(card, new_skb, ipv);
3997	if (*hdr == NULL) {
3998		__qeth_free_new_skb(skb, new_skb);
3999		return NULL;
4000	}
4001	return new_skb;
4002}
4003
4004static inline u8
4005qeth_get_qeth_hdr_flags4(int cast_type)
4006{
4007	if (cast_type == RTN_MULTICAST)
4008		return QETH_CAST_MULTICAST;
4009	if (cast_type == RTN_BROADCAST)
4010		return QETH_CAST_BROADCAST;
4011	return QETH_CAST_UNICAST;
4012}
4013
4014static inline u8
4015qeth_get_qeth_hdr_flags6(int cast_type)
4016{
4017	u8 ct = QETH_HDR_PASSTHRU | QETH_HDR_IPV6;
4018	if (cast_type == RTN_MULTICAST)
4019		return ct | QETH_CAST_MULTICAST;
4020	if (cast_type == RTN_ANYCAST)
4021		return ct | QETH_CAST_ANYCAST;
4022	if (cast_type == RTN_BROADCAST)
4023		return ct | QETH_CAST_BROADCAST;
4024	return ct | QETH_CAST_UNICAST;
4025}
4026
4027static void
4028qeth_layer2_get_packet_type(struct qeth_card *card, struct qeth_hdr *hdr,
4029			    struct sk_buff *skb)
4030{
4031	__u16 hdr_mac;
4032
4033	if (!memcmp(skb->data+QETH_HEADER_SIZE,
4034		    skb->dev->broadcast,6)) { /* broadcast? */
4035		*(__u32 *)hdr->hdr.l2.flags |=
4036			 QETH_LAYER2_FLAG_BROADCAST << 8;
4037		return;
4038	}
4039	hdr_mac=*((__u16*)skb->data);
4040	/* tr multicast? */
4041	switch (card->info.link_type) {
4042	case QETH_LINK_TYPE_HSTR:
4043	case QETH_LINK_TYPE_LANE_TR:
4044		if ((hdr_mac == QETH_TR_MAC_NC) ||
4045		    (hdr_mac == QETH_TR_MAC_C) )
4046			*(__u32 *)hdr->hdr.l2.flags |=
4047				QETH_LAYER2_FLAG_MULTICAST << 8;
4048		else
4049			*(__u32 *)hdr->hdr.l2.flags |=
4050				QETH_LAYER2_FLAG_UNICAST << 8;
4051		break;
4052		/* eth or so multicast? */
4053	default:
4054		if ( (hdr_mac==QETH_ETH_MAC_V4) ||
4055		     (hdr_mac==QETH_ETH_MAC_V6) )
4056			*(__u32 *)hdr->hdr.l2.flags |=
4057				QETH_LAYER2_FLAG_MULTICAST << 8;
4058		else
4059			*(__u32 *)hdr->hdr.l2.flags |=
4060				QETH_LAYER2_FLAG_UNICAST << 8;
4061	}
4062}
4063
4064static void
4065qeth_layer2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
4066			struct sk_buff *skb, int cast_type)
4067{
4068	memset(hdr, 0, sizeof(struct qeth_hdr));
4069	hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
4070
4071	/* set byte 0 to "0x02" and byte 3 to casting flags */
4072	if (cast_type==RTN_MULTICAST)
4073		*(__u32 *)hdr->hdr.l2.flags |= QETH_LAYER2_FLAG_MULTICAST << 8;
4074	else if (cast_type==RTN_BROADCAST)
4075		*(__u32 *)hdr->hdr.l2.flags |= QETH_LAYER2_FLAG_BROADCAST << 8;
4076	 else
4077		qeth_layer2_get_packet_type(card, hdr, skb);
4078
4079	hdr->hdr.l2.pkt_length = skb->len-QETH_HEADER_SIZE;
4080#ifdef CONFIG_QETH_VLAN
4081	/* VSWITCH relies on the VLAN
4082	 * information to be present in
4083	 * the QDIO header */
4084	if ((card->vlangrp != NULL) &&
4085	    vlan_tx_tag_present(skb)) {
4086		*(__u32 *)hdr->hdr.l2.flags |= QETH_LAYER2_FLAG_VLAN << 8;
4087		hdr->hdr.l2.vlan_id = vlan_tx_tag_get(skb);
4088	}
4089#endif
4090}
4091
4092void
4093qeth_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
4094		struct sk_buff *skb, int ipv, int cast_type)
4095{
4096	QETH_DBF_TEXT(trace, 6, "fillhdr");
4097
4098	memset(hdr, 0, sizeof(struct qeth_hdr));
4099	if (card->options.layer2) {
4100		qeth_layer2_fill_header(card, hdr, skb, cast_type);
4101		return;
4102	}
4103	hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
4104	hdr->hdr.l3.ext_flags = 0;
4105#ifdef CONFIG_QETH_VLAN
4106	/*
4107	 * Save the VLAN tag before this location is overwritten with the
4108	 * next-hop IP: v6 uses passthrough, v4 sets the tag in the QDIO header.
4109	 */
4110	if (card->vlangrp && vlan_tx_tag_present(skb)) {
4111		hdr->hdr.l3.ext_flags = (ipv == 4) ?
4112			QETH_HDR_EXT_VLAN_FRAME :
4113			QETH_HDR_EXT_INCLUDE_VLAN_TAG;
4114		hdr->hdr.l3.vlan_id = vlan_tx_tag_get(skb);
4115	}
4116#endif /* CONFIG_QETH_VLAN */
4117	hdr->hdr.l3.length = skb->len - sizeof(struct qeth_hdr);
4118	if (ipv == 4) {	 /* IPv4 */
4119		hdr->hdr.l3.flags = qeth_get_qeth_hdr_flags4(cast_type);
4120		memset(hdr->hdr.l3.dest_addr, 0, 12);
4121		if ((skb->dst) && (skb->dst->neighbour)) {
4122			*((u32 *) (&hdr->hdr.l3.dest_addr[12])) =
4123			    *((u32 *) skb->dst->neighbour->primary_key);
4124		} else {
4125			/* fill in destination address used in ip header */
4126			*((u32 *)(&hdr->hdr.l3.dest_addr[12])) =
4127							   ip_hdr(skb)->daddr;
4128		}
4129	} else if (ipv == 6) { /* IPv6 or passthru */
4130		hdr->hdr.l3.flags = qeth_get_qeth_hdr_flags6(cast_type);
4131		if ((skb->dst) && (skb->dst->neighbour)) {
4132			memcpy(hdr->hdr.l3.dest_addr,
4133			       skb->dst->neighbour->primary_key, 16);
4134		} else {
4135			/* fill in destination address used in ip header */
4136			memcpy(hdr->hdr.l3.dest_addr,
4137			       &ipv6_hdr(skb)->daddr, 16);
4138		}
4139	} else { /* passthrough */
4140		if ((skb->dev->type == ARPHRD_IEEE802_TR) &&
4141		    !memcmp(skb->data + sizeof(struct qeth_hdr) +
4142		    sizeof(__u16), skb->dev->broadcast, 6)) {
4143			hdr->hdr.l3.flags = QETH_CAST_BROADCAST |
4144						QETH_HDR_PASSTHRU;
4145		} else if (!memcmp(skb->data + sizeof(struct qeth_hdr),
4146			    skb->dev->broadcast, 6)) {   /* broadcast? */
4147			hdr->hdr.l3.flags = QETH_CAST_BROADCAST |
4148						QETH_HDR_PASSTHRU;
4149		} else {
4150			hdr->hdr.l3.flags = (cast_type == RTN_MULTICAST) ?
4151				QETH_CAST_MULTICAST | QETH_HDR_PASSTHRU :
4152				QETH_CAST_UNICAST | QETH_HDR_PASSTHRU;
4153		}
4154	}
4155}
4156
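/*
 * Map the linear data of an skb into the SBAL elements of a QDIO
 * buffer.  The data is split at page boundaries because a single SBAL
 * element must not cross a page.  The first element of a frame is
 * flagged FIRST_FRAG (unless the caller already filled a TSO header
 * element), the last one LAST_FRAG and everything in between
 * MIDDLE_FRAG; a frame that fits into one element keeps flags 0.
 */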
4157static void
4158__qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer,
4159		   int is_tso, int *next_element_to_fill)
4160{
4161	int length = skb->len;
4162	int length_here;
4163	int element;
4164	char *data;
4165	int first_lap;
4166
4167	element = *next_element_to_fill;
4168	data = skb->data;
4169	first_lap = (is_tso == 0 ? 1 : 0);
4170
4171	while (length > 0) {
4172		/* length_here is the remaining amount of data in this page */
4173		length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
4174		if (length < length_here)
4175			length_here = length;
4176
4177		buffer->element[element].addr = data;
4178		buffer->element[element].length = length_here;
4179		length -= length_here;
4180		if (!length) {
4181			if (first_lap)
4182				buffer->element[element].flags = 0;
4183			else
4184				buffer->element[element].flags =
4185				    SBAL_FLAGS_LAST_FRAG;
4186		} else {
4187			if (first_lap)
4188				buffer->element[element].flags =
4189				    SBAL_FLAGS_FIRST_FRAG;
4190			else
4191				buffer->element[element].flags =
4192				    SBAL_FLAGS_MIDDLE_FRAG;
4193		}
4194		data += length_here;
4195		element++;
4196		first_lap = 0;
4197	}
4198	*next_element_to_fill = element;
4199}
4200
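/*
 * Add an skb to an outbound buffer.  For TSO frames the combined
 * qeth/TSO header gets an SBAL element of its own before the payload
 * is mapped.  The return value tells the caller whether the buffer
 * must be flushed: in non-packing mode every buffer is primed
 * immediately, in packing mode only once it has run out of free
 * elements.
 */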
4201static int
4202qeth_fill_buffer(struct qeth_qdio_out_q *queue,
4203		 struct qeth_qdio_out_buffer *buf,
4204		 struct sk_buff *skb)
4205{
4206	struct qdio_buffer *buffer;
4207	struct qeth_hdr_tso *hdr;
4208	int flush_cnt = 0, hdr_len, large_send = 0;
4209
4210	QETH_DBF_TEXT(trace, 6, "qdfillbf");
4211
4212	buffer = buf->buffer;
4213	atomic_inc(&skb->users);
4214	skb_queue_tail(&buf->skb_list, skb);
4215
4216	hdr = (struct qeth_hdr_tso *) skb->data;
4217	/* check first whether this is a TSO frame ... */
4218	if (hdr->hdr.hdr.l3.id == QETH_HEADER_TYPE_TSO) {
4219		int element = buf->next_element_to_fill;
4220
4221		hdr_len = sizeof(struct qeth_hdr_tso) + hdr->ext.dg_hdr_len;
4222		/* fill the first buffer element with header information only */
4223		buffer->element[element].addr = skb->data;
4224		buffer->element[element].length = hdr_len;
4225		buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG;
4226		buf->next_element_to_fill++;
4227		skb->data += hdr_len;
4228		skb->len  -= hdr_len;
4229		large_send = 1;
4230	}
4231	if (skb_shinfo(skb)->nr_frags == 0)
4232		__qeth_fill_buffer(skb, buffer, large_send,
4233				   (int *)&buf->next_element_to_fill);
4234	else
4235		__qeth_fill_buffer_frag(skb, buffer, large_send,
4236					(int *)&buf->next_element_to_fill);
4237
4238	if (!queue->do_pack) {
4239		QETH_DBF_TEXT(trace, 6, "fillbfnp");
4240		/* set state to PRIMED -> will be flushed */
4241		atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
4242		flush_cnt = 1;
4243	} else {
4244		QETH_DBF_TEXT(trace, 6, "fillbfpa");
4245		if (queue->card->options.performance_stats)
4246			queue->card->perf_stats.skbs_sent_pack++;
4247		if (buf->next_element_to_fill >=
4248				QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
4249			/*
4250			 * packed buffer is full -> set state PRIMED
4251			 * -> will be flushed
4252			 */
4253			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
4254			flush_cnt = 1;
4255		}
4256	}
4257	return flush_cnt;
4258}
4259
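/*
 * Fast variant of the send path, used for IQD (HiperSockets) devices:
 * no packing is done, the frame (or, for EDDP, the buffers the context
 * needs) is filled into the next buffer and flushed right away.
 * Returns -EBUSY if that buffer is not empty yet.
 */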
4260static int
4261qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4262			 struct sk_buff *skb, struct qeth_hdr *hdr,
4263			 int elements_needed,
4264			 struct qeth_eddp_context *ctx)
4265{
4266	struct qeth_qdio_out_buffer *buffer;
4267	int buffers_needed = 0;
4268	int flush_cnt = 0;
4269	int index;
4270
4271	QETH_DBF_TEXT(trace, 6, "dosndpfa");
4272
4273	/* spin until we get the queue ... */
4274	while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
4275			      QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
4276	/* ... now we've got the queue */
4277	index = queue->next_buf_to_fill;
4278	buffer = &queue->bufs[queue->next_buf_to_fill];
4279	/*
4280	 * check if buffer is empty to make sure that we do not 'overtake'
4281	 * ourselves and try to fill a buffer that is already primed
4282	 */
4283	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
4284		goto out;
4285	if (ctx == NULL)
4286		queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
4287					  QDIO_MAX_BUFFERS_PER_Q;
4288	else {
4289		buffers_needed = qeth_eddp_check_buffers_for_context(queue,ctx);
4290		if (buffers_needed < 0)
4291			goto out;
4292		queue->next_buf_to_fill =
4293			(queue->next_buf_to_fill + buffers_needed) %
4294			QDIO_MAX_BUFFERS_PER_Q;
4295	}
4296	atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
4297	if (ctx == NULL) {
4298		qeth_fill_buffer(queue, buffer, skb);
4299		qeth_flush_buffers(queue, 0, index, 1);
4300	} else {
4301		flush_cnt = qeth_eddp_fill_buffer(queue, ctx, index);
4302		WARN_ON(buffers_needed != flush_cnt);
4303		qeth_flush_buffers(queue, 0, index, flush_cnt);
4304	}
4305	return 0;
4306out:
4307	atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
4308	return -EBUSY;
4309}
4310
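/*
 * Regular send path with packing support.  The queue belongs to
 * whoever switches queue->state from UNLOCKED to LOCKED via cmpxchg.
 * In packing mode a frame that no longer fits primes the current
 * buffer and moves on to the next one.  After the fill the buffers are
 * flushed; the final atomic_dec_return() loop re-checks the queue in
 * case the output handler signalled via LOCKED_FLUSH that packing
 * should stop or that a buffer must be flushed to get a PCI flag out.
 */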
4311static int
4312qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4313		    struct sk_buff *skb, struct qeth_hdr *hdr,
4314		    int elements_needed, struct qeth_eddp_context *ctx)
4315{
4316	struct qeth_qdio_out_buffer *buffer;
4317	int start_index;
4318	int flush_count = 0;
4319	int do_pack = 0;
4320	int tmp;
4321	int rc = 0;
4322
4323	QETH_DBF_TEXT(trace, 6, "dosndpkt");
4324
4325	/* spin until we get the queue ... */
4326	while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
4327			      QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
4328	start_index = queue->next_buf_to_fill;
4329	buffer = &queue->bufs[queue->next_buf_to_fill];
4330	/*
4331	 * check if buffer is empty to make sure that we do not 'overtake'
4332	 * ourselves and try to fill a buffer that is already primed
4333	 */
4334	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
4335		atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
4336		return -EBUSY;
4337	}
4338	/* check if we need to switch packing state of this queue */
4339	qeth_switch_to_packing_if_needed(queue);
4340	if (queue->do_pack){
4341		do_pack = 1;
4342		if (ctx == NULL) {
4343			/* does packet fit in current buffer? */
4344			if ((QETH_MAX_BUFFER_ELEMENTS(card) -
4345			    buffer->next_element_to_fill) < elements_needed) {
4346				/* ... no -> set state PRIMED */
4347				atomic_set(&buffer->state,QETH_QDIO_BUF_PRIMED);
4348				flush_count++;
4349				queue->next_buf_to_fill =
4350					(queue->next_buf_to_fill + 1) %
4351					QDIO_MAX_BUFFERS_PER_Q;
4352				buffer = &queue->bufs[queue->next_buf_to_fill];
4353				/* we did a step forward, so check buffer state
4354				 * again */
4355				if (atomic_read(&buffer->state) !=
4356						QETH_QDIO_BUF_EMPTY){
4357					qeth_flush_buffers(queue, 0, start_index, flush_count);
4358					atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
4359					return -EBUSY;
4360				}
4361			}
4362		} else {
4363			/* check if we have enough elements (including following
4364			 * free buffers) to handle eddp context */
4365			if (qeth_eddp_check_buffers_for_context(queue,ctx) < 0){
4366				printk(KERN_WARNING "eddp tx_dropped 1\n");
4367				rc = -EBUSY;
4368				goto out;
4369			}
4370		}
4371	}
4372	if (ctx == NULL)
4373		tmp = qeth_fill_buffer(queue, buffer, skb);
4374	else {
4375		tmp = qeth_eddp_fill_buffer(queue,ctx,queue->next_buf_to_fill);
4376		if (tmp < 0) {
4377			printk(KERN_WARNING "eddp tx_dropped 2\n");
4378			rc = -EBUSY;
4379			goto out;
4380		}
4381	}
4382	queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) %
4383				  QDIO_MAX_BUFFERS_PER_Q;
4384	flush_count += tmp;
4385out:
4386	if (flush_count)
4387		qeth_flush_buffers(queue, 0, start_index, flush_count);
4388	else if (!atomic_read(&queue->set_pci_flags_count))
4389		atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
4390	/*
4391	 * queue->state will go from LOCKED -> UNLOCKED or from
4392	 * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
4393	 * (switch packing state or flush buffer to get another pci flag out).
4394	 * In that case we will enter this loop
4395	 */
4396	while (atomic_dec_return(&queue->state)){
4397		flush_count = 0;
4398		start_index = queue->next_buf_to_fill;
4399		/* check if we can go back to non-packing state */
4400		flush_count += qeth_switch_to_nonpacking_if_needed(queue);
4401		/*
4402		 * check if we need to flush a packing buffer to get a pci
4403		 * flag out on the queue
4404		 */
4405		if (!flush_count && !atomic_read(&queue->set_pci_flags_count))
4406			flush_count += qeth_flush_buffers_on_no_pci(queue);
4407		if (flush_count)
4408			qeth_flush_buffers(queue, 0, start_index, flush_count);
4409	}
4410	/* at this point the queue is UNLOCKED again */
4411	if (queue->card->options.performance_stats && do_pack)
4412		queue->card->perf_stats.bufs_sent_pack += flush_count;
4413
4414	return rc;
4415}
4416
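/*
 * Number of SBAL elements needed for this skb: nr_frags + 1 for a
 * fragmented skb, otherwise one element per page touched by the
 * linear data.  For example, a 5000-byte linear frame whose qeth
 * header starts 200 bytes into a 4K page touches two pages, so
 * 1 + ((200 + 5000) >> PAGE_SHIFT) = 2 elements are needed.
 * Returns 0 (and the packet is discarded) if the frame would exceed
 * the buffer's element limit.
 */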
4417static int
4418qeth_get_elements_no(struct qeth_card *card, void *hdr,
4419		     struct sk_buff *skb, int elems)
4420{
4421	int elements_needed = 0;
4422
4423	if (skb_shinfo(skb)->nr_frags > 0)
4424		elements_needed = (skb_shinfo(skb)->nr_frags + 1);
4425	if (elements_needed == 0)
4426		elements_needed = 1 + (((((unsigned long) hdr) % PAGE_SIZE)
4427					+ skb->len) >> PAGE_SHIFT);
4428	if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
4429		PRINT_ERR("Invalid size of IP packet "
4430			  "(Number=%d / Length=%d). Discarded.\n",
4431			  (elements_needed+elems), skb->len);
4432		return 0;
4433	}
4434	return elements_needed;
4435}
4436
4437
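/*
 * Central transmit routine, called from hard_start_xmit: pick the
 * outbound queue, strip a fake link-layer header again if one was
 * faked in, prepend the qeth header (OSN devices already provide it),
 * set up TSO or EDDP if a large send was requested, and hand the frame
 * to qeth_do_send_packet() or, for IQD devices, to
 * qeth_do_send_packet_fast().
 */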
4438static int
4439qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
4440{
4441	int ipv = 0;
4442	int cast_type;
4443	struct qeth_qdio_out_q *queue;
4444	struct qeth_hdr *hdr = NULL;
4445	int elements_needed = 0;
4446	enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO;
4447	struct qeth_eddp_context *ctx = NULL;
4448	int tx_bytes = skb->len;
4449	unsigned short nr_frags = skb_shinfo(skb)->nr_frags;
4450	unsigned short tso_size = skb_shinfo(skb)->gso_size;
4451	struct sk_buff *new_skb, *new_skb2;
4452	int rc;
4453
4454	QETH_DBF_TEXT(trace, 6, "sendpkt");
4455
4456	new_skb = skb;
4457	if ((card->info.type == QETH_CARD_TYPE_OSN) &&
4458	    (skb->protocol == htons(ETH_P_IPV6)))
4459		return -EPERM;
4460	cast_type = qeth_get_cast_type(card, skb);
4461	if ((cast_type == RTN_BROADCAST) &&
4462	    (card->info.broadcast_capable == 0))
4463		return -EPERM;
4464	queue = card->qdio.out_qs
4465		[qeth_get_priority_queue(card, skb, ipv, cast_type)];
4466	if (!card->options.layer2) {
4467		ipv = qeth_get_ip_version(skb);
4468		if ((card->dev->hard_header == qeth_fake_header) && ipv) {
4469			new_skb = qeth_pskb_unshare(skb, GFP_ATOMIC);
4470			if (!new_skb)
4471				return -ENOMEM;
4472			if(card->dev->type == ARPHRD_IEEE802_TR){
4473				skb_pull(new_skb, QETH_FAKE_LL_LEN_TR);
4474			} else {
4475				skb_pull(new_skb, QETH_FAKE_LL_LEN_ETH);
4476			}
4477		}
4478	}
4479	if (skb_is_gso(skb))
4480		large_send = card->options.large_send;
4481	/* check on OSN device*/
4482	if (card->info.type == QETH_CARD_TYPE_OSN)
4483		hdr = (struct qeth_hdr *)new_skb->data;
4484	/*are we able to do TSO ? */
4485	if ((large_send == QETH_LARGE_SEND_TSO) &&
4486	    (cast_type == RTN_UNSPEC)) {
4487		rc = qeth_tso_prepare_packet(card, new_skb, ipv, cast_type);
4488		if (rc) {
4489			__qeth_free_new_skb(skb, new_skb);
4490			return rc;
4491		}
4492		elements_needed++;
4493	} else if (card->info.type != QETH_CARD_TYPE_OSN) {
4494		new_skb2 = qeth_prepare_skb(card, new_skb, &hdr, ipv);
4495		if (!new_skb2) {
4496			__qeth_free_new_skb(skb, new_skb);
4497			return -EINVAL;
4498		}
4499		if (new_skb != skb)
4500			__qeth_free_new_skb(new_skb2, new_skb);
4501		new_skb = new_skb2;
4502		qeth_fill_header(card, hdr, new_skb, ipv, cast_type);
4503	}
4504	if (large_send == QETH_LARGE_SEND_EDDP) {
4505		ctx = qeth_eddp_create_context(card, new_skb, hdr,
4506					       skb->sk->sk_protocol);
4507		if (ctx == NULL) {
4508			__qeth_free_new_skb(skb, new_skb);
4509			PRINT_WARN("could not create eddp context\n");
4510			return -EINVAL;
4511		}
4512	} else {
4513		int elems = qeth_get_elements_no(card,(void*) hdr, new_skb,
4514						 elements_needed);
4515		if (!elems) {
4516			__qeth_free_new_skb(skb, new_skb);
4517			return -EINVAL;
4518		}
4519		elements_needed += elems;
4520	}
4521
4522	if (card->info.type != QETH_CARD_TYPE_IQD)
4523		rc = qeth_do_send_packet(card, queue, new_skb, hdr,
4524					 elements_needed, ctx);
4525	else
4526		rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
4527					      elements_needed, ctx);
4528	if (!rc) {
4529		card->stats.tx_packets++;
4530		card->stats.tx_bytes += tx_bytes;
4531		if (new_skb != skb)
4532			dev_kfree_skb_any(skb);
4533		if (card->options.performance_stats) {
4534			if (tso_size &&
4535			    !(large_send == QETH_LARGE_SEND_NO)) {
4536				card->perf_stats.large_send_bytes += tx_bytes;
4537				card->perf_stats.large_send_cnt++;
4538			}
4539			if (nr_frags > 0) {
4540				card->perf_stats.sg_skbs_sent++;
4541				/* nr_frags + skb->data */
4542				card->perf_stats.sg_frags_sent +=
4543					nr_frags + 1;
4544			}
4545		}
4546	} else {
4547		card->stats.tx_dropped++;
4548		__qeth_free_new_skb(skb, new_skb);
4549	}
4550	if (ctx != NULL) {
4551		/* drop creator's reference */
4552		qeth_eddp_put_context(ctx);
4553		/* free skb; it's not referenced by a buffer */
4554		if (!rc)
4555		       dev_kfree_skb_any(new_skb);
4556	}
4557	return rc;
4558}
4559
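/*
 * Minimal MII emulation for the SIOCGMIIREG ioctl.  The register
 * contents are synthesized from the card's link type, MAC address and
 * statistics rather than read from a real PHY.
 */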
4560static int
4561qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
4562{
4563	struct qeth_card *card = (struct qeth_card *) dev->priv;
4564	int rc = 0;
4565
4566	switch(regnum){
4567	case MII_BMCR: /* Basic mode control register */
4568		rc = BMCR_FULLDPLX;
4569		if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH)&&
4570		    (card->info.link_type != QETH_LINK_TYPE_OSN) &&
4571		    (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH))
4572			rc |= BMCR_SPEED100;
4573		break;
4574	case MII_BMSR: /* Basic mode status register */
4575		rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
4576		     BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
4577		     BMSR_100BASE4;
4578		break;
4579	case MII_PHYSID1: /* PHYS ID 1 */
4580		rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
4581		     dev->dev_addr[2];
4582		rc = (rc >> 5) & 0xFFFF;
4583		break;
4584	case MII_PHYSID2: /* PHYS ID 2 */
4585		rc = (dev->dev_addr[2] << 10) & 0xFFFF;
4586		break;
4587	case MII_ADVERTISE: /* Advertisement control reg */
4588		rc = ADVERTISE_ALL;
4589		break;
4590	case MII_LPA: /* Link partner ability reg */
4591		rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
4592		     LPA_100BASE4 | LPA_LPACK;
4593		break;
4594	case MII_EXPANSION: /* Expansion register */
4595		break;
4596	case MII_DCOUNTER: /* disconnect counter */
4597		break;
4598	case MII_FCSCOUNTER: /* false carrier counter */
4599		break;
4600	case MII_NWAYTEST: /* N-way auto-neg test register */
4601		break;
4602	case MII_RERRCOUNTER: /* rx error counter */
4603		rc = card->stats.rx_errors;
4604		break;
4605	case MII_SREVISION: /* silicon revision */
4606		break;
4607	case MII_RESV1: /* reserved 1 */
4608		break;
4609	case MII_LBRERROR: /* loopback, rx, bypass error */
4610		break;
4611	case MII_PHYADDR: /* physical address */
4612		break;
4613	case MII_RESV2: /* reserved 2 */
4614		break;
4615	case MII_TPISTATUS: /* TPI status for 10mbps */
4616		break;
4617	case MII_NCONFIG: /* network interface config */
4618		break;
4619	default:
4620		break;
4621	}
4622	return rc;
4623}
4624
4625
4626static const char *
4627qeth_arp_get_error_cause(int *rc)
4628{
4629	switch (*rc) {
4630	case QETH_IPA_ARP_RC_FAILED:
4631		*rc = -EIO;
4632		return "operation failed";
4633	case QETH_IPA_ARP_RC_NOTSUPP:
4634		*rc = -EOPNOTSUPP;
4635		return "operation not supported";
4636	case QETH_IPA_ARP_RC_OUT_OF_RANGE:
4637		*rc = -EINVAL;
4638		return "argument out of range";
4639	case QETH_IPA_ARP_RC_Q_NOTSUPP:
4640		*rc = -EOPNOTSUPP;
4641		return "query operation not supported";
4642	case QETH_IPA_ARP_RC_Q_NO_DATA:
4643		*rc = -ENOENT;
4644		return "no query data available";
4645	default:
4646		return "unknown error";
4647	}
4648}
4649
4650static int
4651qeth_send_simple_setassparms(struct qeth_card *, enum qeth_ipa_funcs,
4652			     __u16, long);
4653
4654static int
4655qeth_arp_set_no_entries(struct qeth_card *card, int no_entries)
4656{
4657	int tmp;
4658	int rc;
4659
4660	QETH_DBF_TEXT(trace,3,"arpstnoe");
4661
4662	/*
4663	 * currently GuestLAN only supports the ARP assist function
4664	 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_SET_NO_ENTRIES;
4665	 * thus we say EOPNOTSUPP for this ARP function
4666	 */
4667	if (card->info.guestlan)
4668		return -EOPNOTSUPP;
4669	if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
4670		PRINT_WARN("ARP processing not supported "
4671			   "on %s!\n", QETH_CARD_IFNAME(card));
4672		return -EOPNOTSUPP;
4673	}
4674	rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
4675					  IPA_CMD_ASS_ARP_SET_NO_ENTRIES,
4676					  no_entries);
4677	if (rc) {
4678		tmp = rc;
4679		PRINT_WARN("Could not set number of ARP entries on %s: "
4680			   "%s (0x%x/%d)\n",
4681			   QETH_CARD_IFNAME(card), qeth_arp_get_error_cause(&rc),
4682			   tmp, tmp);
4683	}
4684	return rc;
4685}
4686
4687static void
4688qeth_copy_arp_entries_stripped(struct qeth_arp_query_info *qinfo,
4689		               struct qeth_arp_query_data *qdata,
4690			       int entry_size, int uentry_size)
4691{
4692	char *entry_ptr;
4693	char *uentry_ptr;
4694	int i;
4695
4696	entry_ptr = (char *)&qdata->data;
4697	uentry_ptr = (char *)(qinfo->udata + qinfo->udata_offset);
4698	for (i = 0; i < qdata->no_entries; ++i){
4699		/* strip off 32 bytes "media specific information" */
4700		memcpy(uentry_ptr, (entry_ptr + 32), entry_size - 32);
4701		entry_ptr += entry_size;
4702		uentry_ptr += uentry_size;
4703	}
4704}
4705
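/*
 * Callback for IPA_CMD_ASS_ARP_QUERY_INFO.  The adapter may answer in
 * several replies; each one is appended to the user buffer described
 * by the qeth_arp_query_info cookie.  reply_bits selects the entry
 * format (qeth_arp_qi_entry5 or entry7), and with
 * QETH_QARP_STRIP_ENTRIES the 32 bytes of media-specific information
 * are stripped from every entry before copying.
 */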
4706static int
4707qeth_arp_query_cb(struct qeth_card *card, struct qeth_reply *reply,
4708		  unsigned long data)
4709{
4710	struct qeth_ipa_cmd *cmd;
4711	struct qeth_arp_query_data *qdata;
4712	struct qeth_arp_query_info *qinfo;
4713	int entry_size;
4714	int uentry_size;
4715	int i;
4716
4717	QETH_DBF_TEXT(trace,4,"arpquecb");
4718
4719	qinfo = (struct qeth_arp_query_info *) reply->param;
4720	cmd = (struct qeth_ipa_cmd *) data;
4721	if (cmd->hdr.return_code) {
4722		QETH_DBF_TEXT_(trace,4,"qaer1%i", cmd->hdr.return_code);
4723		return 0;
4724	}
4725	if (cmd->data.setassparms.hdr.return_code) {
4726		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
4727		QETH_DBF_TEXT_(trace,4,"qaer2%i", cmd->hdr.return_code);
4728		return 0;
4729	}
4730	qdata = &cmd->data.setassparms.data.query_arp;
4731	switch(qdata->reply_bits){
4732	case 5:
4733		uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry5);
4734		if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
4735			uentry_size = sizeof(struct qeth_arp_qi_entry5_short);
4736		break;
4737	case 7:
4738		/* fall through to default */
4739	default:
4740		/* tr is the same as eth -> entry7 */
4741		uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry7);
4742		if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
4743			uentry_size = sizeof(struct qeth_arp_qi_entry7_short);
4744		break;
4745	}
4746	/* check if there is enough room in userspace */
4747	if ((qinfo->udata_len - qinfo->udata_offset) <
4748			qdata->no_entries * uentry_size){
4749		QETH_DBF_TEXT_(trace, 4, "qaer3%i", -ENOMEM);
4750		cmd->hdr.return_code = -ENOMEM;
4751		PRINT_WARN("query ARP user space buffer is too small for "
4752			   "the returned number of ARP entries. "
4753			   "Aborting query!\n");
4754		goto out_error;
4755	}
4756	QETH_DBF_TEXT_(trace, 4, "anore%i",
4757		       cmd->data.setassparms.hdr.number_of_replies);
4758	QETH_DBF_TEXT_(trace, 4, "aseqn%i", cmd->data.setassparms.hdr.seq_no);
4759	QETH_DBF_TEXT_(trace, 4, "anoen%i", qdata->no_entries);
4760
4761	if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) {
4762		/* strip off "media specific information" */
4763		qeth_copy_arp_entries_stripped(qinfo, qdata, entry_size,
4764					       uentry_size);
4765	} else
4766		/*copy entries to user buffer*/
4767		memcpy(qinfo->udata + qinfo->udata_offset,
4768		       (char *)&qdata->data, qdata->no_entries*uentry_size);
4769
4770	qinfo->no_entries += qdata->no_entries;
4771	qinfo->udata_offset += (qdata->no_entries*uentry_size);
4772	/* check if all replies received ... */
4773	if (cmd->data.setassparms.hdr.seq_no <
4774	    cmd->data.setassparms.hdr.number_of_replies)
4775		return 1;
4776	memcpy(qinfo->udata, &qinfo->no_entries, 4);
4777	/* keep STRIP_ENTRIES flag so the user program can distinguish
4778	 * stripped entries from normal ones */
4779	if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
4780		qdata->reply_bits |= QETH_QARP_STRIP_ENTRIES;
4781	memcpy(qinfo->udata + QETH_QARP_MASK_OFFSET,&qdata->reply_bits,2);
4782	return 0;
4783out_error:
4784	i = 0;
4785	memcpy(qinfo->udata, &i, 4);
4786	return 0;
4787}
4788
4789static int
4790qeth_send_ipa_arp_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
4791		      int len, int (*reply_cb)(struct qeth_card *,
4792					       struct qeth_reply *,
4793					       unsigned long),
4794		      void *reply_param)
4795{
4796	QETH_DBF_TEXT(trace,4,"sendarp");
4797
4798	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
4799	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
4800	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
4801	return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
4802				      reply_cb, reply_param);
4803}
4804
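/*
 * Like qeth_send_ipa_arp_cmd(), but the SNMP payload length is not
 * fixed, so the total and per-PDU length fields of the IPA PDU header
 * are patched before the command is sent.
 */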
4805static int
4806qeth_send_ipa_snmp_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
4807		      int len, int (*reply_cb)(struct qeth_card *,
4808					       struct qeth_reply *,
4809					       unsigned long),
4810		      void *reply_param)
4811{
4812	u16 s1, s2;
4813
4814	QETH_DBF_TEXT(trace,4,"sendsnmp");
4815
4816	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
4817	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
4818	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
4819	/* adjust PDU length fields in IPA_PDU_HEADER */
4820	s1 = (u32) IPA_PDU_HEADER_SIZE + len;
4821	s2 = (u32) len;
4822	memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
4823	memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
4824	memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
4825	memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
4826	return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
4827				      reply_cb, reply_param);
4828}
4829
4830static struct qeth_cmd_buffer *
4831qeth_get_setassparms_cmd(struct qeth_card *, enum qeth_ipa_funcs,
4832			 __u16, __u16, enum qeth_prot_versions);
4833static int
4834qeth_arp_query(struct qeth_card *card, char __user *udata)
4835{
4836	struct qeth_cmd_buffer *iob;
4837	struct qeth_arp_query_info qinfo = {0, };
4838	int tmp;
4839	int rc;
4840
4841	QETH_DBF_TEXT(trace,3,"arpquery");
4842
4843	if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/
4844			       IPA_ARP_PROCESSING)) {
4845		PRINT_WARN("ARP processing not supported "
4846			   "on %s!\n", QETH_CARD_IFNAME(card));
4847		return -EOPNOTSUPP;
4848	}
4849	/* get size of userspace buffer and mask_bits -> 6 bytes */
4850	if (copy_from_user(&qinfo, udata, 6))
4851		return -EFAULT;
4852	if (!(qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL)))
4853		return -ENOMEM;
4854	qinfo.udata_offset = QETH_QARP_ENTRIES_OFFSET;
4855	iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
4856				       IPA_CMD_ASS_ARP_QUERY_INFO,
4857				       sizeof(int),QETH_PROT_IPV4);
4858
4859	rc = qeth_send_ipa_arp_cmd(card, iob,
4860				   QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN,
4861				   qeth_arp_query_cb, (void *)&qinfo);
4862	if (rc) {
4863		tmp = rc;
4864		PRINT_WARN("Error while querying ARP cache on %s: %s "
4865			   "(0x%x/%d)\n",
4866			   QETH_CARD_IFNAME(card), qeth_arp_get_error_cause(&rc),
4867			   tmp, tmp);
4868		if (copy_to_user(udata, qinfo.udata, 4))
4869			rc = -EFAULT;
4870	} else {
4871		if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
4872			rc = -EFAULT;
4873	}
4874	kfree(qinfo.udata);
4875	return rc;
4876}
4877
4878/**
4879 * SNMP command callback
4880 */
4881static int
4882qeth_snmp_command_cb(struct qeth_card *card, struct qeth_reply *reply,
4883		     unsigned long sdata)
4884{
4885	struct qeth_ipa_cmd *cmd;
4886	struct qeth_arp_query_info *qinfo;
4887	struct qeth_snmp_cmd *snmp;
4888	unsigned char *data;
4889	__u16 data_len;
4890
4891	QETH_DBF_TEXT(trace,3,"snpcmdcb");
4892
4893	cmd = (struct qeth_ipa_cmd *) sdata;
4894	data = (unsigned char *)((char *)cmd - reply->offset);
4895	qinfo = (struct qeth_arp_query_info *) reply->param;
4896	snmp = &cmd->data.setadapterparms.data.snmp;
4897
4898	if (cmd->hdr.return_code) {
4899		QETH_DBF_TEXT_(trace,4,"scer1%i", cmd->hdr.return_code);
4900		return 0;
4901	}
4902	if (cmd->data.setadapterparms.hdr.return_code) {
4903		cmd->hdr.return_code = cmd->data.setadapterparms.hdr.return_code;
4904		QETH_DBF_TEXT_(trace,4,"scer2%i", cmd->hdr.return_code);
4905		return 0;
4906	}
4907	data_len = *((__u16*)QETH_IPA_PDU_LEN_PDU1(data));
4908	if (cmd->data.setadapterparms.hdr.seq_no == 1)
4909		data_len -= (__u16)((char *)&snmp->data - (char *)cmd);
4910	else
4911		data_len -= (__u16)((char*)&snmp->request - (char *)cmd);
4912
4913	/* check if there is enough room in userspace */
4914	if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
4915		QETH_DBF_TEXT_(trace, 4, "scer3%i", -ENOMEM);
4916		cmd->hdr.return_code = -ENOMEM;
4917		return 0;
4918	}
4919	QETH_DBF_TEXT_(trace, 4, "snore%i",
4920		       cmd->data.setadapterparms.hdr.used_total);
4921	QETH_DBF_TEXT_(trace, 4, "sseqn%i", cmd->data.setadapterparms.hdr.seq_no);
4922	/*copy entries to user buffer*/
4923	if (cmd->data.setadapterparms.hdr.seq_no == 1) {
4924		memcpy(qinfo->udata + qinfo->udata_offset,
4925		       (char *)snmp,
4926		       data_len + offsetof(struct qeth_snmp_cmd,data));
4927		qinfo->udata_offset += offsetof(struct qeth_snmp_cmd, data);
4928	} else {
4929		memcpy(qinfo->udata + qinfo->udata_offset,
4930		       (char *)&snmp->request, data_len);
4931	}
4932	qinfo->udata_offset += data_len;
4933	/* check if all replies received ... */
4934	QETH_DBF_TEXT_(trace, 4, "srtot%i",
4935		       cmd->data.setadapterparms.hdr.used_total);
4936	QETH_DBF_TEXT_(trace, 4, "srseq%i",
4937		       cmd->data.setadapterparms.hdr.seq_no);
4938	if (cmd->data.setadapterparms.hdr.seq_no <
4939	    cmd->data.setadapterparms.hdr.used_total)
4940		return 1;
4941	return 0;
4942}
4943
4944static struct qeth_cmd_buffer *
4945qeth_get_ipacmd_buffer(struct qeth_card *, enum qeth_ipa_cmds,
4946		       enum qeth_prot_versions );
4947
4948static struct qeth_cmd_buffer *
4949qeth_get_adapter_cmd(struct qeth_card *card, __u32 command, __u32 cmdlen)
4950{
4951	struct qeth_cmd_buffer *iob;
4952	struct qeth_ipa_cmd *cmd;
4953
4954	iob = qeth_get_ipacmd_buffer(card,IPA_CMD_SETADAPTERPARMS,
4955				     QETH_PROT_IPV4);
4956	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
4957	cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
4958	cmd->data.setadapterparms.hdr.command_code = command;
4959	cmd->data.setadapterparms.hdr.used_total = 1;
4960	cmd->data.setadapterparms.hdr.seq_no = 1;
4961
4962	return iob;
4963}
4964
4965/**
4966 * function to send SNMP commands to OSA-E card
4967 */
4968static int
4969qeth_snmp_command(struct qeth_card *card, char __user *udata)
4970{
4971	struct qeth_cmd_buffer *iob;
4972	struct qeth_ipa_cmd *cmd;
4973	struct qeth_snmp_ureq *ureq;
4974	int req_len;
4975	struct qeth_arp_query_info qinfo = {0, };
4976	int rc = 0;
4977
4978	QETH_DBF_TEXT(trace,3,"snmpcmd");
4979
4980	if (card->info.guestlan)
4981		return -EOPNOTSUPP;
4982
4983	if ((!qeth_adp_supported(card,IPA_SETADP_SET_SNMP_CONTROL)) &&
4984	    (!card->options.layer2) ) {
4985		PRINT_WARN("SNMP Query MIBS not supported "
4986			   "on %s!\n", QETH_CARD_IFNAME(card));
4987		return -EOPNOTSUPP;
4988	}
4989	/* skip 4 bytes (data_len struct member) to get req_len */
4990	if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int)))
4991		return -EFAULT;
4992	ureq = kmalloc(req_len+sizeof(struct qeth_snmp_ureq_hdr), GFP_KERNEL);
4993	if (!ureq) {
4994		QETH_DBF_TEXT(trace, 2, "snmpnome");
4995		return -ENOMEM;
4996	}
4997	if (copy_from_user(ureq, udata,
4998			req_len+sizeof(struct qeth_snmp_ureq_hdr))){
4999		kfree(ureq);
5000		return -EFAULT;
5001	}
5002	qinfo.udata_len = ureq->hdr.data_len;
5003	if (!(qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL))){
5004		kfree(ureq);
5005		return -ENOMEM;
5006	}
5007	qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
5008
5009	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL,
5010				   QETH_SNMP_SETADP_CMDLENGTH + req_len);
5011	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5012	memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len);
5013	rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
5014				    qeth_snmp_command_cb, (void *)&qinfo);
5015	if (rc)
5016		PRINT_WARN("SNMP command failed on %s: (0x%x)\n",
5017			   QETH_CARD_IFNAME(card), rc);
5018	else {
5019		if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
5020			rc = -EFAULT;
5021	}
5022
5023	kfree(ureq);
5024	kfree(qinfo.udata);
5025	return rc;
5026}
5027
5028static int
5029qeth_default_setassparms_cb(struct qeth_card *, struct qeth_reply *,
5030			    unsigned long);
5031
5032static int
5033qeth_default_setadapterparms_cb(struct qeth_card *card,
5034                                struct qeth_reply *reply,
5035                                unsigned long data);
5036static int
5037qeth_send_setassparms(struct qeth_card *, struct qeth_cmd_buffer *,
5038		      __u16, long,
5039		      int (*reply_cb)
5040		      (struct qeth_card *, struct qeth_reply *, unsigned long),
5041		      void *reply_param);
5042
5043static int
5044qeth_arp_add_entry(struct qeth_card *card, struct qeth_arp_cache_entry *entry)
5045{
5046	struct qeth_cmd_buffer *iob;
5047	char buf[16];
5048	int tmp;
5049	int rc;
5050
5051	QETH_DBF_TEXT(trace,3,"arpadent");
5052
5053	/*
5054	 * currently GuestLAN only supports the ARP assist function
5055	 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_ADD_ENTRY;
5056	 * thus we say EOPNOTSUPP for this ARP function
5057	 */
5058	if (card->info.guestlan)
5059		return -EOPNOTSUPP;
5060	if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
5061		PRINT_WARN("ARP processing not supported "
5062			   "on %s!\n", QETH_CARD_IFNAME(card));
5063		return -EOPNOTSUPP;
5064	}
5065
5066	iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
5067				       IPA_CMD_ASS_ARP_ADD_ENTRY,
5068				       sizeof(struct qeth_arp_cache_entry),
5069				       QETH_PROT_IPV4);
5070	rc = qeth_send_setassparms(card, iob,
5071				   sizeof(struct qeth_arp_cache_entry),
5072				   (unsigned long) entry,
5073				   qeth_default_setassparms_cb, NULL);
5074	if (rc) {
5075		tmp = rc;
5076		qeth_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
5077		PRINT_WARN("Could not add ARP entry for address %s on %s: "
5078			   "%s (0x%x/%d)\n",
5079			   buf, QETH_CARD_IFNAME(card),
5080			   qeth_arp_get_error_cause(&rc), tmp, tmp);
5081	}
5082	return rc;
5083}
5084
5085static int
5086qeth_arp_remove_entry(struct qeth_card *card, struct qeth_arp_cache_entry *entry)
5087{
5088	struct qeth_cmd_buffer *iob;
5089	char buf[16] = {0, };
5090	int tmp;
5091	int rc;
5092
5093	QETH_DBF_TEXT(trace,3,"arprment");
5094
5095	/*
5096	 * currently GuestLAN only supports the ARP assist function
5097	 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_REMOVE_ENTRY;
5098	 * thus we say EOPNOTSUPP for this ARP function
5099	 */
5100	if (card->info.guestlan)
5101		return -EOPNOTSUPP;
5102	if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
5103		PRINT_WARN("ARP processing not supported "
5104			   "on %s!\n", QETH_CARD_IFNAME(card));
5105		return -EOPNOTSUPP;
5106	}
5107	memcpy(buf, entry, 12);
5108	iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
5109				       IPA_CMD_ASS_ARP_REMOVE_ENTRY,
5110				       12,
5111				       QETH_PROT_IPV4);
5112	rc = qeth_send_setassparms(card, iob,
5113				   12, (unsigned long)buf,
5114				   qeth_default_setassparms_cb, NULL);
5115	if (rc) {
5116		tmp = rc;
5117		memset(buf, 0, 16);
5118		qeth_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
5119		PRINT_WARN("Could not delete ARP entry for address %s on %s: "
5120			   "%s (0x%x/%d)\n",
5121			   buf, QETH_CARD_IFNAME(card),
5122			   qeth_arp_get_error_cause(&rc), tmp, tmp);
5123	}
5124	return rc;
5125}
5126
5127static int
5128qeth_arp_flush_cache(struct qeth_card *card)
5129{
5130	int rc;
5131	int tmp;
5132
5133	QETH_DBF_TEXT(trace,3,"arpflush");
5134
5135	/*
5136	 * currently GuestLAN only supports the ARP assist function
5137	 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_FLUSH_CACHE;
5138	 * thus we say EOPNOTSUPP for this ARP function
5139	*/
5140	if (card->info.guestlan || (card->info.type == QETH_CARD_TYPE_IQD))
5141		return -EOPNOTSUPP;
5142	if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
5143		PRINT_WARN("ARP processing not supported "
5144			   "on %s!\n", QETH_CARD_IFNAME(card));
5145		return -EOPNOTSUPP;
5146	}
5147	rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
5148					  IPA_CMD_ASS_ARP_FLUSH_CACHE, 0);
5149	if (rc){
5150		tmp = rc;
5151		PRINT_WARN("Could not flush ARP cache on %s: %s (0x%x/%d)\n",
5152			   QETH_CARD_IFNAME(card), qeth_arp_get_error_cause(&rc),
5153			   tmp, tmp);
5154	}
5155	return rc;
5156}
5157
5158static int
5159qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5160{
5161	struct qeth_card *card = (struct qeth_card *)dev->priv;
5162	struct qeth_arp_cache_entry arp_entry;
5163	struct mii_ioctl_data *mii_data;
5164	int rc = 0;
5165
5166	if (!card)
5167		return -ENODEV;
5168
5169	if ((card->state != CARD_STATE_UP) &&
5170            (card->state != CARD_STATE_SOFTSETUP))
5171		return -ENODEV;
5172
5173	if (card->info.type == QETH_CARD_TYPE_OSN)
5174		return -EPERM;
5175
5176	switch (cmd){
5177	case SIOC_QETH_ARP_SET_NO_ENTRIES:
5178		if ( !capable(CAP_NET_ADMIN) ||
5179		     (card->options.layer2) ) {
5180			rc = -EPERM;
5181			break;
5182		}
5183		rc = qeth_arp_set_no_entries(card, rq->ifr_ifru.ifru_ivalue);
5184		break;
5185	case SIOC_QETH_ARP_QUERY_INFO:
5186		if ( !capable(CAP_NET_ADMIN) ||
5187		     (card->options.layer2) ) {
5188			rc = -EPERM;
5189			break;
5190		}
5191		rc = qeth_arp_query(card, rq->ifr_ifru.ifru_data);
5192		break;
5193	case SIOC_QETH_ARP_ADD_ENTRY:
5194		if ( !capable(CAP_NET_ADMIN) ||
5195		     (card->options.layer2) ) {
5196			rc = -EPERM;
5197			break;
5198		}
5199		if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
5200				   sizeof(struct qeth_arp_cache_entry)))
5201			rc = -EFAULT;
5202		else
5203			rc = qeth_arp_add_entry(card, &arp_entry);
5204		break;
5205	case SIOC_QETH_ARP_REMOVE_ENTRY:
5206		if ( !capable(CAP_NET_ADMIN) ||
5207		     (card->options.layer2) ) {
5208			rc = -EPERM;
5209			break;
5210		}
5211		if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
5212				   sizeof(struct qeth_arp_cache_entry)))
5213			rc = -EFAULT;
5214		else
5215			rc = qeth_arp_remove_entry(card, &arp_entry);
5216		break;
5217	case SIOC_QETH_ARP_FLUSH_CACHE:
5218		if ( !capable(CAP_NET_ADMIN) ||
5219		     (card->options.layer2) ) {
5220			rc = -EPERM;
5221			break;
5222		}
5223		rc = qeth_arp_flush_cache(card);
5224		break;
5225	case SIOC_QETH_ADP_SET_SNMP_CONTROL:
5226		rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
5227		break;
5228	case SIOC_QETH_GET_CARD_TYPE:
5229		if ((card->info.type == QETH_CARD_TYPE_OSAE) &&
5230		    !card->info.guestlan)
5231			return 1;
5232		return 0;
5233		break;
5234	case SIOCGMIIPHY:
5235		mii_data = if_mii(rq);
5236		mii_data->phy_id = 0;
5237		break;
5238	case SIOCGMIIREG:
5239		mii_data = if_mii(rq);
5240		if (mii_data->phy_id != 0)
5241			rc = -EINVAL;
5242		else
5243			mii_data->val_out = qeth_mdio_read(dev,mii_data->phy_id,
5244							   mii_data->reg_num);
5245		break;
5246	default:
5247		rc = -EOPNOTSUPP;
5248	}
5249	if (rc)
5250		QETH_DBF_TEXT_(trace, 2, "ioce%d", rc);
5251	return rc;
5252}
5253
5254static struct net_device_stats *
5255qeth_get_stats(struct net_device *dev)
5256{
5257	struct qeth_card *card;
5258
5259	card = (struct qeth_card *) (dev->priv);
5260
5261	QETH_DBF_TEXT(trace,5,"getstat");
5262
5263	return &card->stats;
5264}
5265
5266static int
5267qeth_change_mtu(struct net_device *dev, int new_mtu)
5268{
5269	struct qeth_card *card;
5270	char dbf_text[15];
5271
5272	card = (struct qeth_card *) (dev->priv);
5273
5274	QETH_DBF_TEXT(trace,4,"chgmtu");
5275	sprintf(dbf_text, "%8x", new_mtu);
5276	QETH_DBF_TEXT(trace,4,dbf_text);
5277
5278	if (new_mtu < 64)
5279		return -EINVAL;
5280	if (new_mtu > 65535)
5281		return -EINVAL;
5282	if ((!qeth_is_supported(card,IPA_IP_FRAGMENTATION)) &&
5283	    (!qeth_mtu_is_valid(card, new_mtu)))
5284		return -EINVAL;
5285	dev->mtu = new_mtu;
5286	return 0;
5287}
5288
5289#ifdef CONFIG_QETH_VLAN
5290static void
5291qeth_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
5292{
5293	struct qeth_card *card;
5294	unsigned long flags;
5295
5296	QETH_DBF_TEXT(trace,4,"vlanreg");
5297
5298	card = (struct qeth_card *) dev->priv;
5299	spin_lock_irqsave(&card->vlanlock, flags);
5300	card->vlangrp = grp;
5301	spin_unlock_irqrestore(&card->vlanlock, flags);
5302}
5303
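/*
 * When a VLAN id is removed, any not yet transmitted skbs carrying
 * that tag are taken off the outbound buffer's skb list and freed;
 * skbs belonging to other VLANs are re-queued untouched.
 */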
5304static void
5305qeth_free_vlan_buffer(struct qeth_card *card, struct qeth_qdio_out_buffer *buf,
5306		      unsigned short vid)
5307{
5308	int i;
5309	struct sk_buff *skb;
5310	struct sk_buff_head tmp_list;
5311
5312	skb_queue_head_init(&tmp_list);
5313	lockdep_set_class(&tmp_list.lock, &qdio_out_skb_queue_key);
5314	for(i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i){
5315		while ((skb = skb_dequeue(&buf->skb_list))){
5316			if (vlan_tx_tag_present(skb) &&
5317			    (vlan_tx_tag_get(skb) == vid)) {
5318				atomic_dec(&skb->users);
5319				dev_kfree_skb(skb);
5320			} else
5321				skb_queue_tail(&tmp_list, skb);
5322		}
5323	}
5324	while ((skb = skb_dequeue(&tmp_list)))
5325		skb_queue_tail(&buf->skb_list, skb);
5326}
5327
5328static void
5329qeth_free_vlan_skbs(struct qeth_card *card, unsigned short vid)
5330{
5331	int i, j;
5332
5333	QETH_DBF_TEXT(trace, 4, "frvlskbs");
5334	for (i = 0; i < card->qdio.no_out_queues; ++i){
5335		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
5336			qeth_free_vlan_buffer(card, &card->qdio.
5337					      out_qs[i]->bufs[j], vid);
5338	}
5339}
5340
5341static void
5342qeth_free_vlan_addresses4(struct qeth_card *card, unsigned short vid)
5343{
5344	struct in_device *in_dev;
5345	struct in_ifaddr *ifa;
5346	struct qeth_ipaddr *addr;
5347
5348	QETH_DBF_TEXT(trace, 4, "frvaddr4");
5349
5350	rcu_read_lock();
5351	in_dev = __in_dev_get_rcu(vlan_group_get_device(card->vlangrp, vid));
5352	if (!in_dev)
5353		goto out;
5354	for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
5355		addr = qeth_get_addr_buffer(QETH_PROT_IPV4);
5356		if (addr){
5357			addr->u.a4.addr = ifa->ifa_address;
5358			addr->u.a4.mask = ifa->ifa_mask;
5359			addr->type = QETH_IP_TYPE_NORMAL;
5360			if (!qeth_delete_ip(card, addr))
5361				kfree(addr);
5362		}
5363	}
5364out:
5365	rcu_read_unlock();
5366}
5367
5368static void
5369qeth_free_vlan_addresses6(struct qeth_card *card, unsigned short vid)
5370{
5371#ifdef CONFIG_QETH_IPV6
5372	struct inet6_dev *in6_dev;
5373	struct inet6_ifaddr *ifa;
5374	struct qeth_ipaddr *addr;
5375
5376	QETH_DBF_TEXT(trace, 4, "frvaddr6");
5377
5378	in6_dev = in6_dev_get(vlan_group_get_device(card->vlangrp, vid));
5379	if (!in6_dev)
5380		return;
5381	for (ifa = in6_dev->addr_list; ifa; ifa = ifa->lst_next){
5382		addr = qeth_get_addr_buffer(QETH_PROT_IPV6);
5383		if (addr){
5384			memcpy(&addr->u.a6.addr, &ifa->addr,
5385			       sizeof(struct in6_addr));
5386			addr->u.a6.pfxlen = ifa->prefix_len;
5387			addr->type = QETH_IP_TYPE_NORMAL;
5388			if (!qeth_delete_ip(card, addr))
5389				kfree(addr);
5390		}
5391	}
5392	in6_dev_put(in6_dev);
5393#endif /* CONFIG_QETH_IPV6 */
5394}
5395
5396static void
5397qeth_free_vlan_addresses(struct qeth_card *card, unsigned short vid)
5398{
5399	if (card->options.layer2 || !card->vlangrp)
5400		return;
5401	qeth_free_vlan_addresses4(card, vid);
5402	qeth_free_vlan_addresses6(card, vid);
5403}
5404
5405static int
5406qeth_layer2_send_setdelvlan_cb(struct qeth_card *card,
5407                               struct qeth_reply *reply,
5408                               unsigned long data)
5409{
5410	struct qeth_ipa_cmd *cmd;
5411
5412	QETH_DBF_TEXT(trace, 2, "L2sdvcb");
5413	cmd = (struct qeth_ipa_cmd *) data;
5414	if (cmd->hdr.return_code) {
5415		PRINT_ERR("Error in processing VLAN %i on %s: 0x%x. "
5416			  "Continuing\n", cmd->data.setdelvlan.vlan_id,
5417			  QETH_CARD_IFNAME(card), cmd->hdr.return_code);
5418		QETH_DBF_TEXT_(trace, 2, "L2VL%4x", cmd->hdr.command);
5419		QETH_DBF_TEXT_(trace, 2, "L2%s", CARD_BUS_ID(card));
5420		QETH_DBF_TEXT_(trace, 2, "err%d", cmd->hdr.return_code);
5421	}
5422	return 0;
5423}
5424
5425static int
5426qeth_layer2_send_setdelvlan(struct qeth_card *card, __u16 i,
5427			    enum qeth_ipa_cmds ipacmd)
5428{
5429	struct qeth_ipa_cmd *cmd;
5430	struct qeth_cmd_buffer *iob;
5431
5432	QETH_DBF_TEXT_(trace, 4, "L2sdv%x",ipacmd);
5433	iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
5434	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5435	cmd->data.setdelvlan.vlan_id = i;
5436	return qeth_send_ipa_cmd(card, iob,
5437				 qeth_layer2_send_setdelvlan_cb, NULL);
5438}
5439
5440static void
5441qeth_layer2_process_vlans(struct qeth_card *card, int clear)
5442{
5443	unsigned short i;
5444
5445	QETH_DBF_TEXT(trace, 3, "L2prcvln");
5446
5447	if (!card->vlangrp)
5448		return;
5449	for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
5450		if (vlan_group_get_device(card->vlangrp, i) == NULL)
5451			continue;
5452		if (clear)
5453			qeth_layer2_send_setdelvlan(card, i, IPA_CMD_DELVLAN);
5454		else
5455			qeth_layer2_send_setdelvlan(card, i, IPA_CMD_SETVLAN);
5456	}
5457}
5458
5459/* add_vid is used in layer 2 mode only ... */
5460static void
5461qeth_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
5462{
5463	struct qeth_card *card;
5464
5465	QETH_DBF_TEXT_(trace, 4, "aid:%d", vid);
5466
5467	card = (struct qeth_card *) dev->priv;
5468	if (!card->options.layer2)
5469		return;
5470	qeth_layer2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
5471}
5472
5473/* ... kill_vid is used for both modes */
5474static void
5475qeth_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
5476{
5477	struct qeth_card *card;
5478	unsigned long flags;
5479
5480	QETH_DBF_TEXT_(trace, 4, "kid:%d", vid);
5481
5482	card = (struct qeth_card *) dev->priv;
5483	/* free all skbs for the vlan device */
5484	qeth_free_vlan_skbs(card, vid);
5485	spin_lock_irqsave(&card->vlanlock, flags);
5486	/* unregister IP addresses of vlan device */
5487	qeth_free_vlan_addresses(card, vid);
5488	vlan_group_set_device(card->vlangrp, vid, NULL);
5489	spin_unlock_irqrestore(&card->vlanlock, flags);
5490	if (card->options.layer2)
5491		qeth_layer2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
5492	qeth_set_multicast_list(card->dev);
5493}
5494#endif
5495/**
5496 * Examine hardware response to SET_PROMISC_MODE
5497 */
5498static int
5499qeth_setadp_promisc_mode_cb(struct qeth_card *card,
5500			    struct qeth_reply *reply,
5501			    unsigned long data)
5502{
5503	struct qeth_ipa_cmd *cmd;
5504	struct qeth_ipacmd_setadpparms *setparms;
5505
5506	QETH_DBF_TEXT(trace,4,"prmadpcb");
5507
5508	cmd = (struct qeth_ipa_cmd *) data;
5509	setparms = &(cmd->data.setadapterparms);
5510
5511	qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
5512	if (cmd->hdr.return_code) {
5513		QETH_DBF_TEXT_(trace,4,"prmrc%2.2x",cmd->hdr.return_code);
5514		setparms->data.mode = SET_PROMISC_MODE_OFF;
5515	}
5516	card->info.promisc_mode = setparms->data.mode;
5517	return 0;
5518}
5519/*
5520 * Set promiscuous mode (on or off) (SET_PROMISC_MODE command)
5521 */
5522static void
5523qeth_setadp_promisc_mode(struct qeth_card *card)
5524{
5525	enum qeth_ipa_promisc_modes mode;
5526	struct net_device *dev = card->dev;
5527	struct qeth_cmd_buffer *iob;
5528	struct qeth_ipa_cmd *cmd;
5529
5530	QETH_DBF_TEXT(trace, 4, "setprom");
5531
5532	if (((dev->flags & IFF_PROMISC) &&
5533	     (card->info.promisc_mode == SET_PROMISC_MODE_ON)) ||
5534	    (!(dev->flags & IFF_PROMISC) &&
5535	     (card->info.promisc_mode == SET_PROMISC_MODE_OFF)))
5536		return;
5537	mode = SET_PROMISC_MODE_OFF;
5538	if (dev->flags & IFF_PROMISC)
5539		mode = SET_PROMISC_MODE_ON;
5540	QETH_DBF_TEXT_(trace, 4, "mode:%x", mode);
5541
5542	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
5543			sizeof(struct qeth_ipacmd_setadpparms));
5544	cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
5545	cmd->data.setadapterparms.data.mode = mode;
5546	qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
5547}
5548
5549/**
5550 * set the multicast address list on the card
5551 */
5552static void
5553qeth_set_multicast_list(struct net_device *dev)
5554{
5555	struct qeth_card *card = (struct qeth_card *) dev->priv;
5556
5557	if (card->info.type == QETH_CARD_TYPE_OSN)
5558		return ;
5559
5560	QETH_DBF_TEXT(trace, 3, "setmulti");
5561	qeth_delete_mc_addresses(card);
5562	if (card->options.layer2) {
5563		qeth_layer2_add_multicast(card);
5564		goto out;
5565	}
5566	qeth_add_multicast_ipv4(card);
5567#ifdef CONFIG_QETH_IPV6
5568	qeth_add_multicast_ipv6(card);
5569#endif
5570out:
5571	qeth_set_ip_addr_list(card);
5572	if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
5573		return;
5574	qeth_setadp_promisc_mode(card);
5575}
5576
5577static int
5578qeth_neigh_setup(struct net_device *dev, struct neigh_parms *np)
5579{
5580	return 0;
5581}
5582
5583static void
5584qeth_get_mac_for_ipm(__u32 ipm, char *mac, struct net_device *dev)
5585{
5586	if (dev->type == ARPHRD_IEEE802_TR)
5587		ip_tr_mc_map(ipm, mac);
5588	else
5589		ip_eth_mc_map(ipm, mac);
5590}
5591
5592static struct qeth_ipaddr *
5593qeth_get_addr_buffer(enum qeth_prot_versions prot)
5594{
5595	struct qeth_ipaddr *addr;
5596
5597	addr = kzalloc(sizeof(struct qeth_ipaddr), GFP_ATOMIC);
5598	if (addr == NULL) {
5599		PRINT_WARN("Not enough memory to add address\n");
5600		return NULL;
5601	}
5602	addr->type = QETH_IP_TYPE_NORMAL;
5603	addr->proto = prot;
5604	return addr;
5605}
5606
5607int
5608qeth_osn_assist(struct net_device *dev,
5609		void *data,
5610		int data_len)
5611{
5612	struct qeth_cmd_buffer *iob;
5613	struct qeth_card *card;
5614	int rc;
5615
5616	QETH_DBF_TEXT(trace, 2, "osnsdmc");
5617	if (!dev)
5618		return -ENODEV;
5619	card = (struct qeth_card *)dev->priv;
5620	if (!card)
5621		return -ENODEV;
5622	if ((card->state != CARD_STATE_UP) &&
5623	    (card->state != CARD_STATE_SOFTSETUP))
5624		return -ENODEV;
5625	iob = qeth_wait_for_buffer(&card->write);
5626	memcpy(iob->data+IPA_PDU_HEADER_SIZE, data, data_len);
5627	rc = qeth_osn_send_ipa_cmd(card, iob, data_len);
5628	return rc;
5629}
5630
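/**
 * find the net_device of the card whose read channel carries the given
 * two-byte device number; the last four hex digits of the read device
 * bus id are compared against the device number given by the caller
 */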
5631static struct net_device *
5632qeth_netdev_by_devno(unsigned char *read_dev_no)
5633{
5634	struct qeth_card *card;
5635	struct net_device *ndev;
5636	unsigned char *readno;
5637	__u16 temp_dev_no, card_dev_no;
5638	char *endp;
5639	unsigned long flags;
5640
5641	ndev = NULL;
5642	memcpy(&temp_dev_no, read_dev_no, 2);
5643	read_lock_irqsave(&qeth_card_list.rwlock, flags);
5644	list_for_each_entry(card, &qeth_card_list.list, list) {
5645		readno = CARD_RDEV_ID(card);
5646		readno += (strlen(readno) - 4);
5647		card_dev_no = simple_strtoul(readno, &endp, 16);
5648		if (card_dev_no == temp_dev_no) {
5649			ndev = card->dev;
5650			break;
5651		}
5652	}
5653	read_unlock_irqrestore(&qeth_card_list.rwlock, flags);
5654	return ndev;
5655}
5656
5657int
5658qeth_osn_register(unsigned char *read_dev_no,
5659		  struct net_device **dev,
5660		  int (*assist_cb)(struct net_device *, void *),
5661		  int (*data_cb)(struct sk_buff *))
5662{
5663	struct qeth_card * card;
5664
5665	QETH_DBF_TEXT(trace, 2, "osnreg");
5666	*dev = qeth_netdev_by_devno(read_dev_no);
5667	if (*dev == NULL)
5668		return -ENODEV;
5669	card = (struct qeth_card *)(*dev)->priv;
5670	if (!card)
5671		return -ENODEV;
5672	if ((assist_cb == NULL) || (data_cb == NULL))
5673		return -EINVAL;
5674	card->osn_info.assist_cb = assist_cb;
5675	card->osn_info.data_cb = data_cb;
5676	return 0;
5677}
5678
5679void
5680qeth_osn_deregister(struct net_device * dev)
5681{
5682	struct qeth_card *card;
5683
5684	QETH_DBF_TEXT(trace, 2, "osndereg");
5685	if (!dev)
5686		return;
5687	card = (struct qeth_card *)dev->priv;
5688	if (!card)
5689		return;
5690	card->osn_info.assist_cb = NULL;
5691	card->osn_info.data_cb = NULL;
5692	return;
5693}
5694
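/**
 * queue a DEL_ALL_MC entry on the card's todo list, so that all
 * registered multicast addresses are removed when the IP address
 * list is set next
 */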
5695static void
5696qeth_delete_mc_addresses(struct qeth_card *card)
5697{
5698	struct qeth_ipaddr *iptodo;
5699	unsigned long flags;
5700
5701	QETH_DBF_TEXT(trace,4,"delmc");
5702	iptodo = qeth_get_addr_buffer(QETH_PROT_IPV4);
5703	if (!iptodo) {
5704		QETH_DBF_TEXT(trace, 2, "dmcnomem");
5705		return;
5706	}
5707	iptodo->type = QETH_IP_TYPE_DEL_ALL_MC;
5708	spin_lock_irqsave(&card->ip_lock, flags);
5709	if (!__qeth_insert_ip_todo(card, iptodo, 0))
5710		kfree(iptodo);
5711	spin_unlock_irqrestore(&card->ip_lock, flags);
5712}
5713
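/**
 * walk the IPv4 multicast list of the given in_device and queue each
 * group address (IP plus mapped MAC) for registration on the card
 */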
5714static void
5715qeth_add_mc(struct qeth_card *card, struct in_device *in4_dev)
5716{
5717	struct qeth_ipaddr *ipm;
5718	struct ip_mc_list *im4;
5719	char buf[MAX_ADDR_LEN];
5720
5721	QETH_DBF_TEXT(trace,4,"addmc");
5722	for (im4 = in4_dev->mc_list; im4; im4 = im4->next) {
5723		qeth_get_mac_for_ipm(im4->multiaddr, buf, in4_dev->dev);
5724		ipm = qeth_get_addr_buffer(QETH_PROT_IPV4);
5725		if (!ipm)
5726			continue;
5727		ipm->u.a4.addr = im4->multiaddr;
5728		memcpy(ipm->mac,buf,OSA_ADDR_LEN);
5729		ipm->is_multicast = 1;
5730		if (!qeth_add_ip(card,ipm))
5731			kfree(ipm);
5732	}
5733}
5734
5735static inline void
5736qeth_add_vlan_mc(struct qeth_card *card)
5737{
5738#ifdef CONFIG_QETH_VLAN
5739	struct in_device *in_dev;
5740	struct vlan_group *vg;
5741	int i;
5742
5743	QETH_DBF_TEXT(trace,4,"addmcvl");
5744	if ( ((card->options.layer2 == 0) &&
5745	      (!qeth_is_supported(card,IPA_FULL_VLAN))) ||
5746	     (card->vlangrp == NULL) )
5747		return ;
5748
5749	vg = card->vlangrp;
5750	for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
5751		struct net_device *netdev = vlan_group_get_device(vg, i);
5752		if (netdev == NULL ||
5753		    !(netdev->flags & IFF_UP))
5754			continue;
5755		in_dev = in_dev_get(netdev);
5756		if (!in_dev)
5757			continue;
5758		read_lock(&in_dev->mc_list_lock);
5759		qeth_add_mc(card,in_dev);
5760		read_unlock(&in_dev->mc_list_lock);
5761		in_dev_put(in_dev);
5762	}
5763#endif
5764}
5765
5766static void
5767qeth_add_multicast_ipv4(struct qeth_card *card)
5768{
5769	struct in_device *in4_dev;
5770
5771	QETH_DBF_TEXT(trace,4,"chkmcv4");
5772	in4_dev = in_dev_get(card->dev);
5773	if (in4_dev == NULL)
5774		return;
5775	read_lock(&in4_dev->mc_list_lock);
5776	qeth_add_mc(card, in4_dev);
5777	qeth_add_vlan_mc(card);
5778	read_unlock(&in4_dev->mc_list_lock);
5779	in_dev_put(in4_dev);
5780}
5781
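/**
 * in layer 2 mode queue the netdevice's multicast MAC addresses
 * (dev_mc_list) for registration on the card
 */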
5782static void
5783qeth_layer2_add_multicast(struct qeth_card *card)
5784{
5785	struct qeth_ipaddr *ipm;
5786	struct dev_mc_list *dm;
5787
5788	QETH_DBF_TEXT(trace,4,"L2addmc");
5789	for (dm = card->dev->mc_list; dm; dm = dm->next) {
5790		ipm = qeth_get_addr_buffer(QETH_PROT_IPV4);
5791		if (!ipm)
5792			continue;
5793		memcpy(ipm->mac,dm->dmi_addr,MAX_ADDR_LEN);
5794		ipm->is_multicast = 1;
5795		if (!qeth_add_ip(card, ipm))
5796			kfree(ipm);
5797	}
5798}
5799
5800#ifdef CONFIG_QETH_IPV6
5801static void
5802qeth_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev)
5803{
5804	struct qeth_ipaddr *ipm;
5805	struct ifmcaddr6 *im6;
5806	char buf[MAX_ADDR_LEN];
5807
5808	QETH_DBF_TEXT(trace,4,"addmc6");
5809	for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) {
5810		ndisc_mc_map(&im6->mca_addr, buf, in6_dev->dev, 0);
5811		ipm = qeth_get_addr_buffer(QETH_PROT_IPV6);
5812		if (!ipm)
5813			continue;
5814		ipm->is_multicast = 1;
5815		memcpy(ipm->mac,buf,OSA_ADDR_LEN);
5816		memcpy(&ipm->u.a6.addr,&im6->mca_addr.s6_addr,
5817		       sizeof(struct in6_addr));
5818		if (!qeth_add_ip(card,ipm))
5819			kfree(ipm);
5820	}
5821}
5822
5823static inline void
5824qeth_add_vlan_mc6(struct qeth_card *card)
5825{
5826#ifdef CONFIG_QETH_VLAN
5827	struct inet6_dev *in_dev;
5828	struct vlan_group *vg;
5829	int i;
5830
5831	QETH_DBF_TEXT(trace,4,"admc6vl");
5832	if ( ((card->options.layer2 == 0) &&
5833	      (!qeth_is_supported(card,IPA_FULL_VLAN))) ||
5834	     (card->vlangrp == NULL))
5835		return ;
5836
5837	vg = card->vlangrp;
5838	for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
5839		struct net_device *netdev = vlan_group_get_device(vg, i);
5840		if (netdev == NULL ||
5841		    !(netdev->flags & IFF_UP))
5842			continue;
5843		in_dev = in6_dev_get(netdev);
5844		if (!in_dev)
5845			continue;
5846		read_lock_bh(&in_dev->lock);
5847		qeth_add_mc6(card,in_dev);
5848		read_unlock_bh(&in_dev->lock);
5849		in6_dev_put(in_dev);
5850	}
5851#endif /* CONFIG_QETH_VLAN */
5852}
5853
5854static void
5855qeth_add_multicast_ipv6(struct qeth_card *card)
5856{
5857	struct inet6_dev *in6_dev;
5858
5859	QETH_DBF_TEXT(trace,4,"chkmcv6");
5860	if (!qeth_is_supported(card, IPA_IPV6))
5861		return ;
5862	in6_dev = in6_dev_get(card->dev);
5863	if (in6_dev == NULL)
5864		return;
5865	read_lock_bh(&in6_dev->lock);
5866	qeth_add_mc6(card, in6_dev);
5867	qeth_add_vlan_mc6(card);
5868	read_unlock_bh(&in6_dev->lock);
5869	in6_dev_put(in6_dev);
5870}
5871#endif /* CONFIG_QETH_IPV6 */
5872
5873static int
5874qeth_layer2_send_setdelmac(struct qeth_card *card, __u8 *mac,
5875			   enum qeth_ipa_cmds ipacmd,
5876			   int (*reply_cb) (struct qeth_card *,
5877					    struct qeth_reply*,
5878					    unsigned long))
5879{
5880	struct qeth_ipa_cmd *cmd;
5881	struct qeth_cmd_buffer *iob;
5882
5883	QETH_DBF_TEXT(trace, 2, "L2sdmac");
5884	iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
5885	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5886	cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
5887	memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN);
5888	return qeth_send_ipa_cmd(card, iob, reply_cb, NULL);
5889}
5890
5891static int
5892qeth_layer2_send_setgroupmac_cb(struct qeth_card *card,
5893				struct qeth_reply *reply,
5894				unsigned long data)
5895{
5896	struct qeth_ipa_cmd *cmd;
5897	__u8 *mac;
5898
5899	QETH_DBF_TEXT(trace, 2, "L2Sgmacb");
5900	cmd = (struct qeth_ipa_cmd *) data;
5901	mac = &cmd->data.setdelmac.mac[0];
5902	/* MAC already registered, needed in couple/uncouple case */
5903	if (cmd->hdr.return_code == 0x2005) {
5904		PRINT_WARN("Group MAC %02x:%02x:%02x:%02x:%02x:%02x " \
5905			  "already exists on %s\n",
5906			  mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
5907			  QETH_CARD_IFNAME(card));
5908		cmd->hdr.return_code = 0;
5909	}
5910	if (cmd->hdr.return_code)
5911		PRINT_ERR("Could not set group MAC " \
5912			  "%02x:%02x:%02x:%02x:%02x:%02x on %s: %x\n",
5913			  mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
5914			  QETH_CARD_IFNAME(card),cmd->hdr.return_code);
5915	return 0;
5916}
5917
5918static int
5919qeth_layer2_send_setgroupmac(struct qeth_card *card, __u8 *mac)
5920{
5921	QETH_DBF_TEXT(trace, 2, "L2Sgmac");
5922	return qeth_layer2_send_setdelmac(card, mac, IPA_CMD_SETGMAC,
5923					  qeth_layer2_send_setgroupmac_cb);
5924}
5925
5926static int
5927qeth_layer2_send_delgroupmac_cb(struct qeth_card *card,
5928				struct qeth_reply *reply,
5929				unsigned long data)
5930{
5931	struct qeth_ipa_cmd *cmd;
5932	__u8 *mac;
5933
5934	QETH_DBF_TEXT(trace, 2, "L2Dgmacb");
5935	cmd = (struct qeth_ipa_cmd *) data;
5936	mac = &cmd->data.setdelmac.mac[0];
5937	if (cmd->hdr.return_code)
5938		PRINT_ERR("Could not delete group MAC " \
5939			  "%02x:%02x:%02x:%02x:%02x:%02x on %s: %x\n",
5940			  mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
5941			  QETH_CARD_IFNAME(card), cmd->hdr.return_code);
5942	return 0;
5943}
5944
5945static int
5946qeth_layer2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
5947{
5948	QETH_DBF_TEXT(trace, 2, "L2Dgmac");
5949	return qeth_layer2_send_setdelmac(card, mac, IPA_CMD_DELGMAC,
5950					  qeth_layer2_send_delgroupmac_cb);
5951}
5952
5953static int
5954qeth_layer2_send_setmac_cb(struct qeth_card *card,
5955			   struct qeth_reply *reply,
5956			   unsigned long data)
5957{
5958	struct qeth_ipa_cmd *cmd;
5959
5960	QETH_DBF_TEXT(trace, 2, "L2Smaccb");
5961	cmd = (struct qeth_ipa_cmd *) data;
5962	if (cmd->hdr.return_code) {
5963		QETH_DBF_TEXT_(trace, 2, "L2er%x", cmd->hdr.return_code);
5964		card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
5965		cmd->hdr.return_code = -EIO;
5966	} else {
5967		card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
5968		memcpy(card->dev->dev_addr,cmd->data.setdelmac.mac,
5969		       OSA_ADDR_LEN);
5970		PRINT_INFO("MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x "
5971			   "successfully registered on device %s\n",
5972			   card->dev->dev_addr[0], card->dev->dev_addr[1],
5973			   card->dev->dev_addr[2], card->dev->dev_addr[3],
5974			   card->dev->dev_addr[4], card->dev->dev_addr[5],
5975			   card->dev->name);
5976	}
5977	return 0;
5978}
5979
5980static int
5981qeth_layer2_send_setmac(struct qeth_card *card, __u8 *mac)
5982{
5983	QETH_DBF_TEXT(trace, 2, "L2Setmac");
5984	return qeth_layer2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
5985					  qeth_layer2_send_setmac_cb);
5986}
5987
5988static int
5989qeth_layer2_send_delmac_cb(struct qeth_card *card,
5990			   struct qeth_reply *reply,
5991			   unsigned long data)
5992{
5993	struct qeth_ipa_cmd *cmd;
5994
5995	QETH_DBF_TEXT(trace, 2, "L2Dmaccb");
5996	cmd = (struct qeth_ipa_cmd *) data;
5997	if (cmd->hdr.return_code) {
5998		QETH_DBF_TEXT_(trace, 2, "err%d", cmd->hdr.return_code);
5999		cmd->hdr.return_code = -EIO;
6000		return 0;
6001	}
6002	card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
6003
6004	return 0;
6005}
6006static int
6007qeth_layer2_send_delmac(struct qeth_card *card, __u8 *mac)
6008{
6009	QETH_DBF_TEXT(trace, 2, "L2Delmac");
6010	if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
6011		return 0;
6012	return qeth_layer2_send_setdelmac(card, mac, IPA_CMD_DELVMAC,
6013					  qeth_layer2_send_delmac_cb);
6014}
6015
6016static int
6017qeth_layer2_set_mac_address(struct net_device *dev, void *p)
6018{
6019	struct sockaddr *addr = p;
6020	struct qeth_card *card;
6021	int rc = 0;
6022
6023	QETH_DBF_TEXT(trace, 3, "setmac");
6024
6025	if (qeth_verify_dev(dev) != QETH_REAL_CARD) {
6026		QETH_DBF_TEXT(trace, 3, "setmcINV");
6027		return -EOPNOTSUPP;
6028	}
6029	card = (struct qeth_card *) dev->priv;
6030
6031	if (!card->options.layer2) {
6032		PRINT_WARN("Setting MAC address on %s is not supported "
6033			   "in Layer 3 mode.\n", dev->name);
6034		QETH_DBF_TEXT(trace, 3, "setmcLY3");
6035		return -EOPNOTSUPP;
6036	}
6037	if (card->info.type == QETH_CARD_TYPE_OSN) {
6038		PRINT_WARN("Setting MAC address on %s is not supported.\n",
6039			   dev->name);
6040		QETH_DBF_TEXT(trace, 3, "setmcOSN");
6041		return -EOPNOTSUPP;
6042	}
6043	QETH_DBF_TEXT_(trace, 3, "%s", CARD_BUS_ID(card));
6044	QETH_DBF_HEX(trace, 3, addr->sa_data, OSA_ADDR_LEN);
6045	rc = qeth_layer2_send_delmac(card, &card->dev->dev_addr[0]);
6046	if (!rc)
6047		rc = qeth_layer2_send_setmac(card, addr->sa_data);
6048	return rc;
6049}
6050
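/**
 * initialize the header of an IPA command: command code, initiator,
 * sequence number, adapter type and port number, primitive version
 * (2 for layer 2, 1 for layer 3) and protocol version
 */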
6051static void
6052qeth_fill_ipacmd_header(struct qeth_card *card, struct qeth_ipa_cmd *cmd,
6053			__u8 command, enum qeth_prot_versions prot)
6054{
6055	memset(cmd, 0, sizeof (struct qeth_ipa_cmd));
6056	cmd->hdr.command = command;
6057	cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
6058	cmd->hdr.seqno = card->seqno.ipa;
6059	cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
6060	cmd->hdr.rel_adapter_no = (__u8) card->info.portno;
6061	if (card->options.layer2)
6062		cmd->hdr.prim_version_no = 2;
6063	else
6064		cmd->hdr.prim_version_no = 1;
6065	cmd->hdr.param_count = 1;
6066	cmd->hdr.prot_version = prot;
6067	cmd->hdr.ipa_supported = 0;
6068	cmd->hdr.ipa_enabled = 0;
6069}
6070
6071static struct qeth_cmd_buffer *
6072qeth_get_ipacmd_buffer(struct qeth_card *card, enum qeth_ipa_cmds ipacmd,
6073		       enum qeth_prot_versions prot)
6074{
6075	struct qeth_cmd_buffer *iob;
6076	struct qeth_ipa_cmd *cmd;
6077
6078	iob = qeth_wait_for_buffer(&card->write);
6079	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6080	qeth_fill_ipacmd_header(card, cmd, ipacmd, prot);
6081
6082	return iob;
6083}
6084
6085static int
6086qeth_send_setdelmc(struct qeth_card *card, struct qeth_ipaddr *addr, int ipacmd)
6087{
6088	int rc;
6089	struct qeth_cmd_buffer *iob;
6090	struct qeth_ipa_cmd *cmd;
6091
6092	QETH_DBF_TEXT(trace,4,"setdelmc");
6093
6094	iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
6095	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6096	memcpy(&cmd->data.setdelipm.mac,addr->mac, OSA_ADDR_LEN);
6097	if (addr->proto == QETH_PROT_IPV6)
6098		memcpy(cmd->data.setdelipm.ip6, &addr->u.a6.addr,
6099		       sizeof(struct in6_addr));
6100	else
6101		memcpy(&cmd->data.setdelipm.ip4, &addr->u.a4.addr,4);
6102
6103	rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
6104
6105	return rc;
6106}
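
/**
 * expand a prefix length into a 16 byte netmask,
 * e.g. a prefix length of 20 yields ff:ff:f0:00:...:00
 */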
6107static void
6108qeth_fill_netmask(u8 *netmask, unsigned int len)
6109{
6110	int i,j;
6111	for (i=0;i<16;i++) {
6112		j=(len)-(i*8);
6113		if (j >= 8)
6114			netmask[i] = 0xff;
6115		else if (j > 0)
6116			netmask[i] = (u8)(0xFF00>>j);
6117		else
6118			netmask[i] = 0;
6119	}
6120}
6121
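/**
 * issue a SETIP/DELIP command for the given address; for IPv6 the
 * prefix length is expanded into a full netmask first
 */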
6122static int
6123qeth_send_setdelip(struct qeth_card *card, struct qeth_ipaddr *addr,
6124		   int ipacmd, unsigned int flags)
6125{
6126	int rc;
6127	struct qeth_cmd_buffer *iob;
6128	struct qeth_ipa_cmd *cmd;
6129	__u8 netmask[16];
6130
6131	QETH_DBF_TEXT(trace,4,"setdelip");
6132	QETH_DBF_TEXT_(trace,4,"flags%02X", flags);
6133
6134	iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
6135	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6136	if (addr->proto == QETH_PROT_IPV6) {
6137		memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr,
6138		       sizeof(struct in6_addr));
6139		qeth_fill_netmask(netmask,addr->u.a6.pfxlen);
6140		memcpy(cmd->data.setdelip6.mask, netmask,
6141		       sizeof(struct in6_addr));
6142		cmd->data.setdelip6.flags = flags;
6143	} else {
6144		memcpy(cmd->data.setdelip4.ip_addr, &addr->u.a4.addr, 4);
6145		memcpy(cmd->data.setdelip4.mask, &addr->u.a4.mask, 4);
6146		cmd->data.setdelip4.flags = flags;
6147	}
6148
6149	rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
6150
6151	return rc;
6152}
6153
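/**
 * in layer 2 mode only multicast (group MAC) addresses are registered
 * this way; the unicast MAC address is set via SETVMAC
 */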
6154static int
6155qeth_layer2_register_addr_entry(struct qeth_card *card,
6156				struct qeth_ipaddr *addr)
6157{
6158	if (!addr->is_multicast)
6159		return 0;
6160	QETH_DBF_TEXT(trace, 2, "setgmac");
6161	QETH_DBF_HEX(trace,3,&addr->mac[0],OSA_ADDR_LEN);
6162	return qeth_layer2_send_setgroupmac(card, &addr->mac[0]);
6163}
6164
6165static int
6166qeth_layer2_deregister_addr_entry(struct qeth_card *card,
6167				  struct qeth_ipaddr *addr)
6168{
6169	if (!addr->is_multicast)
6170		return 0;
6171	QETH_DBF_TEXT(trace, 2, "delgmac");
6172	QETH_DBF_HEX(trace,3,&addr->mac[0],OSA_ADDR_LEN);
6173	return qeth_layer2_send_delgroupmac(card, &addr->mac[0]);
6174}
6175
6176static int
6177qeth_layer3_register_addr_entry(struct qeth_card *card,
6178				struct qeth_ipaddr *addr)
6179{
6180	char buf[50];
6181	int rc;
6182	int cnt = 3;
6183
6184	if (addr->proto == QETH_PROT_IPV4) {
6185		QETH_DBF_TEXT(trace, 2,"setaddr4");
6186		QETH_DBF_HEX(trace, 3, &addr->u.a4.addr, sizeof(int));
6187	} else if (addr->proto == QETH_PROT_IPV6) {
6188		QETH_DBF_TEXT(trace, 2, "setaddr6");
6189		QETH_DBF_HEX(trace,3,&addr->u.a6.addr,8);
6190		QETH_DBF_HEX(trace,3,((char *)&addr->u.a6.addr)+8,8);
6191	} else {
6192		QETH_DBF_TEXT(trace, 2, "setaddr?");
6193		QETH_DBF_HEX(trace, 3, addr, sizeof(struct qeth_ipaddr));
6194	}
6195	do {
6196		if (addr->is_multicast)
6197			rc =  qeth_send_setdelmc(card, addr, IPA_CMD_SETIPM);
6198		else
6199			rc = qeth_send_setdelip(card, addr, IPA_CMD_SETIP,
6200					addr->set_flags);
6201		if (rc)
6202			QETH_DBF_TEXT(trace, 2, "failed");
6203	} while ((--cnt > 0) && rc);
6204	if (rc){
6205		QETH_DBF_TEXT(trace, 2, "FAILED");
6206		qeth_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
6207		PRINT_WARN("Could not register IP address %s (rc=0x%x/%d)\n",
6208			   buf, rc, rc);
6209	}
6210	return rc;
6211}
6212
6213static int
6214qeth_layer3_deregister_addr_entry(struct qeth_card *card,
6215				  struct qeth_ipaddr *addr)
6216{
6217	//char buf[50];
6218	int rc;
6219
6220	if (addr->proto == QETH_PROT_IPV4) {
6221		QETH_DBF_TEXT(trace, 2,"deladdr4");
6222		QETH_DBF_HEX(trace, 3, &addr->u.a4.addr, sizeof(int));
6223	} else if (addr->proto == QETH_PROT_IPV6) {
6224		QETH_DBF_TEXT(trace, 2, "deladdr6");
6225		QETH_DBF_HEX(trace,3,&addr->u.a6.addr,8);
6226		QETH_DBF_HEX(trace,3,((char *)&addr->u.a6.addr)+8,8);
6227	} else {
6228		QETH_DBF_TEXT(trace, 2, "deladdr?");
6229		QETH_DBF_HEX(trace, 3, addr, sizeof(struct qeth_ipaddr));
6230	}
6231	if (addr->is_multicast)
6232		rc = qeth_send_setdelmc(card, addr, IPA_CMD_DELIPM);
6233	else
6234		rc = qeth_send_setdelip(card, addr, IPA_CMD_DELIP,
6235					addr->del_flags);
6236	if (rc) {
6237		QETH_DBF_TEXT(trace, 2, "failed");
6238		/* TODO: re-activate this warning as soon as we have a
6239		 * clean microcode
6240		qeth_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
6241		PRINT_WARN("Could not deregister IP address %s (rc=%x)\n",
6242			   buf, rc);
6243		*/
6244	}
6245	return rc;
6246}
6247
6248static int
6249qeth_register_addr_entry(struct qeth_card *card, struct qeth_ipaddr *addr)
6250{
6251	if (card->options.layer2)
6252		return qeth_layer2_register_addr_entry(card, addr);
6253
6254	return qeth_layer3_register_addr_entry(card, addr);
6255}
6256
6257static int
6258qeth_deregister_addr_entry(struct qeth_card *card, struct qeth_ipaddr *addr)
6259{
6260	if (card->options.layer2)
6261		return qeth_layer2_deregister_addr_entry(card, addr);
6262
6263	return qeth_layer3_deregister_addr_entry(card, addr);
6264}
6265
6266static u32
6267qeth_ethtool_get_tx_csum(struct net_device *dev)
6268{
6269	/* We may need to say that we support tx csum offload if
6270	 * we do EDDP or TSO. There are discussions going on to
6271	 * enforce rules in the stack and in ethtool that make
6272	 * SG and TSO depend on HW_CSUM. At the moment there are
6273	 * no such rules....
6274	 * If we say yes here, we have to checksum outbound packets
6275	 * at all times. */
6276	return 0;
6277}
6278
6279static int
6280qeth_ethtool_set_tx_csum(struct net_device *dev, u32 data)
6281{
6282	return -EINVAL;
6283}
6284
6285static u32
6286qeth_ethtool_get_rx_csum(struct net_device *dev)
6287{
6288	struct qeth_card *card = (struct qeth_card *)dev->priv;
6289
6290	return (card->options.checksum_type == HW_CHECKSUMMING);
6291}
6292
6293static int
6294qeth_ethtool_set_rx_csum(struct net_device *dev, u32 data)
6295{
6296	struct qeth_card *card = (struct qeth_card *)dev->priv;
6297
6298	if ((card->state != CARD_STATE_DOWN) &&
6299	    (card->state != CARD_STATE_RECOVER))
6300		return -EPERM;
6301	if (data)
6302		card->options.checksum_type = HW_CHECKSUMMING;
6303	else
6304		card->options.checksum_type = SW_CHECKSUMMING;
6305	return 0;
6306}
6307
6308static u32
6309qeth_ethtool_get_sg(struct net_device *dev)
6310{
6311	struct qeth_card *card = (struct qeth_card *)dev->priv;
6312
6313	return ((card->options.large_send != QETH_LARGE_SEND_NO) &&
6314		(dev->features & NETIF_F_SG));
6315}
6316
6317static int
6318qeth_ethtool_set_sg(struct net_device *dev, u32 data)
6319{
6320	struct qeth_card *card = (struct qeth_card *)dev->priv;
6321
6322	if (data) {
6323		if (card->options.large_send != QETH_LARGE_SEND_NO)
6324			dev->features |= NETIF_F_SG;
6325		else {
6326			dev->features &= ~NETIF_F_SG;
6327			return -EINVAL;
6328		}
6329	} else
6330		dev->features &= ~NETIF_F_SG;
6331	return 0;
6332}
6333
6334static u32
6335qeth_ethtool_get_tso(struct net_device *dev)
6336{
6337	struct qeth_card *card = (struct qeth_card *)dev->priv;
6338
6339	return ((card->options.large_send != QETH_LARGE_SEND_NO) &&
6340		(dev->features & NETIF_F_TSO));
6341}
6342
6343static int
6344qeth_ethtool_set_tso(struct net_device *dev, u32 data)
6345{
6346	struct qeth_card *card = (struct qeth_card *)dev->priv;
6347
6348	if (data) {
6349		if (card->options.large_send != QETH_LARGE_SEND_NO)
6350			dev->features |= NETIF_F_TSO;
6351		else {
6352			dev->features &= ~NETIF_F_TSO;
6353			return -EINVAL;
6354		}
6355	} else
6356		dev->features &= ~NETIF_F_TSO;
6357	return 0;
6358}
6359
6360static struct ethtool_ops qeth_ethtool_ops = {
6361	.get_tx_csum = qeth_ethtool_get_tx_csum,
6362	.set_tx_csum = qeth_ethtool_set_tx_csum,
6363	.get_rx_csum = qeth_ethtool_get_rx_csum,
6364	.set_rx_csum = qeth_ethtool_set_rx_csum,
6365	.get_sg      = qeth_ethtool_get_sg,
6366	.set_sg      = qeth_ethtool_set_sg,
6367	.get_tso     = qeth_ethtool_get_tso,
6368	.set_tso     = qeth_ethtool_set_tso,
6369};
6370
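/**
 * copy the source MAC address from the (possibly faked) ethernet
 * header of the skb into haddr; returns ETH_ALEN if a usable header
 * is present, 0 otherwise
 */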
6371static int
6372qeth_hard_header_parse(struct sk_buff *skb, unsigned char *haddr)
6373{
6374	struct qeth_card *card;
6375	struct ethhdr *eth;
6376
6377	card = qeth_get_card_from_dev(skb->dev);
6378	if (card->options.layer2)
6379		goto haveheader;
6380#ifdef CONFIG_QETH_IPV6
6381	/* because of the manipulated ARP constructor and the ARP
6382	   flag for OSAE devices we have some nasty exceptions */
6383	if (card->info.type == QETH_CARD_TYPE_OSAE) {
6384		if (!card->options.fake_ll) {
6385			if ((skb->pkt_type==PACKET_OUTGOING) &&
6386			    (skb->protocol==ETH_P_IPV6))
6387				goto haveheader;
6388			else
6389				return 0;
6390		} else {
6391			if ((skb->pkt_type==PACKET_OUTGOING) &&
6392			    (skb->protocol==ETH_P_IP))
6393				return 0;
6394			else
6395				goto haveheader;
6396		}
6397	}
6398#endif
6399	if (!card->options.fake_ll)
6400		return 0;
6401haveheader:
6402	eth = eth_hdr(skb);
6403	memcpy(haddr, eth->h_source, ETH_ALEN);
6404	return ETH_ALEN;
6405}
6406
6407static int
6408qeth_netdev_init(struct net_device *dev)
6409{
6410	struct qeth_card *card;
6411
6412	card = (struct qeth_card *) dev->priv;
6413
6414	QETH_DBF_TEXT(trace,3,"initdev");
6415
6416	dev->tx_timeout = &qeth_tx_timeout;
6417	dev->watchdog_timeo = QETH_TX_TIMEOUT;
6418	dev->open = qeth_open;
6419	dev->stop = qeth_stop;
6420	dev->hard_start_xmit = qeth_hard_start_xmit;
6421	dev->do_ioctl = qeth_do_ioctl;
6422	dev->get_stats = qeth_get_stats;
6423	dev->change_mtu = qeth_change_mtu;
6424	dev->neigh_setup = qeth_neigh_setup;
6425	dev->set_multicast_list = qeth_set_multicast_list;
6426#ifdef CONFIG_QETH_VLAN
6427	dev->vlan_rx_register = qeth_vlan_rx_register;
6428	dev->vlan_rx_kill_vid = qeth_vlan_rx_kill_vid;
6429	dev->vlan_rx_add_vid = qeth_vlan_rx_add_vid;
6430#endif
6431	if (qeth_get_netdev_flags(card) & IFF_NOARP) {
6432		dev->rebuild_header = NULL;
6433		dev->hard_header = NULL;
6434		dev->header_cache_update = NULL;
6435		dev->hard_header_cache = NULL;
6436	}
6437#ifdef CONFIG_QETH_IPV6
6438	/*IPv6 address autoconfiguration stuff*/
6439	if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
6440		card->dev->dev_id = card->info.unique_id & 0xffff;
6441#endif
6442	if (card->options.fake_ll &&
6443		(qeth_get_netdev_flags(card) & IFF_NOARP))
6444			dev->hard_header = qeth_fake_header;
6445	if (dev->type == ARPHRD_IEEE802_TR)
6446		dev->hard_header_parse = NULL;
6447	else
6448		dev->hard_header_parse = qeth_hard_header_parse;
6449	dev->set_mac_address = qeth_layer2_set_mac_address;
6450	dev->flags |= qeth_get_netdev_flags(card);
6451	if ((card->options.fake_broadcast) ||
6452	    (card->info.broadcast_capable))
6453		dev->flags |= IFF_BROADCAST;
6454	dev->hard_header_len =
6455			qeth_get_hlen(card->info.link_type) + card->options.add_hhlen;
6456	dev->addr_len = OSA_ADDR_LEN;
6457	dev->mtu = card->info.initial_mtu;
6458	if (card->info.type != QETH_CARD_TYPE_OSN)
6459		SET_ETHTOOL_OPS(dev, &qeth_ethtool_ops);
6460	SET_MODULE_OWNER(dev);
6461	return 0;
6462}
6463
6464static void
6465qeth_init_func_level(struct qeth_card *card)
6466{
6467	if (card->ipato.enabled) {
6468		if (card->info.type == QETH_CARD_TYPE_IQD)
6469			card->info.func_level =
6470				QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT;
6471		else
6472			card->info.func_level =
6473				QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT;
6474	} else {
6475		if (card->info.type == QETH_CARD_TYPE_IQD)
6476			card->info.func_level =
6477				QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT;
6478		else
6479			card->info.func_level =
6480				QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT;
6481	}
6482}
6483
6484/**
6485 * hardsetup card, initialize MPC and QDIO stuff
6486 */
6487static int
6488qeth_hardsetup_card(struct qeth_card *card)
6489{
6490	int retries = 3;
6491	int rc;
6492
6493	QETH_DBF_TEXT(setup, 2, "hrdsetup");
6494
6495retry:
6496	if (retries < 3){
6497		PRINT_WARN("Retrying IDX activation.\n");
6498		ccw_device_set_offline(CARD_DDEV(card));
6499		ccw_device_set_offline(CARD_WDEV(card));
6500		ccw_device_set_offline(CARD_RDEV(card));
6501		ccw_device_set_online(CARD_RDEV(card));
6502		ccw_device_set_online(CARD_WDEV(card));
6503		ccw_device_set_online(CARD_DDEV(card));
6504	}
6505	rc = qeth_qdio_clear_card(card,card->info.type!=QETH_CARD_TYPE_IQD);
6506	if (rc == -ERESTARTSYS) {
6507		QETH_DBF_TEXT(setup, 2, "break1");
6508		return rc;
6509	} else if (rc) {
6510		QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
6511		if (--retries < 0)
6512			goto out;
6513		else
6514			goto retry;
6515	}
6516	if ((rc = qeth_get_unitaddr(card))){
6517		QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
6518		return rc;
6519	}
6520	qeth_init_tokens(card);
6521	qeth_init_func_level(card);
6522	rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb);
6523	if (rc == -ERESTARTSYS) {
6524		QETH_DBF_TEXT(setup, 2, "break2");
6525		return rc;
6526	} else if (rc) {
6527		QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
6528		if (--retries < 0)
6529			goto out;
6530		else
6531			goto retry;
6532	}
6533	rc = qeth_idx_activate_channel(&card->write, qeth_idx_write_cb);
6534	if (rc == -ERESTARTSYS) {
6535		QETH_DBF_TEXT(setup, 2, "break3");
6536		return rc;
6537	} else if (rc) {
6538		QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
6539		if (--retries < 0)
6540			goto out;
6541		else
6542			goto retry;
6543	}
6544	if ((rc = qeth_mpc_initialize(card))){
6545		QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
6546		goto out;
6547	}
6548	/*network device will be recovered*/
6549	if (card->dev) {
6550		card->dev->hard_header = card->orig_hard_header;
6551		if (card->options.fake_ll &&
6552		    (qeth_get_netdev_flags(card) & IFF_NOARP))
6553			card->dev->hard_header = qeth_fake_header;
6554		return 0;
6555	}
6556	/* on the first set_online, allocate the netdev */
6557	card->dev = qeth_get_netdevice(card->info.type,
6558				       card->info.link_type);
6559	if (!card->dev){
6560		qeth_qdio_clear_card(card, card->info.type !=
6561				     QETH_CARD_TYPE_IQD);
6562		rc = -ENODEV;
6563		QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
6564		goto out;
6565	}
6566	card->dev->priv = card;
6567	card->orig_hard_header = card->dev->hard_header;
6568	card->dev->type = qeth_get_arphdr_type(card->info.type,
6569					       card->info.link_type);
6570	card->dev->init = qeth_netdev_init;
6571	return 0;
6572out:
6573	PRINT_ERR("Initialization in hardsetup failed! rc=%d\n", rc);
6574	return rc;
6575}
6576
6577static int
6578qeth_default_setassparms_cb(struct qeth_card *card, struct qeth_reply *reply,
6579			    unsigned long data)
6580{
6581	struct qeth_ipa_cmd *cmd;
6582
6583	QETH_DBF_TEXT(trace,4,"defadpcb");
6584
6585	cmd = (struct qeth_ipa_cmd *) data;
6586	if (cmd->hdr.return_code == 0){
6587		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
6588		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
6589			card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
6590#ifdef CONFIG_QETH_IPV6
6591		if (cmd->hdr.prot_version == QETH_PROT_IPV6)
6592			card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
6593#endif
6594	}
6595	if (cmd->data.setassparms.hdr.assist_no == IPA_INBOUND_CHECKSUM &&
6596	    cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
6597		card->info.csum_mask = cmd->data.setassparms.data.flags_32bit;
6598		QETH_DBF_TEXT_(trace, 3, "csum:%d", card->info.csum_mask);
6599	}
6600	return 0;
6601}
6602
6603static int
6604qeth_default_setadapterparms_cb(struct qeth_card *card,
6605				struct qeth_reply *reply,
6606				unsigned long data)
6607{
6608	struct qeth_ipa_cmd *cmd;
6609
6610	QETH_DBF_TEXT(trace,4,"defadpcb");
6611
6612	cmd = (struct qeth_ipa_cmd *) data;
6613	if (cmd->hdr.return_code == 0)
6614		cmd->hdr.return_code = cmd->data.setadapterparms.hdr.return_code;
6615	return 0;
6616}
6617
6618
6619
6620static int
6621qeth_query_setadapterparms_cb(struct qeth_card *card, struct qeth_reply *reply,
6622			      unsigned long data)
6623{
6624	struct qeth_ipa_cmd *cmd;
6625
6626	QETH_DBF_TEXT(trace,3,"quyadpcb");
6627
6628	cmd = (struct qeth_ipa_cmd *) data;
6629	if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f)
6630		card->info.link_type =
6631		      cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
6632	card->options.adp.supported_funcs =
6633		cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
6634	return qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
6635}
6636
6637static int
6638qeth_query_setadapterparms(struct qeth_card *card)
6639{
6640	int rc;
6641	struct qeth_cmd_buffer *iob;
6642
6643	QETH_DBF_TEXT(trace,3,"queryadp");
6644	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
6645				   sizeof(struct qeth_ipacmd_setadpparms));
6646	rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
6647	return rc;
6648}
6649
6650static int
6651qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
6652				   struct qeth_reply *reply,
6653				   unsigned long data)
6654{
6655	struct qeth_ipa_cmd *cmd;
6656
6657	QETH_DBF_TEXT(trace,4,"chgmaccb");
6658
6659	cmd = (struct qeth_ipa_cmd *) data;
6660	if (!card->options.layer2 ||
6661	    !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) {
6662		memcpy(card->dev->dev_addr,
6663		       &cmd->data.setadapterparms.data.change_addr.addr,
6664		       OSA_ADDR_LEN);
6665		card->info.mac_bits |= QETH_LAYER2_MAC_READ;
6666	}
6667	qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
6668	return 0;
6669}
6670
6671static int
6672qeth_setadpparms_change_macaddr(struct qeth_card *card)
6673{
6674	int rc;
6675	struct qeth_cmd_buffer *iob;
6676	struct qeth_ipa_cmd *cmd;
6677
6678	QETH_DBF_TEXT(trace,4,"chgmac");
6679
6680	iob = qeth_get_adapter_cmd(card,IPA_SETADP_ALTER_MAC_ADDRESS,
6681				   sizeof(struct qeth_ipacmd_setadpparms));
6682	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6683	cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
6684	cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN;
6685	memcpy(&cmd->data.setadapterparms.data.change_addr.addr,
6686	       card->dev->dev_addr, OSA_ADDR_LEN);
6687	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
6688			       NULL);
6689	return rc;
6690}
6691
6692static int
6693qeth_send_setadp_mode(struct qeth_card *card, __u32 command, __u32 mode)
6694{
6695	int rc;
6696	struct qeth_cmd_buffer *iob;
6697	struct qeth_ipa_cmd *cmd;
6698
6699	QETH_DBF_TEXT(trace,4,"adpmode");
6700
6701	iob = qeth_get_adapter_cmd(card, command,
6702				   sizeof(struct qeth_ipacmd_setadpparms));
6703	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6704	cmd->data.setadapterparms.data.mode = mode;
6705	rc = qeth_send_ipa_cmd(card, iob, qeth_default_setadapterparms_cb,
6706			       NULL);
6707	return rc;
6708}
6709
6710static int
6711qeth_setadapter_hstr(struct qeth_card *card)
6712{
6713	int rc;
6714
6715	QETH_DBF_TEXT(trace,4,"adphstr");
6716
6717	if (qeth_adp_supported(card,IPA_SETADP_SET_BROADCAST_MODE)) {
6718		rc = qeth_send_setadp_mode(card, IPA_SETADP_SET_BROADCAST_MODE,
6719					   card->options.broadcast_mode);
6720		if (rc)
6721			PRINT_WARN("couldn't set broadcast mode on "
6722				   "device %s: x%x\n",
6723				   CARD_BUS_ID(card), rc);
6724		rc = qeth_send_setadp_mode(card, IPA_SETADP_ALTER_MAC_ADDRESS,
6725					   card->options.macaddr_mode);
6726		if (rc)
6727			PRINT_WARN("couldn't set macaddr mode on "
6728				   "device %s: x%x\n", CARD_BUS_ID(card), rc);
6729		return rc;
6730	}
6731	if (card->options.broadcast_mode == QETH_TR_BROADCAST_LOCAL)
6732		PRINT_WARN("set adapter parameters not available "
6733			   "to set broadcast mode, using ALLRINGS "
6734			   "on device %s:\n", CARD_BUS_ID(card));
6735	if (card->options.macaddr_mode == QETH_TR_MACADDR_CANONICAL)
6736		PRINT_WARN("set adapter parameters not available "
6737			   "to set macaddr mode, using NONCANONICAL "
6738			   "on device %s:\n", CARD_BUS_ID(card));
6739	return 0;
6740}
6741
6742static int
6743qeth_setadapter_parms(struct qeth_card *card)
6744{
6745	int rc;
6746
6747	QETH_DBF_TEXT(setup, 2, "setadprm");
6748
6749	if (!qeth_is_supported(card, IPA_SETADAPTERPARMS)){
6750		PRINT_WARN("set adapter parameters not supported "
6751			   "on device %s.\n",
6752			   CARD_BUS_ID(card));
6753		QETH_DBF_TEXT(setup, 2, " notsupp");
6754		return 0;
6755	}
6756	rc = qeth_query_setadapterparms(card);
6757	if (rc) {
6758		PRINT_WARN("couldn't query adapter parameters on device %s: "
6759			   "x%x\n", CARD_BUS_ID(card), rc);
6760		return rc;
6761	}
6762	if (qeth_adp_supported(card,IPA_SETADP_ALTER_MAC_ADDRESS)) {
6763		rc = qeth_setadpparms_change_macaddr(card);
6764		if (rc)
6765			PRINT_WARN("couldn't get MAC address on "
6766				   "device %s: x%x\n",
6767				   CARD_BUS_ID(card), rc);
6768	}
6769
6770	if ((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
6771	    (card->info.link_type == QETH_LINK_TYPE_LANE_TR))
6772		rc = qeth_setadapter_hstr(card);
6773
6774	return rc;
6775}
6776
6777static int
6778qeth_layer2_initialize(struct qeth_card *card)
6779{
6780	int rc = 0;
6781
6782
6783	QETH_DBF_TEXT(setup, 2, "doL2init");
6784	QETH_DBF_TEXT_(setup, 2, "doL2%s", CARD_BUS_ID(card));
6785
6786	rc = qeth_query_setadapterparms(card);
6787	if (rc) {
6788		PRINT_WARN("could not query adapter parameters on device %s: "
6789			   "x%x\n", CARD_BUS_ID(card), rc);
6790	}
6791
6792	rc = qeth_setadpparms_change_macaddr(card);
6793	if (rc) {
6794		PRINT_WARN("couldn't get MAC address on "
6795			   "device %s: x%x\n",
6796			   CARD_BUS_ID(card), rc);
6797		QETH_DBF_TEXT_(setup, 2,"1err%d",rc);
6798		return rc;
6799	}
6800	QETH_DBF_HEX(setup,2, card->dev->dev_addr, OSA_ADDR_LEN);
6801
6802	rc = qeth_layer2_send_setmac(card, &card->dev->dev_addr[0]);
6803	if (rc)
6804		QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
6805	return 0;
6806}
6807
6808
6809static int
6810qeth_send_startstoplan(struct qeth_card *card, enum qeth_ipa_cmds ipacmd,
6811		       enum qeth_prot_versions prot)
6812{
6813	int rc;
6814	struct qeth_cmd_buffer *iob;
6815
6816	iob = qeth_get_ipacmd_buffer(card,ipacmd,prot);
6817	rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
6818
6819	return rc;
6820}
6821
6822static int
6823qeth_send_startlan(struct qeth_card *card, enum qeth_prot_versions prot)
6824{
6825	int rc;
6826
6827	QETH_DBF_TEXT_(setup, 2, "strtlan%i", prot);
6828
6829	rc = qeth_send_startstoplan(card, IPA_CMD_STARTLAN, prot);
6830	return rc;
6831}
6832
6833static int
6834qeth_send_stoplan(struct qeth_card *card)
6835{
6836	int rc = 0;
6837
6838	/*
6839	 * TODO: according to the IPA format document, page 14,
6840	 * TCP/IP (we!) never issues a STOPLAN;
6841	 * is this right?!
6842	 */
6843	QETH_DBF_TEXT(trace, 2, "stoplan");
6844
6845	rc = qeth_send_startstoplan(card, IPA_CMD_STOPLAN, QETH_PROT_IPV4);
6846	return rc;
6847}
6848
6849static int
6850qeth_query_ipassists_cb(struct qeth_card *card, struct qeth_reply *reply,
6851			unsigned long data)
6852{
6853	struct qeth_ipa_cmd *cmd;
6854
6855	QETH_DBF_TEXT(setup, 2, "qipasscb");
6856
6857	cmd = (struct qeth_ipa_cmd *) data;
6858	if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
6859		card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
6860		card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
6861		/* IPv6 support is hard-coded off for HiperSockets */
6862		if(card->info.type == QETH_CARD_TYPE_IQD)
6863			card->options.ipa4.supported_funcs &= ~IPA_IPV6;
6864	} else {
6865#ifdef CONFIG_QETH_IPV6
6866		card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
6867		card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
6868#endif
6869	}
6870	QETH_DBF_TEXT(setup, 2, "suppenbl");
6871	QETH_DBF_TEXT_(setup, 2, "%x",cmd->hdr.ipa_supported);
6872	QETH_DBF_TEXT_(setup, 2, "%x",cmd->hdr.ipa_enabled);
6873	return 0;
6874}
6875
6876static int
6877qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot)
6878{
6879	int rc;
6880	struct qeth_cmd_buffer *iob;
6881
6882	QETH_DBF_TEXT_(setup, 2, "qipassi%i", prot);
6883	if (card->options.layer2) {
6884		QETH_DBF_TEXT(setup, 2, "noprmly2");
6885		return -EPERM;
6886	}
6887
6888	iob = qeth_get_ipacmd_buffer(card,IPA_CMD_QIPASSIST,prot);
6889	rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
6890	return rc;
6891}
6892
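/**
 * build a SETASSPARMS command buffer for the given IP assist,
 * command code and parameter length
 */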
6893static struct qeth_cmd_buffer *
6894qeth_get_setassparms_cmd(struct qeth_card *card, enum qeth_ipa_funcs ipa_func,
6895			 __u16 cmd_code, __u16 len,
6896			 enum qeth_prot_versions prot)
6897{
6898	struct qeth_cmd_buffer *iob;
6899	struct qeth_ipa_cmd *cmd;
6900
6901	QETH_DBF_TEXT(trace,4,"getasscm");
6902	iob = qeth_get_ipacmd_buffer(card,IPA_CMD_SETASSPARMS,prot);
6903
6904	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6905	cmd->data.setassparms.hdr.assist_no = ipa_func;
6906	cmd->data.setassparms.hdr.length = 8 + len;
6907	cmd->data.setassparms.hdr.command_code = cmd_code;
6908	cmd->data.setassparms.hdr.return_code = 0;
6909	cmd->data.setassparms.hdr.seq_no = 0;
6910
6911	return iob;
6912}
6913
6914static int
6915qeth_send_setassparms(struct qeth_card *card, struct qeth_cmd_buffer *iob,
6916		      __u16 len, long data,
6917		      int (*reply_cb)
6918		      (struct qeth_card *,struct qeth_reply *,unsigned long),
6919		      void *reply_param)
6920{
6921	int rc;
6922	struct qeth_ipa_cmd *cmd;
6923
6924	QETH_DBF_TEXT(trace,4,"sendassp");
6925
6926	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6927	if (len <= sizeof(__u32))
6928		cmd->data.setassparms.data.flags_32bit = (__u32) data;
6929	else   /* (len > sizeof(__u32)) */
6930		memcpy(&cmd->data.setassparms.data, (void *) data, len);
6931
6932	rc = qeth_send_ipa_cmd(card, iob, reply_cb, reply_param);
6933	return rc;
6934}
6935
6936#ifdef CONFIG_QETH_IPV6
6937static int
6938qeth_send_simple_setassparms_ipv6(struct qeth_card *card,
6939				  enum qeth_ipa_funcs ipa_func, __u16 cmd_code)
6940
6941{
6942	int rc;
6943	struct qeth_cmd_buffer *iob;
6944
6945	QETH_DBF_TEXT(trace,4,"simassp6");
6946	iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
6947				       0, QETH_PROT_IPV6);
6948	rc = qeth_send_setassparms(card, iob, 0, 0,
6949				   qeth_default_setassparms_cb, NULL);
6950	return rc;
6951}
6952#endif
6953
6954static int
6955qeth_send_simple_setassparms(struct qeth_card *card,
6956			     enum qeth_ipa_funcs ipa_func,
6957			     __u16 cmd_code, long data)
6958{
6959	int rc;
6960	int length = 0;
6961	struct qeth_cmd_buffer *iob;
6962
6963	QETH_DBF_TEXT(trace,4,"simassp4");
6964	if (data)
6965		length = sizeof(__u32);
6966	iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
6967				       length, QETH_PROT_IPV4);
6968	rc = qeth_send_setassparms(card, iob, length, data,
6969				   qeth_default_setassparms_cb, NULL);
6970	return rc;
6971}
6972
6973static int
6974qeth_start_ipa_arp_processing(struct qeth_card *card)
6975{
6976	int rc;
6977
6978	QETH_DBF_TEXT(trace,3,"ipaarp");
6979
6980	if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
6981		PRINT_WARN("ARP processing not supported "
6982			   "on %s!\n", QETH_CARD_IFNAME(card));
6983		return 0;
6984	}
6985	rc = qeth_send_simple_setassparms(card,IPA_ARP_PROCESSING,
6986					  IPA_CMD_ASS_START, 0);
6987	if (rc) {
6988		PRINT_WARN("Could not start ARP processing "
6989			   "assist on %s: 0x%x\n",
6990			   QETH_CARD_IFNAME(card), rc);
6991	}
6992	return rc;
6993}
6994
6995static int
6996qeth_start_ipa_ip_fragmentation(struct qeth_card *card)
6997{
6998	int rc;
6999
7000	QETH_DBF_TEXT(trace,3,"ipaipfrg");
7001
7002	if (!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) {
7003		PRINT_INFO("Hardware IP fragmentation not supported on %s\n",
7004			   QETH_CARD_IFNAME(card));
7005		return  -EOPNOTSUPP;
7006	}
7007
7008	rc = qeth_send_simple_setassparms(card, IPA_IP_FRAGMENTATION,
7009					  IPA_CMD_ASS_START, 0);
7010	if (rc) {
7011		PRINT_WARN("Could not start Hardware IP fragmentation "
7012			   "assist on %s: 0x%x\n",
7013			   QETH_CARD_IFNAME(card), rc);
7014	} else
7015		PRINT_INFO("Hardware IP fragmentation enabled\n");
7016	return rc;
7017}
7018
7019static int
7020qeth_start_ipa_source_mac(struct qeth_card *card)
7021{
7022	int rc;
7023
7024	QETH_DBF_TEXT(trace,3,"stsrcmac");
7025
7026	if (!card->options.fake_ll)
7027		return -EOPNOTSUPP;
7028
7029	if (!qeth_is_supported(card, IPA_SOURCE_MAC)) {
7030		PRINT_INFO("Inbound source address not "
7031			   "supported on %s\n", QETH_CARD_IFNAME(card));
7032		return -EOPNOTSUPP;
7033	}
7034
7035	rc = qeth_send_simple_setassparms(card, IPA_SOURCE_MAC,
7036					  IPA_CMD_ASS_START, 0);
7037	if (rc)
7038		PRINT_WARN("Could not start inbound source "
7039			   "assist on %s: 0x%x\n",
7040			   QETH_CARD_IFNAME(card), rc);
7041	return rc;
7042}
7043
7044static int
7045qeth_start_ipa_vlan(struct qeth_card *card)
7046{
7047	int rc = 0;
7048
7049	QETH_DBF_TEXT(trace,3,"strtvlan");
7050
7051#ifdef CONFIG_QETH_VLAN
7052	if (!qeth_is_supported(card, IPA_FULL_VLAN)) {
7053		PRINT_WARN("VLAN not supported on %s\n", QETH_CARD_IFNAME(card));
7054		return -EOPNOTSUPP;
7055	}
7056
7057	rc = qeth_send_simple_setassparms(card, IPA_VLAN_PRIO,
7058					  IPA_CMD_ASS_START,0);
7059	if (rc) {
7060		PRINT_WARN("Could not start vlan "
7061			   "assist on %s: 0x%x\n",
7062			   QETH_CARD_IFNAME(card), rc);
7063	} else {
7064		PRINT_INFO("VLAN enabled\n");
7065		card->dev->features |=
7066			NETIF_F_HW_VLAN_FILTER |
7067			NETIF_F_HW_VLAN_TX |
7068			NETIF_F_HW_VLAN_RX;
7069	}
7070#endif /* QETH_VLAN */
7071	return rc;
7072}
7073
7074static int
7075qeth_start_ipa_multicast(struct qeth_card *card)
7076{
7077	int rc;
7078
7079	QETH_DBF_TEXT(trace,3,"stmcast");
7080
7081	if (!qeth_is_supported(card, IPA_MULTICASTING)) {
7082		PRINT_WARN("Multicast not supported on %s\n",
7083			   QETH_CARD_IFNAME(card));
7084		return -EOPNOTSUPP;
7085	}
7086
7087	rc = qeth_send_simple_setassparms(card, IPA_MULTICASTING,
7088					  IPA_CMD_ASS_START,0);
7089	if (rc) {
7090		PRINT_WARN("Could not start multicast "
7091			   "assist on %s: rc=%i\n",
7092			   QETH_CARD_IFNAME(card), rc);
7093	} else {
7094		PRINT_INFO("Multicast enabled\n");
7095		card->dev->flags |= IFF_MULTICAST;
7096	}
7097	return rc;
7098}
7099
7100#ifdef CONFIG_QETH_IPV6
7101static int
7102qeth_softsetup_ipv6(struct qeth_card *card)
7103{
7104	int rc;
7105
7106	QETH_DBF_TEXT(trace,3,"softipv6");
7107
7108	rc = qeth_send_startlan(card, QETH_PROT_IPV6);
7109	if (rc) {
7110		PRINT_ERR("IPv6 startlan failed on %s\n",
7111			  QETH_CARD_IFNAME(card));
7112		return rc;
7113	}
7114	rc = qeth_query_ipassists(card,QETH_PROT_IPV6);
7115	if (rc) {
7116		PRINT_ERR("IPv6 query ipassist failed on %s\n",
7117			  QETH_CARD_IFNAME(card));
7118		return rc;
7119	}
7120	rc = qeth_send_simple_setassparms(card, IPA_IPV6,
7121					  IPA_CMD_ASS_START, 3);
7122	if (rc) {
7123		PRINT_WARN("IPv6 start assist (version 4) failed "
7124			   "on %s: 0x%x\n",
7125			   QETH_CARD_IFNAME(card), rc);
7126		return rc;
7127	}
7128	rc = qeth_send_simple_setassparms_ipv6(card, IPA_IPV6,
7129					       IPA_CMD_ASS_START);
7130	if (rc) {
7131		PRINT_WARN("IPV6 start assist (version 6) failed "
7132			   "on %s: 0x%x\n",
7133			   QETH_CARD_IFNAME(card), rc);
7134		return rc;
7135	}
7136	rc = qeth_send_simple_setassparms_ipv6(card, IPA_PASSTHRU,
7137					       IPA_CMD_ASS_START);
7138	if (rc) {
7139		PRINT_WARN("Could not enable passthrough "
7140			   "on %s: 0x%x\n",
7141			   QETH_CARD_IFNAME(card), rc);
7142		return rc;
7143	}
7144	PRINT_INFO("IPV6 enabled\n");
7145	return 0;
7146}
7147
7148#endif
7149
7150static int
7151qeth_start_ipa_ipv6(struct qeth_card *card)
7152{
7153	int rc = 0;
7154#ifdef CONFIG_QETH_IPV6
7155	QETH_DBF_TEXT(trace,3,"strtipv6");
7156
7157	if (!qeth_is_supported(card, IPA_IPV6)) {
7158		PRINT_WARN("IPv6 not supported on %s\n",
7159			   QETH_CARD_IFNAME(card));
7160		return 0;
7161	}
7162	rc = qeth_softsetup_ipv6(card);
7163#endif
7164	return rc ;
7165}
7166
7167static int
7168qeth_start_ipa_broadcast(struct qeth_card *card)
7169{
7170	int rc;
7171
7172	QETH_DBF_TEXT(trace,3,"stbrdcst");
7173	card->info.broadcast_capable = 0;
7174	if (!qeth_is_supported(card, IPA_FILTERING)) {
7175		PRINT_WARN("Broadcast not supported on %s\n",
7176			   QETH_CARD_IFNAME(card));
7177		rc = -EOPNOTSUPP;
7178		goto out;
7179	}
7180	rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
7181					  IPA_CMD_ASS_START, 0);
7182	if (rc) {
7183		PRINT_WARN("Could not enable broadcast filtering "
7184			   "on %s: 0x%x\n",
7185			   QETH_CARD_IFNAME(card), rc);
7186		goto out;
7187	}
7188
7189	rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
7190					  IPA_CMD_ASS_CONFIGURE, 1);
7191	if (rc) {
7192		PRINT_WARN("Could not set up broadcast filtering on %s: 0x%x\n",
7193			   QETH_CARD_IFNAME(card), rc);
7194		goto out;
7195	}
7196	card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO;
7197	PRINT_INFO("Broadcast enabled\n");
7198	rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
7199					  IPA_CMD_ASS_ENABLE, 1);
7200	if (rc) {
7201		PRINT_WARN("Could not set up broadcast echo filtering on "
7202			   "%s: 0x%x\n", QETH_CARD_IFNAME(card), rc);
7203		goto out;
7204	}
7205	card->info.broadcast_capable = QETH_BROADCAST_WITHOUT_ECHO;
7206out:
7207	if (card->info.broadcast_capable)
7208		card->dev->flags |= IFF_BROADCAST;
7209	else
7210		card->dev->flags &= ~IFF_BROADCAST;
7211	return rc;
7212}
7213
7214static int
7215qeth_send_checksum_command(struct qeth_card *card)
7216{
7217	int rc;
7218
7219	rc = qeth_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
7220					  IPA_CMD_ASS_START, 0);
7221	if (rc) {
7222		PRINT_WARN("Starting Inbound HW Checksumming failed on %s: "
7223			   "0x%x,\ncontinuing using Inbound SW Checksumming\n",
7224			   QETH_CARD_IFNAME(card), rc);
7225		return rc;
7226	}
7227	rc = qeth_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
7228					  IPA_CMD_ASS_ENABLE,
7229					  card->info.csum_mask);
7230	if (rc) {
7231		PRINT_WARN("Enabling Inbound HW Checksumming failed on %s: "
7232			   "0x%x,\ncontinuing using Inbound SW Checksumming\n",
7233			   QETH_CARD_IFNAME(card), rc);
7234		return rc;
7235	}
7236	return 0;
7237}
7238
7239static int
7240qeth_start_ipa_checksum(struct qeth_card *card)
7241{
7242	int rc = 0;
7243
7244	QETH_DBF_TEXT(trace,3,"strtcsum");
7245
7246	if (card->options.checksum_type == NO_CHECKSUMMING) {
7247		PRINT_WARN("Using no checksumming on %s.\n",
7248			   QETH_CARD_IFNAME(card));
7249		return 0;
7250	}
7251	if (card->options.checksum_type == SW_CHECKSUMMING) {
7252		PRINT_WARN("Using SW checksumming on %s.\n",
7253			   QETH_CARD_IFNAME(card));
7254		return 0;
7255	}
7256	if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) {
7257		PRINT_WARN("Inbound HW Checksumming not "
7258			   "supported on %s,\ncontinuing "
7259			   "using Inbound SW Checksumming\n",
7260			   QETH_CARD_IFNAME(card));
7261		card->options.checksum_type = SW_CHECKSUMMING;
7262		return 0;
7263	}
7264	rc = qeth_send_checksum_command(card);
7265	if (!rc) {
7266		PRINT_INFO("HW Checksumming (inbound) enabled\n");
7267	}
7268	return rc;
7269}
7270
7271static int
7272qeth_start_ipa_tso(struct qeth_card *card)
7273{
7274	int rc;
7275
7276	QETH_DBF_TEXT(trace,3,"sttso");
7277
7278	if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
7279		PRINT_WARN("Outbound TSO not supported on %s\n",
7280			   QETH_CARD_IFNAME(card));
7281		rc = -EOPNOTSUPP;
7282	} else {
7283		rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_TSO,
7284						  IPA_CMD_ASS_START,0);
7285		if (rc)
7286			PRINT_WARN("Could not start outbound TSO "
7287				   "assist on %s: rc=%i\n",
7288				   QETH_CARD_IFNAME(card), rc);
7289		else
7290			PRINT_INFO("Outbound TSO enabled\n");
7291	}
7292	if (rc && (card->options.large_send == QETH_LARGE_SEND_TSO)){
7293		card->options.large_send = QETH_LARGE_SEND_NO;
7294		card->dev->features &= ~ (NETIF_F_TSO | NETIF_F_SG);
7295	}
7296	return rc;
7297}
7298
7299static int
7300qeth_start_ipassists(struct qeth_card *card)
7301{
7302	QETH_DBF_TEXT(trace,3,"strtipas");
7303	qeth_start_ipa_arp_processing(card);	/* go on*/
7304	qeth_start_ipa_ip_fragmentation(card); 	/* go on*/
7305	qeth_start_ipa_source_mac(card);	/* go on*/
7306	qeth_start_ipa_vlan(card);		/* go on*/
7307	qeth_start_ipa_multicast(card);		/* go on*/
7308	qeth_start_ipa_ipv6(card);		/* go on*/
7309	qeth_start_ipa_broadcast(card);		/* go on*/
7310	qeth_start_ipa_checksum(card);		/* go on*/
7311	qeth_start_ipa_tso(card);		/* go on*/
7312	return 0;
7313}
7314
7315static int
7316qeth_send_setrouting(struct qeth_card *card, enum qeth_routing_types type,
7317		     enum qeth_prot_versions prot)
7318{
7319	int rc;
7320	struct qeth_ipa_cmd *cmd;
7321	struct qeth_cmd_buffer *iob;
7322
7323	QETH_DBF_TEXT(trace,4,"setroutg");
7324	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot);
7325	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
7326	cmd->data.setrtg.type = (type);
7327	rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
7328
7329	return rc;
7330
7331}
7332
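/**
 * check the requested routing type against the card type: HiperSockets
 * (IQD) accepts the connector and multicast router settings, OSA accepts
 * the router settings (multicast router only if the IP assist supports
 * it); anything else falls back to 'no router'
 */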
7333static void
7334qeth_correct_routing_type(struct qeth_card *card, enum qeth_routing_types *type,
7335			enum qeth_prot_versions prot)
7336{
7337	if (card->info.type == QETH_CARD_TYPE_IQD) {
7338		switch (*type) {
7339		case NO_ROUTER:
7340		case PRIMARY_CONNECTOR:
7341		case SECONDARY_CONNECTOR:
7342		case MULTICAST_ROUTER:
7343			return;
7344		default:
7345			goto out_inval;
7346		}
7347	} else {
7348		switch (*type) {
7349		case NO_ROUTER:
7350		case PRIMARY_ROUTER:
7351		case SECONDARY_ROUTER:
7352			return;
7353		case MULTICAST_ROUTER:
7354			if (qeth_is_ipafunc_supported(card, prot,
7355						      IPA_OSA_MC_ROUTER))
7356				return;
7357		default:
7358			goto out_inval;
7359		}
7360	}
7361out_inval:
7362	PRINT_WARN("Routing type '%s' not supported for interface %s.\n"
7363		   "Router status set to 'no router'.\n",
7364		   ((*type == PRIMARY_ROUTER)? "primary router" :
7365		    (*type == SECONDARY_ROUTER)? "secondary router" :
7366		    (*type == PRIMARY_CONNECTOR)? "primary connector" :
7367		    (*type == SECONDARY_CONNECTOR)? "secondary connector" :
7368		    (*type == MULTICAST_ROUTER)? "multicast router" :
7369		    "unknown"),
7370		   card->dev->name);
7371	*type = NO_ROUTER;
7372}
7373
7374int
7375qeth_setrouting_v4(struct qeth_card *card)
7376{
7377	int rc;
7378
7379	QETH_DBF_TEXT(trace,3,"setrtg4");
7380
7381	qeth_correct_routing_type(card, &card->options.route4.type,
7382				  QETH_PROT_IPV4);
7383
7384	rc = qeth_send_setrouting(card, card->options.route4.type,
7385				  QETH_PROT_IPV4);
7386	if (rc) {
7387		card->options.route4.type = NO_ROUTER;
7388		PRINT_WARN("Error (0x%04x) while setting routing type on %s. "
7389			   "Type set to 'no router'.\n",
7390			   rc, QETH_CARD_IFNAME(card));
7391	}
7392	return rc;
7393}
7394
7395int
7396qeth_setrouting_v6(struct qeth_card *card)
7397{
7398	int rc = 0;
7399
7400	QETH_DBF_TEXT(trace,3,"setrtg6");
7401#ifdef CONFIG_QETH_IPV6
7402
7403	if (!qeth_is_supported(card, IPA_IPV6))
7404		return 0;
7405	qeth_correct_routing_type(card, &card->options.route6.type,
7406				  QETH_PROT_IPV6);
7407
7408	rc = qeth_send_setrouting(card, card->options.route6.type,
7409				  QETH_PROT_IPV6);
7410	if (rc) {
7411		card->options.route6.type = NO_ROUTER;
7412		PRINT_WARN("Error (0x%04x) while setting routing type on %s. "
7413			   "Type set to 'no router'.\n",
7414			   rc, QETH_CARD_IFNAME(card));
7415	}
7416#endif
7417	return rc;
7418}
7419
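/*
 * Switch the large-send (segmentation offload) mode.  The transmit queue
 * is quiesced while NETIF_F_TSO/NETIF_F_SG are toggled: EDDP always sets
 * them, TSO only if the card advertises IPA_OUTBOUND_TSO, and any other
 * value (including QETH_LARGE_SEND_NO) clears them again.  This is the
 * helper behind the driver's large_send option.
 */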
7420int
7421qeth_set_large_send(struct qeth_card *card, enum qeth_large_send_types type)
7422{
7423	int rc = 0;
7424
7425	if (card->dev == NULL) {
7426		card->options.large_send = type;
7427		return 0;
7428	}
7429	if (card->state == CARD_STATE_UP)
7430		netif_tx_disable(card->dev);
7431	card->options.large_send = type;
7432	switch (card->options.large_send) {
7433	case QETH_LARGE_SEND_EDDP:
7434		card->dev->features |= NETIF_F_TSO | NETIF_F_SG;
7435		break;
7436	case QETH_LARGE_SEND_TSO:
7437		if (qeth_is_supported(card, IPA_OUTBOUND_TSO)){
7438			card->dev->features |= NETIF_F_TSO | NETIF_F_SG;
7439		} else {
7440			PRINT_WARN("TSO not supported on %s. "
7441				   "large_send set to 'no'.\n",
7442				   card->dev->name);
7443			card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG);
7444			card->options.large_send = QETH_LARGE_SEND_NO;
7445			rc = -EOPNOTSUPP;
7446		}
7447		break;
7448	default: /* includes QETH_LARGE_SEND_NO */
7449		card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG);
7450		break;
7451	}
7452	if (card->state == CARD_STATE_UP)
7453		netif_wake_queue(card->dev);
7454	return rc;
7455}
7456
7457/*
7458 * softsetup card: init IPA stuff
7459 */
7460static int
7461qeth_softsetup_card(struct qeth_card *card)
7462{
7463	int rc;
7464
7465	QETH_DBF_TEXT(setup, 2, "softsetp");
7466
7467	if ((rc = qeth_send_startlan(card, QETH_PROT_IPV4))){
7468		QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
7469		if (rc == 0xe080){
7470			PRINT_WARN("LAN on card %s is offline! "
7471				   "Waiting for STARTLAN from card.\n",
7472				   CARD_BUS_ID(card));
7473			card->lan_online = 0;
7474		}
7475		return rc;
7476	} else
7477		card->lan_online = 1;
7478	if (card->info.type==QETH_CARD_TYPE_OSN)
7479		goto out;
7480	qeth_set_large_send(card, card->options.large_send);
7481	if (card->options.layer2) {
7482		card->dev->features |=
7483			NETIF_F_HW_VLAN_FILTER |
7484			NETIF_F_HW_VLAN_TX |
7485			NETIF_F_HW_VLAN_RX;
7486		card->dev->flags|=IFF_MULTICAST|IFF_BROADCAST;
7487		card->info.broadcast_capable=1;
7488		if ((rc = qeth_layer2_initialize(card))) {
7489			QETH_DBF_TEXT_(setup, 2, "L2err%d", rc);
7490			return rc;
7491		}
7492#ifdef CONFIG_QETH_VLAN
7493		qeth_layer2_process_vlans(card, 0);
7494#endif
7495		goto out;
7496	}
7497	if ((rc = qeth_setadapter_parms(card)))
7498		QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
7499	if ((rc = qeth_start_ipassists(card)))
7500		QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
7501	if ((rc = qeth_setrouting_v4(card)))
7502		QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
7503	if ((rc = qeth_setrouting_v6(card)))
7504		QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
7505out:
7506	netif_tx_disable(card->dev);
7507	return 0;
7508}
7509
7510#ifdef CONFIG_QETH_IPV6
7511static int
7512qeth_get_unique_id_cb(struct qeth_card *card, struct qeth_reply *reply,
7513		      unsigned long data)
7514{
7515	struct qeth_ipa_cmd *cmd;
7516
7517	cmd = (struct qeth_ipa_cmd *) data;
7518	if (cmd->hdr.return_code == 0)
7519		card->info.unique_id = *((__u16 *)
7520				&cmd->data.create_destroy_addr.unique_id[6]);
7521	else {
7522		card->info.unique_id =  UNIQUE_ID_IF_CREATE_ADDR_FAILED |
7523					UNIQUE_ID_NOT_BY_CARD;
7524		PRINT_WARN("couldn't get a unique id from the card on device "
7525			   "%s (result=x%x), using default id. ipv6 "
7526			   "autoconfig on other lpars may lead to duplicate "
7527			   "ip addresses. please use manually "
7528			   "configured ones.\n",
7529			   CARD_BUS_ID(card), cmd->hdr.return_code);
7530	}
7531	return 0;
7532}
7533#endif
7534
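/*
 * Return the IPv6 unique id to the card via IPA_CMD_DESTROY_ADDR, but only
 * if the id was actually handed out by the card (UNIQUE_ID_NOT_BY_CARD not
 * set).  Without CONFIG_QETH_IPV6 the id is simply marked as locally
 * generated.
 */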
7535static int
7536qeth_put_unique_id(struct qeth_card *card)
7537{
7538
7539	int rc = 0;
7540#ifdef CONFIG_QETH_IPV6
7541	struct qeth_cmd_buffer *iob;
7542	struct qeth_ipa_cmd *cmd;
7543
7544	QETH_DBF_TEXT(trace,2,"puniqeid");
7545
7546	if ((card->info.unique_id & UNIQUE_ID_NOT_BY_CARD) ==
7547	    	UNIQUE_ID_NOT_BY_CARD)
7548		return -1;
7549	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_DESTROY_ADDR,
7550				     QETH_PROT_IPV6);
7551	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
7552	*((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
7553		            card->info.unique_id;
7554	memcpy(&cmd->data.create_destroy_addr.unique_id[0],
7555	       card->dev->dev_addr, OSA_ADDR_LEN);
7556	rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
7557#else
7558	card->info.unique_id =  UNIQUE_ID_IF_CREATE_ADDR_FAILED |
7559				UNIQUE_ID_NOT_BY_CARD;
7560#endif
7561	return rc;
7562}
7563
7564/**
7565 * Clear IP List
7566 */
7567static void
7568qeth_clear_ip_list(struct qeth_card *card, int clean, int recover)
7569{
7570	struct qeth_ipaddr *addr, *tmp;
7571	unsigned long flags;
7572
7573	QETH_DBF_TEXT(trace,4,"clearip");
7574	spin_lock_irqsave(&card->ip_lock, flags);
7575	/* clear todo list */
7576	list_for_each_entry_safe(addr, tmp, card->ip_tbd_list, entry){
7577		list_del(&addr->entry);
7578		kfree(addr);
7579	}
7580
7581	while (!list_empty(&card->ip_list)) {
7582		addr = list_entry(card->ip_list.next,
7583				  struct qeth_ipaddr, entry);
7584		list_del_init(&addr->entry);
7585		if (clean) {
7586			spin_unlock_irqrestore(&card->ip_lock, flags);
7587			qeth_deregister_addr_entry(card, addr);
7588			spin_lock_irqsave(&card->ip_lock, flags);
7589		}
7590		if (!recover || addr->is_multicast) {
7591			kfree(addr);
7592			continue;
7593		}
7594		list_add_tail(&addr->entry, card->ip_tbd_list);
7595	}
7596	spin_unlock_irqrestore(&card->ip_lock, flags);
7597}
7598
7599static void
7600qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
7601			 int clear_start_mask)
7602{
7603	unsigned long flags;
7604
7605	spin_lock_irqsave(&card->thread_mask_lock, flags);
7606	card->thread_allowed_mask = threads;
7607	if (clear_start_mask)
7608		card->thread_start_mask &= threads;
7609	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
7610	wake_up(&card->wait_q);
7611}
7612
7613static int
7614qeth_threads_running(struct qeth_card *card, unsigned long threads)
7615{
7616	unsigned long flags;
7617	int rc = 0;
7618
7619	spin_lock_irqsave(&card->thread_mask_lock, flags);
7620	rc = (card->thread_running_mask & threads);
7621	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
7622	return rc;
7623}
7624
7625static int
7626qeth_wait_for_threads(struct qeth_card *card, unsigned long threads)
7627{
7628	return wait_event_interruptible(card->wait_q,
7629			qeth_threads_running(card, threads) == 0);
7630}
7631
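/*
 * Tear the card down along its state machine: UP -> SOFTSETUP ->
 * HARDSETUP -> DOWN.  On a "soft" stop (use_hard_stop == 0) the layer 2
 * MAC is removed, STOPLAN is sent and the IPv6 unique id is returned to
 * the card; a hard stop skips those steps.  Recoverable (non-multicast)
 * addresses are queued on the todo list so a later start can restore them.
 */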
7632static int
7633qeth_stop_card(struct qeth_card *card, int recovery_mode)
7634{
7635	int rc = 0;
7636
7637	QETH_DBF_TEXT(setup ,2,"stopcard");
7638	QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
7639
7640	qeth_set_allowed_threads(card, 0, 1);
7641	if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD))
7642		return -ERESTARTSYS;
7643	if (card->read.state == CH_STATE_UP &&
7644	    card->write.state == CH_STATE_UP &&
7645	    (card->state == CARD_STATE_UP)) {
7646		if (recovery_mode &&
7647		    card->info.type != QETH_CARD_TYPE_OSN) {
7648			qeth_stop(card->dev);
7649		} else {
7650			rtnl_lock();
7651			dev_close(card->dev);
7652			rtnl_unlock();
7653		}
7654		if (!card->use_hard_stop) {
7655			__u8 *mac = &card->dev->dev_addr[0];
7656			rc = qeth_layer2_send_delmac(card, mac);
7657			QETH_DBF_TEXT_(setup, 2, "Lerr%d", rc);
7658			if ((rc = qeth_send_stoplan(card)))
7659				QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
7660		}
7661		card->state = CARD_STATE_SOFTSETUP;
7662	}
7663	if (card->state == CARD_STATE_SOFTSETUP) {
7664#ifdef CONFIG_QETH_VLAN
7665		if (card->options.layer2)
7666			qeth_layer2_process_vlans(card, 1);
7667#endif
7668		qeth_clear_ip_list(card, !card->use_hard_stop, 1);
7669		qeth_clear_ipacmd_list(card);
7670		card->state = CARD_STATE_HARDSETUP;
7671	}
7672	if (card->state == CARD_STATE_HARDSETUP) {
7673		if ((!card->use_hard_stop) &&
7674		    (!card->options.layer2))
7675			if ((rc = qeth_put_unique_id(card)))
7676				QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
7677		qeth_qdio_clear_card(card, 0);
7678		qeth_clear_qdio_buffers(card);
7679		qeth_clear_working_pool_list(card);
7680		card->state = CARD_STATE_DOWN;
7681	}
7682	if (card->state == CARD_STATE_DOWN) {
7683		qeth_clear_cmd_buffers(&card->read);
7684		qeth_clear_cmd_buffers(&card->write);
7685	}
7686	card->use_hard_stop = 0;
7687	return rc;
7688}
7689
7690
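/*
 * Ask the card for an IPv6 unique id via IPA_CMD_CREATE_ADDR.  If IPv6 is
 * not supported, or the command fails (see qeth_get_unique_id_cb above), a
 * default id marked UNIQUE_ID_NOT_BY_CARD is used instead, which may lead
 * to duplicate autoconfigured addresses across LPARs.
 */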
7691static int
7692qeth_get_unique_id(struct qeth_card *card)
7693{
7694	int rc = 0;
7695#ifdef CONFIG_QETH_IPV6
7696	struct qeth_cmd_buffer *iob;
7697	struct qeth_ipa_cmd *cmd;
7698
7699	QETH_DBF_TEXT(setup, 2, "guniqeid");
7700
7701	if (!qeth_is_supported(card,IPA_IPV6)) {
7702		card->info.unique_id =  UNIQUE_ID_IF_CREATE_ADDR_FAILED |
7703					UNIQUE_ID_NOT_BY_CARD;
7704		return 0;
7705	}
7706
7707	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
7708				     QETH_PROT_IPV6);
7709	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
7710	*((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
7711		            card->info.unique_id;
7712
7713	rc = qeth_send_ipa_cmd(card, iob, qeth_get_unique_id_cb, NULL);
7714#else
7715	card->info.unique_id =  UNIQUE_ID_IF_CREATE_ADDR_FAILED |
7716				UNIQUE_ID_NOT_BY_CARD;
7717#endif
7718	return rc;
7719}
7720static void
7721qeth_print_status_with_portname(struct qeth_card *card)
7722{
7723	char dbf_text[15];
7724	int i;
7725
7726	sprintf(dbf_text, "%s", card->info.portname + 1);
7727	for (i = 0; i < 8; i++)
7728		dbf_text[i] =
7729			(char) _ebcasc[(__u8) dbf_text[i]];
7730	dbf_text[8] = 0;
7731	printk("qeth: Device %s/%s/%s is a%s card%s%s%s\n"
7732	       "with link type %s (portname: %s)\n",
7733	       CARD_RDEV_ID(card),
7734	       CARD_WDEV_ID(card),
7735	       CARD_DDEV_ID(card),
7736	       qeth_get_cardname(card),
7737	       (card->info.mcl_level[0]) ? " (level: " : "",
7738	       (card->info.mcl_level[0]) ? card->info.mcl_level : "",
7739	       (card->info.mcl_level[0]) ? ")" : "",
7740	       qeth_get_cardname_short(card),
7741	       dbf_text);
7742
7743}
7744
7745static void
7746qeth_print_status_no_portname(struct qeth_card *card)
7747{
7748	if (card->info.portname[0])
7749		printk("qeth: Device %s/%s/%s is a%s "
7750		       "card%s%s%s\nwith link type %s "
7751		       "(no portname needed by interface).\n",
7752		       CARD_RDEV_ID(card),
7753		       CARD_WDEV_ID(card),
7754		       CARD_DDEV_ID(card),
7755		       qeth_get_cardname(card),
7756		       (card->info.mcl_level[0]) ? " (level: " : "",
7757		       (card->info.mcl_level[0]) ? card->info.mcl_level : "",
7758		       (card->info.mcl_level[0]) ? ")" : "",
7759		       qeth_get_cardname_short(card));
7760	else
7761		printk("qeth: Device %s/%s/%s is a%s "
7762		       "card%s%s%s\nwith link type %s.\n",
7763		       CARD_RDEV_ID(card),
7764		       CARD_WDEV_ID(card),
7765		       CARD_DDEV_ID(card),
7766		       qeth_get_cardname(card),
7767		       (card->info.mcl_level[0]) ? " (level: " : "",
7768		       (card->info.mcl_level[0]) ? card->info.mcl_level : "",
7769		       (card->info.mcl_level[0]) ? ")" : "",
7770		       qeth_get_cardname_short(card));
7771}
7772
7773static void
7774qeth_print_status_message(struct qeth_card *card)
7775{
7776	switch (card->info.type) {
7777	case QETH_CARD_TYPE_OSAE:
7778		/* VM uses a non-zero first character to indicate a
7779		 * HiperSockets-like reporting of the level;
7780		 * OSA sets the first character to zero.
7781		 */
7782		if (!card->info.mcl_level[0]) {
7783			sprintf(card->info.mcl_level,"%02x%02x",
7784				card->info.mcl_level[2],
7785				card->info.mcl_level[3]);
7786
7787			card->info.mcl_level[QETH_MCL_LENGTH] = 0;
7788			break;
7789		}
7790		/* fallthrough */
7791	case QETH_CARD_TYPE_IQD:
7792		if (card->info.guestlan) {
7793			card->info.mcl_level[0] = (char) _ebcasc[(__u8)
7794				card->info.mcl_level[0]];
7795			card->info.mcl_level[1] = (char) _ebcasc[(__u8)
7796				card->info.mcl_level[1]];
7797			card->info.mcl_level[2] = (char) _ebcasc[(__u8)
7798				card->info.mcl_level[2]];
7799			card->info.mcl_level[3] = (char) _ebcasc[(__u8)
7800				card->info.mcl_level[3]];
7801			card->info.mcl_level[QETH_MCL_LENGTH] = 0;
7802		}
7803		break;
7804	default:
7805		memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
7806	}
7807	if (card->info.portname_required)
7808		qeth_print_status_with_portname(card);
7809	else
7810		qeth_print_status_no_portname(card);
7811}
7812
7813static int
7814qeth_register_netdev(struct qeth_card *card)
7815{
7816	QETH_DBF_TEXT(setup, 3, "regnetd");
7817	if (card->dev->reg_state != NETREG_UNINITIALIZED)
7818		return 0;
7819	/* sysfs magic */
7820	SET_NETDEV_DEV(card->dev, &card->gdev->dev);
7821	return register_netdev(card->dev);
7822}
7823
7824static void
7825qeth_start_again(struct qeth_card *card, int recovery_mode)
7826{
7827	QETH_DBF_TEXT(setup ,2, "startag");
7828
7829	if (recovery_mode &&
7830	    card->info.type != QETH_CARD_TYPE_OSN) {
7831		qeth_open(card->dev);
7832	} else {
7833		rtnl_lock();
7834		dev_open(card->dev);
7835		rtnl_unlock();
7836	}
7837	/* this also sets saved unicast addresses */
7838	qeth_set_multicast_list(card->dev);
7839}
7840
7841
7842/* Layer 2 specific stuff */
7843#define IGNORE_PARAM_EQ(option,value,reset_value,msg) \
7844        if (card->options.option == value) { \
7845                PRINT_ERR("%s not supported with layer 2 " \
7846                          "functionality, ignoring option on read " \
7847			  "channel device %s.\n", msg, CARD_RDEV_ID(card)); \
7848                card->options.option = reset_value; \
7849        }
7850#define IGNORE_PARAM_NEQ(option,value,reset_value,msg) \
7851        if (card->options.option != value) { \
7852                PRINT_ERR("%s not supported with layer 2 " \
7853                          "functionality, ignoring option on read " \
7854			  "channel device %s.\n", msg, CARD_RDEV_ID(card)); \
7855                card->options.option = reset_value; \
7856        }
7857
7858
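/*
 * A sketch of what these macros expand to (illustrative only): the
 * IGNORE_PARAM_NEQ(fake_ll, 0, 0, "Option fake_ll is") invocation below
 * becomes roughly
 *
 *	if (card->options.fake_ll != 0) {
 *		PRINT_ERR("Option fake_ll is not supported with layer 2 "
 *			  "functionality, ignoring option on read "
 *			  "channel device %s.\n", CARD_RDEV_ID(card));
 *		card->options.fake_ll = 0;
 *	}
 *
 * i.e. any option that conflicts with layer 2 mode is reported once and
 * reset to a safe default.
 */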
7859static void qeth_make_parameters_consistent(struct qeth_card *card)
7860{
7861
7862	if (card->options.layer2 == 0)
7863		return;
7864	if (card->info.type == QETH_CARD_TYPE_OSN)
7865		return;
7866	if (card->info.type == QETH_CARD_TYPE_IQD) {
7867		PRINT_ERR("Device %s does not support layer 2 functionality."
7868			  " Ignoring layer2 option.\n", CARD_BUS_ID(card));
7869		card->options.layer2 = 0;
7870		return;
7871	}
7872	IGNORE_PARAM_NEQ(route4.type, NO_ROUTER, NO_ROUTER,
7873			 "Routing options are");
7874#ifdef CONFIG_QETH_IPV6
7875	IGNORE_PARAM_NEQ(route6.type, NO_ROUTER, NO_ROUTER,
7876			 "Routing options are");
7877#endif
7878	IGNORE_PARAM_EQ(checksum_type, HW_CHECKSUMMING,
7879			QETH_CHECKSUM_DEFAULT,
7880			"Checksumming options are");
7881	IGNORE_PARAM_NEQ(broadcast_mode, QETH_TR_BROADCAST_ALLRINGS,
7882			 QETH_TR_BROADCAST_ALLRINGS,
7883			 "Broadcast mode options are");
7884	IGNORE_PARAM_NEQ(macaddr_mode, QETH_TR_MACADDR_NONCANONICAL,
7885			 QETH_TR_MACADDR_NONCANONICAL,
7886			 "Canonical MAC addr options are");
7887	IGNORE_PARAM_NEQ(fake_broadcast, 0, 0,
7888			 "Broadcast faking options are");
7889	IGNORE_PARAM_NEQ(add_hhlen, DEFAULT_ADD_HHLEN,
7890			 DEFAULT_ADD_HHLEN, "Option add_hhlen is");
7891	IGNORE_PARAM_NEQ(fake_ll, 0, 0, "Option fake_ll is");
7892}
7893
7894
7895static int
7896__qeth_set_online(struct ccwgroup_device *gdev, int recovery_mode)
7897{
7898	struct qeth_card *card = gdev->dev.driver_data;
7899	int rc = 0;
7900	enum qeth_card_states recover_flag;
7901
7902	BUG_ON(!card);
7903	QETH_DBF_TEXT(setup ,2, "setonlin");
7904	QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
7905
7906	qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
7907	if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD)){
7908		PRINT_WARN("set_online of card %s interrupted by user!\n",
7909			   CARD_BUS_ID(card));
7910		return -ERESTARTSYS;
7911	}
7912
7913	recover_flag = card->state;
7914	if ((rc = ccw_device_set_online(CARD_RDEV(card))) ||
7915	    (rc = ccw_device_set_online(CARD_WDEV(card))) ||
7916	    (rc = ccw_device_set_online(CARD_DDEV(card)))){
7917		QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
7918		return -EIO;
7919	}
7920
7921	qeth_make_parameters_consistent(card);
7922
7923	if ((rc = qeth_hardsetup_card(card))){
7924		QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
7925		goto out_remove;
7926	}
7927	card->state = CARD_STATE_HARDSETUP;
7928
7929	if (!(rc = qeth_query_ipassists(card,QETH_PROT_IPV4)))
7930		rc = qeth_get_unique_id(card);
7931
7932	if (rc && card->options.layer2 == 0) {
7933		QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
7934		goto out_remove;
7935	}
7936	qeth_print_status_message(card);
7937	if ((rc = qeth_register_netdev(card))){
7938		QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
7939		goto out_remove;
7940	}
7941	if ((rc = qeth_softsetup_card(card))){
7942		QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
7943		goto out_remove;
7944	}
7945
7946	if ((rc = qeth_init_qdio_queues(card))){
7947		QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
7948		goto out_remove;
7949	}
7950	card->state = CARD_STATE_SOFTSETUP;
7951	netif_carrier_on(card->dev);
7952
7953	qeth_set_allowed_threads(card, 0xffffffff, 0);
7954	if (recover_flag == CARD_STATE_RECOVER)
7955		qeth_start_again(card, recovery_mode);
7956	qeth_notify_processes();
7957	return 0;
7958out_remove:
7959	card->use_hard_stop = 1;
7960	qeth_stop_card(card, 0);
7961	ccw_device_set_offline(CARD_DDEV(card));
7962	ccw_device_set_offline(CARD_WDEV(card));
7963	ccw_device_set_offline(CARD_RDEV(card));
7964	if (recover_flag == CARD_STATE_RECOVER)
7965		card->state = CARD_STATE_RECOVER;
7966	else
7967		card->state = CARD_STATE_DOWN;
7968	return -ENODEV;
7969}
7970
7971static int
7972qeth_set_online(struct ccwgroup_device *gdev)
7973{
7974	return __qeth_set_online(gdev, 0);
7975}
7976
7977static struct ccw_device_id qeth_ids[] = {
7978	{CCW_DEVICE(0x1731, 0x01), .driver_info = QETH_CARD_TYPE_OSAE},
7979	{CCW_DEVICE(0x1731, 0x05), .driver_info = QETH_CARD_TYPE_IQD},
7980	{CCW_DEVICE(0x1731, 0x06), .driver_info = QETH_CARD_TYPE_OSN},
7981	{},
7982};
7983MODULE_DEVICE_TABLE(ccw, qeth_ids);
7984
7985struct device *qeth_root_dev = NULL;
7986
7987struct ccwgroup_driver qeth_ccwgroup_driver = {
7988	.owner = THIS_MODULE,
7989	.name = "qeth",
7990	.driver_id = 0xD8C5E3C8,
7991	.probe = qeth_probe_device,
7992	.remove = qeth_remove_device,
7993	.set_online = qeth_set_online,
7994	.set_offline = qeth_set_offline,
7995};
7996
7997struct ccw_driver qeth_ccw_driver = {
7998	.name = "qeth",
7999	.ids = qeth_ids,
8000	.probe = ccwgroup_probe_ccwdev,
8001	.remove = ccwgroup_remove_ccwdev,
8002};
8003
8004
8005static void
8006qeth_unregister_dbf_views(void)
8007{
8008	if (qeth_dbf_setup)
8009		debug_unregister(qeth_dbf_setup);
8010	if (qeth_dbf_qerr)
8011		debug_unregister(qeth_dbf_qerr);
8012	if (qeth_dbf_sense)
8013		debug_unregister(qeth_dbf_sense);
8014	if (qeth_dbf_misc)
8015		debug_unregister(qeth_dbf_misc);
8016	if (qeth_dbf_data)
8017		debug_unregister(qeth_dbf_data);
8018	if (qeth_dbf_control)
8019		debug_unregister(qeth_dbf_control);
8020	if (qeth_dbf_trace)
8021		debug_unregister(qeth_dbf_trace);
8022}
8023static int
8024qeth_register_dbf_views(void)
8025{
8026	qeth_dbf_setup = debug_register(QETH_DBF_SETUP_NAME,
8027					QETH_DBF_SETUP_PAGES,
8028					QETH_DBF_SETUP_NR_AREAS,
8029					QETH_DBF_SETUP_LEN);
8030	qeth_dbf_misc = debug_register(QETH_DBF_MISC_NAME,
8031				       QETH_DBF_MISC_PAGES,
8032				       QETH_DBF_MISC_NR_AREAS,
8033				       QETH_DBF_MISC_LEN);
8034	qeth_dbf_data = debug_register(QETH_DBF_DATA_NAME,
8035				       QETH_DBF_DATA_PAGES,
8036				       QETH_DBF_DATA_NR_AREAS,
8037				       QETH_DBF_DATA_LEN);
8038	qeth_dbf_control = debug_register(QETH_DBF_CONTROL_NAME,
8039					  QETH_DBF_CONTROL_PAGES,
8040					  QETH_DBF_CONTROL_NR_AREAS,
8041					  QETH_DBF_CONTROL_LEN);
8042	qeth_dbf_sense = debug_register(QETH_DBF_SENSE_NAME,
8043					QETH_DBF_SENSE_PAGES,
8044					QETH_DBF_SENSE_NR_AREAS,
8045					QETH_DBF_SENSE_LEN);
8046	qeth_dbf_qerr = debug_register(QETH_DBF_QERR_NAME,
8047				       QETH_DBF_QERR_PAGES,
8048				       QETH_DBF_QERR_NR_AREAS,
8049				       QETH_DBF_QERR_LEN);
8050	qeth_dbf_trace = debug_register(QETH_DBF_TRACE_NAME,
8051					QETH_DBF_TRACE_PAGES,
8052					QETH_DBF_TRACE_NR_AREAS,
8053					QETH_DBF_TRACE_LEN);
8054
8055	if ((qeth_dbf_setup == NULL) || (qeth_dbf_misc == NULL)    ||
8056	    (qeth_dbf_data == NULL)  || (qeth_dbf_control == NULL) ||
8057	    (qeth_dbf_sense == NULL) || (qeth_dbf_qerr == NULL)    ||
8058	    (qeth_dbf_trace == NULL)) {
8059		qeth_unregister_dbf_views();
8060		return -ENOMEM;
8061	}
8062	debug_register_view(qeth_dbf_setup, &debug_hex_ascii_view);
8063	debug_set_level(qeth_dbf_setup, QETH_DBF_SETUP_LEVEL);
8064
8065	debug_register_view(qeth_dbf_misc, &debug_hex_ascii_view);
8066	debug_set_level(qeth_dbf_misc, QETH_DBF_MISC_LEVEL);
8067
8068	debug_register_view(qeth_dbf_data, &debug_hex_ascii_view);
8069	debug_set_level(qeth_dbf_data, QETH_DBF_DATA_LEVEL);
8070
8071	debug_register_view(qeth_dbf_control, &debug_hex_ascii_view);
8072	debug_set_level(qeth_dbf_control, QETH_DBF_CONTROL_LEVEL);
8073
8074	debug_register_view(qeth_dbf_sense, &debug_hex_ascii_view);
8075	debug_set_level(qeth_dbf_sense, QETH_DBF_SENSE_LEVEL);
8076
8077	debug_register_view(qeth_dbf_qerr, &debug_hex_ascii_view);
8078	debug_set_level(qeth_dbf_qerr, QETH_DBF_QERR_LEVEL);
8079
8080	debug_register_view(qeth_dbf_trace, &debug_hex_ascii_view);
8081	debug_set_level(qeth_dbf_trace, QETH_DBF_TRACE_LEVEL);
8082
8083	return 0;
8084}
8085
8086#ifdef CONFIG_QETH_IPV6
8087extern struct neigh_table arp_tbl;
8088static struct neigh_ops *arp_direct_ops;
8089static int (*qeth_old_arp_constructor) (struct neighbour *);
8090
8091static struct neigh_ops arp_direct_ops_template = {
8092	.family = AF_INET,
8093	.solicit = NULL,
8094	.error_report = NULL,
8095	.output = dev_queue_xmit,
8096	.connected_output = dev_queue_xmit,
8097	.hh_output = dev_queue_xmit,
8098	.queue_xmit = dev_queue_xmit
8099};
8100
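/*
 * Replacement ARP constructor: for qeth layer 3 interfaces that do not use
 * fake headers, neighbour entries are created in NUD_NOARP state with the
 * direct-output ops above, so no ARP resolution is performed on the LAN;
 * everything else falls back to the original constructor.
 */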
8101static int
8102qeth_arp_constructor(struct neighbour *neigh)
8103{
8104	struct net_device *dev = neigh->dev;
8105	struct in_device *in_dev;
8106	struct neigh_parms *parms;
8107	struct qeth_card *card;
8108
8109	card = qeth_get_card_from_dev(dev);
8110	if (card == NULL)
8111		goto out;
8112	if((card->options.layer2) ||
8113	   (card->dev->hard_header == qeth_fake_header))
8114		goto out;
8115
8116	rcu_read_lock();
8117	in_dev = __in_dev_get_rcu(dev);
8118	if (in_dev == NULL) {
8119		rcu_read_unlock();
8120		return -EINVAL;
8121	}
8122
8123	parms = in_dev->arp_parms;
8124	__neigh_parms_put(neigh->parms);
8125	neigh->parms = neigh_parms_clone(parms);
8126	rcu_read_unlock();
8127
8128	neigh->type = inet_addr_type(*(__be32 *) neigh->primary_key);
8129	neigh->nud_state = NUD_NOARP;
8130	neigh->ops = arp_direct_ops;
8131	neigh->output = neigh->ops->queue_xmit;
8132	return 0;
8133out:
8134	return qeth_old_arp_constructor(neigh);
8135}
8136#endif  /*CONFIG_QETH_IPV6*/
8137
8138/*
8139 * IP address takeover related functions
8140 */
8141static void
8142qeth_clear_ipato_list(struct qeth_card *card)
8143{
8144	struct qeth_ipato_entry *ipatoe, *tmp;
8145	unsigned long flags;
8146
8147	spin_lock_irqsave(&card->ip_lock, flags);
8148	list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
8149		list_del(&ipatoe->entry);
8150		kfree(ipatoe);
8151	}
8152	spin_unlock_irqrestore(&card->ip_lock, flags);
8153}
8154
8155int
8156qeth_add_ipato_entry(struct qeth_card *card, struct qeth_ipato_entry *new)
8157{
8158	struct qeth_ipato_entry *ipatoe;
8159	unsigned long flags;
8160	int rc = 0;
8161
8162	QETH_DBF_TEXT(trace, 2, "addipato");
8163	spin_lock_irqsave(&card->ip_lock, flags);
8164	list_for_each_entry(ipatoe, &card->ipato.entries, entry){
8165		if (ipatoe->proto != new->proto)
8166			continue;
8167		if (!memcmp(ipatoe->addr, new->addr,
8168			    (ipatoe->proto == QETH_PROT_IPV4)? 4:16) &&
8169		    (ipatoe->mask_bits == new->mask_bits)){
8170			PRINT_WARN("ipato entry already exists!\n");
8171			rc = -EEXIST;
8172			break;
8173		}
8174	}
8175	if (!rc) {
8176		list_add_tail(&new->entry, &card->ipato.entries);
8177	}
8178	spin_unlock_irqrestore(&card->ip_lock, flags);
8179	return rc;
8180}
8181
8182void
8183qeth_del_ipato_entry(struct qeth_card *card, enum qeth_prot_versions proto,
8184		     u8 *addr, int mask_bits)
8185{
8186	struct qeth_ipato_entry *ipatoe, *tmp;
8187	unsigned long flags;
8188
8189	QETH_DBF_TEXT(trace, 2, "delipato");
8190	spin_lock_irqsave(&card->ip_lock, flags);
8191	list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry){
8192		if (ipatoe->proto != proto)
8193			continue;
8194		if (!memcmp(ipatoe->addr, addr,
8195			    (proto == QETH_PROT_IPV4)? 4:16) &&
8196		    (ipatoe->mask_bits == mask_bits)){
8197			list_del(&ipatoe->entry);
8198			kfree(ipatoe);
8199		}
8200	}
8201	spin_unlock_irqrestore(&card->ip_lock, flags);
8202}
8203
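/*
 * Expand an address into one byte per bit, most significant bit first, so
 * that prefixes can be compared with memcmp().  For example (illustrative),
 * the single octet 0xC0 becomes { 1, 1, 0, 0, 0, 0, 0, 0 }.
 */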
8204static void
8205qeth_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
8206{
8207	int i, j;
8208	u8 octet;
8209
8210	for (i = 0; i < len; ++i){
8211		octet = addr[i];
8212		for (j = 7; j >= 0; --j){
8213			bits[i*8 + j] = octet & 1;
8214			octet >>= 1;
8215		}
8216	}
8217}
8218
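/*
 * Check whether an address falls under IP address takeover: the address
 * and each configured ipato entry are expanded to bit arrays and the first
 * mask_bits bits (capped at 32 for IPv4, 128 for IPv6) are compared; the
 * ipato.invert4/invert6 flags flip the result, turning the entries into an
 * exclusion list.
 */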
8219static int
8220qeth_is_addr_covered_by_ipato(struct qeth_card *card, struct qeth_ipaddr *addr)
8221{
8222	struct qeth_ipato_entry *ipatoe;
8223	u8 addr_bits[128] = {0, };
8224	u8 ipatoe_bits[128] = {0, };
8225	int rc = 0;
8226
8227	if (!card->ipato.enabled)
8228		return 0;
8229
8230	qeth_convert_addr_to_bits((u8 *) &addr->u, addr_bits,
8231				  (addr->proto == QETH_PROT_IPV4)? 4:16);
8232	list_for_each_entry(ipatoe, &card->ipato.entries, entry){
8233		if (addr->proto != ipatoe->proto)
8234			continue;
8235		qeth_convert_addr_to_bits(ipatoe->addr, ipatoe_bits,
8236					  (ipatoe->proto==QETH_PROT_IPV4) ?
8237					  4:16);
8238		if (addr->proto == QETH_PROT_IPV4)
8239			rc = !memcmp(addr_bits, ipatoe_bits,
8240				     min(32, ipatoe->mask_bits));
8241		else
8242			rc = !memcmp(addr_bits, ipatoe_bits,
8243				     min(128, ipatoe->mask_bits));
8244		if (rc)
8245			break;
8246	}
8247	/* invert? */
8248	if ((addr->proto == QETH_PROT_IPV4) && card->ipato.invert4)
8249		rc = !rc;
8250	else if ((addr->proto == QETH_PROT_IPV6) && card->ipato.invert6)
8251		rc = !rc;
8252
8253	return rc;
8254}
8255
8256/*
8257 * VIPA related functions
8258 */
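/*
 * VIPAs (virtual IP addresses) are added without a netmask/prefix and with
 * the SETIP/DELIP VIPA flags set; an address that is already on the active
 * or todo list is rejected with -EEXIST.
 */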
8259int
8260qeth_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
8261	      const u8 *addr)
8262{
8263	struct qeth_ipaddr *ipaddr;
8264	unsigned long flags;
8265	int rc = 0;
8266
8267	ipaddr = qeth_get_addr_buffer(proto);
8268	if (ipaddr){
8269		if (proto == QETH_PROT_IPV4){
8270			QETH_DBF_TEXT(trace, 2, "addvipa4");
8271			memcpy(&ipaddr->u.a4.addr, addr, 4);
8272			ipaddr->u.a4.mask = 0;
8273#ifdef CONFIG_QETH_IPV6
8274		} else if (proto == QETH_PROT_IPV6){
8275			QETH_DBF_TEXT(trace, 2, "addvipa6");
8276			memcpy(&ipaddr->u.a6.addr, addr, 16);
8277			ipaddr->u.a6.pfxlen = 0;
8278#endif
8279		}
8280		ipaddr->type = QETH_IP_TYPE_VIPA;
8281		ipaddr->set_flags = QETH_IPA_SETIP_VIPA_FLAG;
8282		ipaddr->del_flags = QETH_IPA_DELIP_VIPA_FLAG;
8283	} else
8284		return -ENOMEM;
8285	spin_lock_irqsave(&card->ip_lock, flags);
8286	if (__qeth_address_exists_in_list(&card->ip_list, ipaddr, 0) ||
8287	    __qeth_address_exists_in_list(card->ip_tbd_list, ipaddr, 0))
8288		rc = -EEXIST;
8289	spin_unlock_irqrestore(&card->ip_lock, flags);
8290	if (rc){
8291		PRINT_WARN("Cannot add VIPA. Address already exists!\n");
8292		return rc;
8293	}
8294	if (!qeth_add_ip(card, ipaddr))
8295		kfree(ipaddr);
8296	qeth_set_ip_addr_list(card);
8297	return rc;
8298}
8299
8300void
8301qeth_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
8302	      const u8 *addr)
8303{
8304	struct qeth_ipaddr *ipaddr;
8305
8306	ipaddr = qeth_get_addr_buffer(proto);
8307	if (ipaddr){
8308		if (proto == QETH_PROT_IPV4){
8309			QETH_DBF_TEXT(trace, 2, "delvipa4");
8310			memcpy(&ipaddr->u.a4.addr, addr, 4);
8311			ipaddr->u.a4.mask = 0;
8312#ifdef CONFIG_QETH_IPV6
8313		} else if (proto == QETH_PROT_IPV6){
8314			QETH_DBF_TEXT(trace, 2, "delvipa6");
8315			memcpy(&ipaddr->u.a6.addr, addr, 16);
8316			ipaddr->u.a6.pfxlen = 0;
8317#endif
8318		}
8319		ipaddr->type = QETH_IP_TYPE_VIPA;
8320	} else
8321		return;
8322	if (!qeth_delete_ip(card, ipaddr))
8323		kfree(ipaddr);
8324	qeth_set_ip_addr_list(card);
8325}
8326
8327/*
8328 * proxy ARP related functions
8329 */
8330int
8331qeth_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
8332	      const u8 *addr)
8333{
8334	struct qeth_ipaddr *ipaddr;
8335	unsigned long flags;
8336	int rc = 0;
8337
8338	ipaddr = qeth_get_addr_buffer(proto);
8339	if (ipaddr){
8340		if (proto == QETH_PROT_IPV4){
8341			QETH_DBF_TEXT(trace, 2, "addrxip4");
8342			memcpy(&ipaddr->u.a4.addr, addr, 4);
8343			ipaddr->u.a4.mask = 0;
8344#ifdef CONFIG_QETH_IPV6
8345		} else if (proto == QETH_PROT_IPV6){
8346			QETH_DBF_TEXT(trace, 2, "addrxip6");
8347			memcpy(&ipaddr->u.a6.addr, addr, 16);
8348			ipaddr->u.a6.pfxlen = 0;
8349#endif
8350		}
8351		ipaddr->type = QETH_IP_TYPE_RXIP;
8352		ipaddr->set_flags = QETH_IPA_SETIP_TAKEOVER_FLAG;
8353		ipaddr->del_flags = 0;
8354	} else
8355		return -ENOMEM;
8356	spin_lock_irqsave(&card->ip_lock, flags);
8357	if (__qeth_address_exists_in_list(&card->ip_list, ipaddr, 0) ||
8358	    __qeth_address_exists_in_list(card->ip_tbd_list, ipaddr, 0))
8359		rc = -EEXIST;
8360	spin_unlock_irqrestore(&card->ip_lock, flags);
8361	if (rc){
8362		PRINT_WARN("Cannot add RXIP. Address already exists!\n");
8363		return rc;
8364	}
8365	if (!qeth_add_ip(card, ipaddr))
8366		kfree(ipaddr);
8367	qeth_set_ip_addr_list(card);
8368	return 0;
8369}
8370
8371void
8372qeth_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
8373	      const u8 *addr)
8374{
8375	struct qeth_ipaddr *ipaddr;
8376
8377	ipaddr = qeth_get_addr_buffer(proto);
8378	if (ipaddr){
8379		if (proto == QETH_PROT_IPV4){
8380			QETH_DBF_TEXT(trace, 2, "delrxip4");
8381			memcpy(&ipaddr->u.a4.addr, addr, 4);
8382			ipaddr->u.a4.mask = 0;
8383#ifdef CONFIG_QETH_IPV6
8384		} else if (proto == QETH_PROT_IPV6){
8385			QETH_DBF_TEXT(trace, 2, "delrxip6");
8386			memcpy(&ipaddr->u.a6.addr, addr, 16);
8387			ipaddr->u.a6.pfxlen = 0;
8388#endif
8389		}
8390		ipaddr->type = QETH_IP_TYPE_RXIP;
8391	} else
8392		return;
8393	if (!qeth_delete_ip(card, ipaddr))
8394		kfree(ipaddr);
8395	qeth_set_ip_addr_list(card);
8396}
8397
8398/**
8399 * IP event handler
8400 */
8401static int
8402qeth_ip_event(struct notifier_block *this,
8403	      unsigned long event,void *ptr)
8404{
8405	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
8406	struct net_device *dev = (struct net_device *) ifa->ifa_dev->dev;
8407	struct qeth_ipaddr *addr;
8408	struct qeth_card *card;
8409
8410	QETH_DBF_TEXT(trace,3,"ipevent");
8411	card = qeth_get_card_from_dev(dev);
8412	if (!card)
8413		return NOTIFY_DONE;
8414	if (card->options.layer2)
8415		return NOTIFY_DONE;
8416
8417	addr = qeth_get_addr_buffer(QETH_PROT_IPV4);
8418	if (addr != NULL) {
8419		addr->u.a4.addr = ifa->ifa_address;
8420		addr->u.a4.mask = ifa->ifa_mask;
8421		addr->type = QETH_IP_TYPE_NORMAL;
8422	} else
8423		goto out;
8424
8425	switch(event) {
8426	case NETDEV_UP:
8427		if (!qeth_add_ip(card, addr))
8428			kfree(addr);
8429		break;
8430	case NETDEV_DOWN:
8431		if (!qeth_delete_ip(card, addr))
8432			kfree(addr);
8433		break;
8434	default:
8435		break;
8436	}
8437	qeth_set_ip_addr_list(card);
8438out:
8439	return NOTIFY_DONE;
8440}
8441
8442static struct notifier_block qeth_ip_notifier = {
8443	qeth_ip_event,
8444	NULL,
8445};
8446
8447#ifdef CONFIG_QETH_IPV6
8448/**
8449 * IPv6 event handler
8450 */
8451static int
8452qeth_ip6_event(struct notifier_block *this,
8453	      unsigned long event,void *ptr)
8454{
8455
8456	struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
8457	struct net_device *dev = (struct net_device *)ifa->idev->dev;
8458	struct qeth_ipaddr *addr;
8459	struct qeth_card *card;
8460
8461	QETH_DBF_TEXT(trace,3,"ip6event");
8462
8463	card = qeth_get_card_from_dev(dev);
8464	if (!card)
8465		return NOTIFY_DONE;
8466	if (!qeth_is_supported(card, IPA_IPV6))
8467		return NOTIFY_DONE;
8468
8469	addr = qeth_get_addr_buffer(QETH_PROT_IPV6);
8470	if (addr != NULL) {
8471		memcpy(&addr->u.a6.addr, &ifa->addr, sizeof(struct in6_addr));
8472		addr->u.a6.pfxlen = ifa->prefix_len;
8473		addr->type = QETH_IP_TYPE_NORMAL;
8474	} else
8475		goto out;
8476
8477	switch(event) {
8478	case NETDEV_UP:
8479		if (!qeth_add_ip(card, addr))
8480			kfree(addr);
8481		break;
8482	case NETDEV_DOWN:
8483		if (!qeth_delete_ip(card, addr))
8484			kfree(addr);
8485		break;
8486	default:
8487		break;
8488	}
8489	qeth_set_ip_addr_list(card);
8490out:
8491	return NOTIFY_DONE;
8492}
8493
8494static struct notifier_block qeth_ip6_notifier = {
8495	qeth_ip6_event,
8496	NULL,
8497};
8498#endif
8499
8500static int
8501__qeth_reboot_event_card(struct device *dev, void *data)
8502{
8503	struct qeth_card *card;
8504
8505	card = (struct qeth_card *) dev->driver_data;
8506	qeth_clear_ip_list(card, 0, 0);
8507	qeth_qdio_clear_card(card, 0);
8508	qeth_clear_qdio_buffers(card);
8509	return 0;
8510}
8511
8512static int
8513qeth_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
8514{
8515	int ret;
8516
8517	ret = driver_for_each_device(&qeth_ccwgroup_driver.driver, NULL, NULL,
8518				     __qeth_reboot_event_card);
8519	return ret ? NOTIFY_BAD : NOTIFY_DONE;
8520}
8521
8522
8523static struct notifier_block qeth_reboot_notifier = {
8524	qeth_reboot_event,
8525	NULL,
8526};
8527
8528static int
8529qeth_register_notifiers(void)
8530{
8531        int r;
8532
8533	QETH_DBF_TEXT(trace,5,"regnotif");
8534	if ((r = register_reboot_notifier(&qeth_reboot_notifier)))
8535		return r;
8536	if ((r = register_inetaddr_notifier(&qeth_ip_notifier)))
8537		goto out_reboot;
8538#ifdef CONFIG_QETH_IPV6
8539	if ((r = register_inet6addr_notifier(&qeth_ip6_notifier)))
8540		goto out_ipv4;
8541#endif
8542	return 0;
8543
8544#ifdef CONFIG_QETH_IPV6
8545out_ipv4:
8546	unregister_inetaddr_notifier(&qeth_ip_notifier);
8547#endif
8548out_reboot:
8549	unregister_reboot_notifier(&qeth_reboot_notifier);
8550	return r;
8551}
8552
8553/**
8554 * unregister all event notifiers
8555 */
8556static void
8557qeth_unregister_notifiers(void)
8558{
8559
8560	QETH_DBF_TEXT(trace,5,"unregnot");
8561	BUG_ON(unregister_reboot_notifier(&qeth_reboot_notifier));
8562	BUG_ON(unregister_inetaddr_notifier(&qeth_ip_notifier));
8563#ifdef CONFIG_QETH_IPV6
8564	BUG_ON(unregister_inet6addr_notifier(&qeth_ip6_notifier));
8565#endif /* QETH_IPV6 */
8566
8567}
8568
8569#ifdef CONFIG_QETH_IPV6
8570static int
8571qeth_ipv6_init(void)
8572{
8573	qeth_old_arp_constructor = arp_tbl.constructor;
8574	write_lock_bh(&arp_tbl.lock);
8575	arp_tbl.constructor = qeth_arp_constructor;
8576	write_unlock_bh(&arp_tbl.lock);
8577
8578	arp_direct_ops = (struct neigh_ops*)
8579		kmalloc(sizeof(struct neigh_ops), GFP_KERNEL);
8580	if (!arp_direct_ops)
8581		return -ENOMEM;
8582
8583	memcpy(arp_direct_ops, &arp_direct_ops_template,
8584	       sizeof(struct neigh_ops));
8585
8586	return 0;
8587}
8588
8589static void
8590qeth_ipv6_uninit(void)
8591{
8592	write_lock_bh(&arp_tbl.lock);
8593	arp_tbl.constructor = qeth_old_arp_constructor;
8594	write_unlock_bh(&arp_tbl.lock);
8595	kfree(arp_direct_ops);
8596}
8597#endif /* CONFIG_QETH_IPV6 */
8598
8599static void
8600qeth_sysfs_unregister(void)
8601{
8602	s390_root_dev_unregister(qeth_root_dev);
8603	qeth_remove_driver_attributes();
8604	ccw_driver_unregister(&qeth_ccw_driver);
8605	ccwgroup_driver_unregister(&qeth_ccwgroup_driver);
8606}
8607
8608/**
8609 * register qeth at sysfs
8610 */
8611static int
8612qeth_sysfs_register(void)
8613{
8614	int rc;
8615
8616	rc = ccwgroup_driver_register(&qeth_ccwgroup_driver);
8617	if (rc)
8618		goto out;
8619
8620	rc = ccw_driver_register(&qeth_ccw_driver);
8621	if (rc)
8622		goto out_ccw_driver;
8623
8624	rc = qeth_create_driver_attributes();
8625	if (rc)
8626		goto out_qeth_attr;
8627
8628	qeth_root_dev = s390_root_dev_register("qeth");
8629	rc = IS_ERR(qeth_root_dev) ? PTR_ERR(qeth_root_dev) : 0;
8630	if (!rc)
8631		goto out;
8632
8633	qeth_remove_driver_attributes();
8634out_qeth_attr:
8635	ccw_driver_unregister(&qeth_ccw_driver);
8636out_ccw_driver:
8637	ccwgroup_driver_unregister(&qeth_ccwgroup_driver);
8638out:
8639	return rc;
8640}
8641
8642/***
8643 * init function
8644 */
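/*
 * Initialization order: debug views, driver/sysfs registration, the IPv6
 * ARP hook, the address/reboot notifiers and finally the procfs entries.
 * Each step is unwound in reverse if a later one fails.
 */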
8645static int __init
8646qeth_init(void)
8647{
8648	int rc;
8649
8650	PRINT_INFO("loading %s\n", version);
8651
8652	INIT_LIST_HEAD(&qeth_card_list.list);
8653	INIT_LIST_HEAD(&qeth_notify_list);
8654	spin_lock_init(&qeth_notify_lock);
8655	rwlock_init(&qeth_card_list.rwlock);
8656
8657	rc = qeth_register_dbf_views();
8658	if (rc)
8659		goto out_err;
8660
8661	rc = qeth_sysfs_register();
8662	if (rc)
8663		goto out_dbf;
8664
8665#ifdef CONFIG_QETH_IPV6
8666	rc = qeth_ipv6_init();
8667	if (rc) {
8668		PRINT_ERR("Out of memory during IPv6 init (code = %d)\n", rc);
8669		goto out_sysfs;
8670	}
8671#endif /* QETH_IPV6 */
8672	rc = qeth_register_notifiers();
8673	if (rc)
8674		goto out_ipv6;
8675	rc = qeth_create_procfs_entries();
8676	if (rc)
8677		goto out_notifiers;
8678
8679	return rc;
8680
8681out_notifiers:
8682	qeth_unregister_notifiers();
8683out_ipv6:
8684#ifdef CONFIG_QETH_IPV6
8685	qeth_ipv6_uninit();
8686out_sysfs:
8687#endif /* QETH_IPV6 */
8688	qeth_sysfs_unregister();
8689out_dbf:
8690	qeth_unregister_dbf_views();
8691out_err:
8692	PRINT_ERR("Initialization failed with code %d\n", rc);
8693	return rc;
8694}
8695
8696static void
8697__exit qeth_exit(void)
8698{
8699	struct qeth_card *card, *tmp;
8700	unsigned long flags;
8701
8702	QETH_DBF_TEXT(trace,1, "cleanup.");
8703
8704	/*
8705	 * We would not need to clean up our devices here, because the
8706	 * common device layer calls qeth_remove_device for each device
8707	 * as soon as we unregister our driver (done in qeth_sysfs_unregister).
8708	 * But we do cleanup here so we can do a "soft" shutdown of our cards.
8709	 * qeth_remove_device called by the common device layer would otherwise
8710	 * do a "hard" shutdown (card->use_hard_stop is set to one in
8711	 * qeth_remove_device).
8712	 */
8713again:
8714	read_lock_irqsave(&qeth_card_list.rwlock, flags);
8715	list_for_each_entry_safe(card, tmp, &qeth_card_list.list, list){
8716		read_unlock_irqrestore(&qeth_card_list.rwlock, flags);
8717		qeth_set_offline(card->gdev);
8718		qeth_remove_device(card->gdev);
8719		goto again;
8720	}
8721	read_unlock_irqrestore(&qeth_card_list.rwlock, flags);
8722#ifdef CONFIG_QETH_IPV6
8723	qeth_ipv6_uninit();
8724#endif
8725	qeth_unregister_notifiers();
8726	qeth_remove_procfs_entries();
8727	qeth_sysfs_unregister();
8728	qeth_unregister_dbf_views();
8729	printk("qeth: removed\n");
8730}
8731
8732EXPORT_SYMBOL(qeth_osn_register);
8733EXPORT_SYMBOL(qeth_osn_deregister);
8734EXPORT_SYMBOL(qeth_osn_assist);
8735module_init(qeth_init);
8736module_exit(qeth_exit);
8737MODULE_AUTHOR("Frank Pavlic <fpavlic@de.ibm.com>");
8738MODULE_DESCRIPTION("Linux on zSeries OSA Express and HiperSockets support\n" \
8739		                      "Copyright 2000,2003 IBM Corporation\n");
8740
8741MODULE_LICENSE("GPL");
8742