1/*
2 *  Linux for S/390 Lan Channel Station Network Driver
3 *
4 *  Copyright IBM Corp. 1999, 2009
5 *  Author(s): Original Code written by
6 *			DJ Barrow <djbarrow@de.ibm.com,barrow_dj@yahoo.com>
7 *	       Rewritten by
8 *			Frank Pavlic <fpavlic@de.ibm.com> and
9 *			Martin Schwidefsky <schwidefsky@de.ibm.com>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 */
25
26#define KMSG_COMPONENT		"lcs"
27#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
28
29#include <linux/module.h>
30#include <linux/if.h>
31#include <linux/netdevice.h>
32#include <linux/etherdevice.h>
33#include <linux/trdevice.h>
34#include <linux/fddidevice.h>
35#include <linux/inetdevice.h>
36#include <linux/in.h>
37#include <linux/igmp.h>
38#include <linux/delay.h>
39#include <linux/kthread.h>
40#include <linux/slab.h>
41#include <net/arp.h>
42#include <net/ip.h>
43
44#include <asm/debug.h>
45#include <asm/idals.h>
46#include <asm/timex.h>
47#include <linux/device.h>
48#include <asm/ccwgroup.h>
49
50#include "lcs.h"
51
52
53#if !defined(CONFIG_NET_ETHERNET) && !defined(CONFIG_TR) && !defined(CONFIG_FDDI)
54#error Cannot compile lcs.c without some net devices switched on.
55#endif
56
57/**
58 * initialization string for output
59 */
60
61static char version[] __initdata = "LCS driver";
62
63/**
64  * the root device for lcs group devices
65  */
66static struct device *lcs_root_dev;
67
68/**
69 * Some prototypes.
70 */
71static void lcs_tasklet(unsigned long);
72static void lcs_start_kernel_thread(struct work_struct *);
73static void lcs_get_frames_cb(struct lcs_channel *, struct lcs_buffer *);
74#ifdef CONFIG_IP_MULTICAST
75static int lcs_send_delipm(struct lcs_card *, struct lcs_ipm_list *);
76#endif /* CONFIG_IP_MULTICAST */
77static int lcs_recovery(void *ptr);
78
79/**
80 * Debug Facility Stuff
81 */
82static char debug_buffer[255];
83static debug_info_t *lcs_dbf_setup;
84static debug_info_t *lcs_dbf_trace;
85
86/**
87 *  LCS Debug Facility functions
88 */
89static void
90lcs_unregister_debug_facility(void)
91{
92	if (lcs_dbf_setup)
93		debug_unregister(lcs_dbf_setup);
94	if (lcs_dbf_trace)
95		debug_unregister(lcs_dbf_trace);
96}
97
98static int
99lcs_register_debug_facility(void)
100{
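	/*
	 * s390 debug facility: debug_register(name, pages per area, number of
	 * areas, size of a debug entry in bytes) - the parameter meaning is
	 * assumed here, see the s390 debug facility documentation.
	 */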
101	lcs_dbf_setup = debug_register("lcs_setup", 2, 1, 8);
102	lcs_dbf_trace = debug_register("lcs_trace", 4, 1, 8);
103	if (lcs_dbf_setup == NULL || lcs_dbf_trace == NULL) {
104		pr_err("Not enough memory for debug facility.\n");
105		lcs_unregister_debug_facility();
106		return -ENOMEM;
107	}
108	debug_register_view(lcs_dbf_setup, &debug_hex_ascii_view);
109	debug_set_level(lcs_dbf_setup, 2);
110	debug_register_view(lcs_dbf_trace, &debug_hex_ascii_view);
111	debug_set_level(lcs_dbf_trace, 2);
112	return 0;
113}
114
115/**
116 * Allocate io buffers.
117 */
118static int
119lcs_alloc_channel(struct lcs_channel *channel)
120{
121	int cnt;
122
123	LCS_DBF_TEXT(2, setup, "ichalloc");
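	/*
	 * The buffers are allocated with GFP_DMA so that they reside below
	 * 2 GB and their physical address fits into the 31-bit CCW data
	 * address (which is why no set_normalized_cda() is needed later).
	 */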
124	for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
125		/* alloc memory for iobuffer */
126		channel->iob[cnt].data =
127			kzalloc(LCS_IOBUFFERSIZE, GFP_DMA | GFP_KERNEL);
128		if (channel->iob[cnt].data == NULL)
129			break;
130		channel->iob[cnt].state = LCS_BUF_STATE_EMPTY;
131	}
132	if (cnt < LCS_NUM_BUFFS) {
133		/* Not all io buffers could be allocated. */
134		LCS_DBF_TEXT(2, setup, "echalloc");
135		while (cnt-- > 0)
136			kfree(channel->iob[cnt].data);
137		return -ENOMEM;
138	}
139	return 0;
140}
141
142/**
143 * Free io buffers.
144 */
145static void
146lcs_free_channel(struct lcs_channel *channel)
147{
148	int cnt;
149
150	LCS_DBF_TEXT(2, setup, "ichfree");
151	for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
152		kfree(channel->iob[cnt].data);
153		channel->iob[cnt].data = NULL;
154	}
155}
156
157/*
158 * Cleanup channel.
159 */
160static void
161lcs_cleanup_channel(struct lcs_channel *channel)
162{
163	LCS_DBF_TEXT(3, setup, "cleanch");
164	/* Kill write channel tasklets. */
165	tasklet_kill(&channel->irq_tasklet);
166	/* Free channel buffers. */
167	lcs_free_channel(channel);
168}
169
170/**
171 * LCS free memory for card and channels.
172 */
173static void
174lcs_free_card(struct lcs_card *card)
175{
176	LCS_DBF_TEXT(2, setup, "remcard");
177	LCS_DBF_HEX(2, setup, &card, sizeof(void*));
178	kfree(card);
179}
180
181/**
182 * LCS alloc memory for card and channels
183 */
184static struct lcs_card *
185lcs_alloc_card(void)
186{
187	struct lcs_card *card;
188	int rc;
189
190	LCS_DBF_TEXT(2, setup, "alloclcs");
191
192	card = kzalloc(sizeof(struct lcs_card), GFP_KERNEL | GFP_DMA);
193	if (card == NULL)
194		return NULL;
195	card->lan_type = LCS_FRAME_TYPE_AUTO;
196	card->pkt_seq = 0;
197	card->lancmd_timeout = LCS_LANCMD_TIMEOUT_DEFAULT;
198	/* Allocate io buffers for the read channel. */
199	rc = lcs_alloc_channel(&card->read);
200	if (rc){
201		LCS_DBF_TEXT(2, setup, "iccwerr");
202		lcs_free_card(card);
203		return NULL;
204	}
205	/* Allocate io buffers for the write channel. */
206	rc = lcs_alloc_channel(&card->write);
207	if (rc) {
208		LCS_DBF_TEXT(2, setup, "iccwerr");
209		lcs_cleanup_channel(&card->read);
210		lcs_free_card(card);
211		return NULL;
212	}
213
214#ifdef CONFIG_IP_MULTICAST
215	INIT_LIST_HEAD(&card->ipm_list);
216#endif
217	LCS_DBF_HEX(2, setup, &card, sizeof(void*));
218	return card;
219}
220
221/*
222 * Setup read channel.
223 */
224static void
225lcs_setup_read_ccws(struct lcs_card *card)
226{
227	int cnt;
228
229	LCS_DBF_TEXT(2, setup, "ireadccw");
230	/* Setup read ccws. */
231	memset(card->read.ccws, 0, sizeof (struct ccw1) * (LCS_NUM_BUFFS + 1));
232	for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
233		card->read.ccws[cnt].cmd_code = LCS_CCW_READ;
234		card->read.ccws[cnt].count = LCS_IOBUFFERSIZE;
235		card->read.ccws[cnt].flags =
236			CCW_FLAG_CC | CCW_FLAG_SLI | CCW_FLAG_PCI;
237		/*
238		 * Note: we have allocated the buffer with GFP_DMA, so
239		 * we do not need to do set_normalized_cda.
240		 */
241		card->read.ccws[cnt].cda =
242			(__u32) __pa(card->read.iob[cnt].data);
243		((struct lcs_header *)
244		 card->read.iob[cnt].data)->offset = LCS_ILLEGAL_OFFSET;
245		card->read.iob[cnt].callback = lcs_get_frames_cb;
246		card->read.iob[cnt].state = LCS_BUF_STATE_READY;
247		card->read.iob[cnt].count = LCS_IOBUFFERSIZE;
248	}
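	/*
	 * Assumed CCW flag semantics: the PCI flag requests an intermediate
	 * interrupt once a buffer has been filled, the SUSPEND flag on the
	 * last buffer keeps the channel program from wrapping around via the
	 * TIC before the driver has released processed buffers (see also
	 * __lcs_ready_buffer_bits()).
	 */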
249	card->read.ccws[0].flags &= ~CCW_FLAG_PCI;
250	card->read.ccws[LCS_NUM_BUFFS - 1].flags &= ~CCW_FLAG_PCI;
251	card->read.ccws[LCS_NUM_BUFFS - 1].flags |= CCW_FLAG_SUSPEND;
252	/* Last ccw is a tic (transfer in channel). */
253	card->read.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER;
254	card->read.ccws[LCS_NUM_BUFFS].cda =
255		(__u32) __pa(card->read.ccws);
256	/* Set initial state of the read channel. */
257	card->read.state = LCS_CH_STATE_INIT;
258
259	card->read.io_idx = 0;
260	card->read.buf_idx = 0;
261}
262
263static void
264lcs_setup_read(struct lcs_card *card)
265{
266	LCS_DBF_TEXT(3, setup, "initread");
267
268	lcs_setup_read_ccws(card);
269	/* Initialize read channel tasklet. */
270	card->read.irq_tasklet.data = (unsigned long) &card->read;
271	card->read.irq_tasklet.func = lcs_tasklet;
272	/* Initialize waitqueue. */
273	init_waitqueue_head(&card->read.wait_q);
274}
275
276/*
277 * Setup write channel.
278 */
279static void
280lcs_setup_write_ccws(struct lcs_card *card)
281{
282	int cnt;
283
284	LCS_DBF_TEXT(3, setup, "iwritccw");
285	/* Setup write ccws. */
286	memset(card->write.ccws, 0, sizeof(struct ccw1) * (LCS_NUM_BUFFS + 1));
287	for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
288		card->write.ccws[cnt].cmd_code = LCS_CCW_WRITE;
289		card->write.ccws[cnt].count = 0;
290		card->write.ccws[cnt].flags =
291			CCW_FLAG_SUSPEND | CCW_FLAG_CC | CCW_FLAG_SLI;
292		/*
293		 * Note: we have allocated the buffer with GFP_DMA, so
294		 * we do not need to do set_normalized_cda.
295		 */
296		card->write.ccws[cnt].cda =
297			(__u32) __pa(card->write.iob[cnt].data);
298	}
299	/* Last ccw is a tic (transfer in channel). */
300	card->write.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER;
301	card->write.ccws[LCS_NUM_BUFFS].cda =
302		(__u32) __pa(card->write.ccws);
303	/* Set initial state of the write channel. */
304	card->write.state = LCS_CH_STATE_INIT;
305
306	card->write.io_idx = 0;
307	card->write.buf_idx = 0;
308}
309
310static void
311lcs_setup_write(struct lcs_card *card)
312{
313	LCS_DBF_TEXT(3, setup, "initwrit");
314
315	lcs_setup_write_ccws(card);
316	/* Initialize write channel tasklet. */
317	card->write.irq_tasklet.data = (unsigned long) &card->write;
318	card->write.irq_tasklet.func = lcs_tasklet;
319	/* Initialize waitqueue. */
320	init_waitqueue_head(&card->write.wait_q);
321}
322
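/*
 * Thread bookkeeping: three bitmasks, all protected by card->mask_lock.
 * thread_allowed_mask marks threads that may be started, thread_start_mask
 * marks threads that have been requested to start, thread_running_mask marks
 * threads that are actually running. A thread is only started if it is
 * allowed and not already running; card->wait_q is woken on every change.
 */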
323static void
324lcs_set_allowed_threads(struct lcs_card *card, unsigned long threads)
325{
326	unsigned long flags;
327
328	spin_lock_irqsave(&card->mask_lock, flags);
329	card->thread_allowed_mask = threads;
330	spin_unlock_irqrestore(&card->mask_lock, flags);
331	wake_up(&card->wait_q);
332}
333static inline int
334lcs_threads_running(struct lcs_card *card, unsigned long threads)
335{
336        unsigned long flags;
337        int rc = 0;
338
339	spin_lock_irqsave(&card->mask_lock, flags);
340        rc = (card->thread_running_mask & threads);
341	spin_unlock_irqrestore(&card->mask_lock, flags);
342        return rc;
343}
344
345static int
346lcs_wait_for_threads(struct lcs_card *card, unsigned long threads)
347{
348        return wait_event_interruptible(card->wait_q,
349                        lcs_threads_running(card, threads) == 0);
350}
351
352static inline int
353lcs_set_thread_start_bit(struct lcs_card *card, unsigned long thread)
354{
355        unsigned long flags;
356
357	spin_lock_irqsave(&card->mask_lock, flags);
358        if ( !(card->thread_allowed_mask & thread) ||
359              (card->thread_start_mask & thread) ) {
360                spin_unlock_irqrestore(&card->mask_lock, flags);
361                return -EPERM;
362        }
363        card->thread_start_mask |= thread;
364	spin_unlock_irqrestore(&card->mask_lock, flags);
365        return 0;
366}
367
368static void
369lcs_clear_thread_running_bit(struct lcs_card *card, unsigned long thread)
370{
371        unsigned long flags;
372
373	spin_lock_irqsave(&card->mask_lock, flags);
374        card->thread_running_mask &= ~thread;
375	spin_unlock_irqrestore(&card->mask_lock, flags);
376        wake_up(&card->wait_q);
377}
378
379static inline int
380__lcs_do_run_thread(struct lcs_card *card, unsigned long thread)
381{
382        unsigned long flags;
383        int rc = 0;
384
385	spin_lock_irqsave(&card->mask_lock, flags);
386        if (card->thread_start_mask & thread){
387                if ((card->thread_allowed_mask & thread) &&
388                    !(card->thread_running_mask & thread)){
389                        rc = 1;
390                        card->thread_start_mask &= ~thread;
391                        card->thread_running_mask |= thread;
392                } else
393                        rc = -EPERM;
394        }
395	spin_unlock_irqrestore(&card->mask_lock, flags);
396        return rc;
397}
398
399static int
400lcs_do_run_thread(struct lcs_card *card, unsigned long thread)
401{
402        int rc = 0;
403        wait_event(card->wait_q,
404                   (rc = __lcs_do_run_thread(card, thread)) >= 0);
405        return rc;
406}
407
408static int
409lcs_do_start_thread(struct lcs_card *card, unsigned long thread)
410{
411        unsigned long flags;
412        int rc = 0;
413
414	spin_lock_irqsave(&card->mask_lock, flags);
415        LCS_DBF_TEXT_(4, trace, "  %02x%02x%02x",
416                        (u8) card->thread_start_mask,
417                        (u8) card->thread_allowed_mask,
418                        (u8) card->thread_running_mask);
419        rc = (card->thread_start_mask & thread);
420	spin_unlock_irqrestore(&card->mask_lock, flags);
421        return rc;
422}
423
424/**
425 * Initialize channels, card and state machines.
426 */
427static void
428lcs_setup_card(struct lcs_card *card)
429{
430	LCS_DBF_TEXT(2, setup, "initcard");
431	LCS_DBF_HEX(2, setup, &card, sizeof(void*));
432
433	lcs_setup_read(card);
434	lcs_setup_write(card);
435	/* Set cards initial state. */
436	card->state = DEV_STATE_DOWN;
437	card->tx_buffer = NULL;
438	card->tx_emitted = 0;
439
440	init_waitqueue_head(&card->wait_q);
441	spin_lock_init(&card->lock);
442	spin_lock_init(&card->ipm_lock);
443	spin_lock_init(&card->mask_lock);
444#ifdef CONFIG_IP_MULTICAST
445	INIT_LIST_HEAD(&card->ipm_list);
446#endif
447	INIT_LIST_HEAD(&card->lancmd_waiters);
448}
449
450static inline void
451lcs_clear_multicast_list(struct lcs_card *card)
452{
453#ifdef	CONFIG_IP_MULTICAST
454	struct lcs_ipm_list *ipm;
455	unsigned long flags;
456
457	/* Free multicast list. */
458	LCS_DBF_TEXT(3, setup, "clmclist");
459	spin_lock_irqsave(&card->ipm_lock, flags);
460	while (!list_empty(&card->ipm_list)){
461		ipm = list_entry(card->ipm_list.next,
462				 struct lcs_ipm_list, list);
463		list_del(&ipm->list);
464		if (ipm->ipm_state != LCS_IPM_STATE_SET_REQUIRED){
465			spin_unlock_irqrestore(&card->ipm_lock, flags);
466			lcs_send_delipm(card, ipm);
467			spin_lock_irqsave(&card->ipm_lock, flags);
468		}
469		kfree(ipm);
470	}
471	spin_unlock_irqrestore(&card->ipm_lock, flags);
472#endif
473}
474/**
475 * Cleanup channels, card and state machines.
476 */
477static void
478lcs_cleanup_card(struct lcs_card *card)
479{
480
481	LCS_DBF_TEXT(3, setup, "cleancrd");
482	LCS_DBF_HEX(2,setup,&card,sizeof(void*));
483
484	if (card->dev != NULL)
485		free_netdev(card->dev);
486	/* Cleanup channels. */
487	lcs_cleanup_channel(&card->write);
488	lcs_cleanup_channel(&card->read);
489}
490
491/**
492 * Start channel.
493 */
494static int
495lcs_start_channel(struct lcs_channel *channel)
496{
497	unsigned long flags;
498	int rc;
499
500	LCS_DBF_TEXT_(4, trace,"ssch%s", dev_name(&channel->ccwdev->dev));
501	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
502	rc = ccw_device_start(channel->ccwdev,
503			      channel->ccws + channel->io_idx, 0, 0,
504			      DOIO_DENY_PREFETCH | DOIO_ALLOW_SUSPEND);
505	if (rc == 0)
506		channel->state = LCS_CH_STATE_RUNNING;
507	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
508	if (rc) {
509		LCS_DBF_TEXT_(4,trace,"essh%s",
510			      dev_name(&channel->ccwdev->dev));
511		dev_err(&channel->ccwdev->dev,
512			"Starting an LCS device resulted in an error,"
513			" rc=%d!\n", rc);
514	}
515	return rc;
516}
517
518static int
519lcs_clear_channel(struct lcs_channel *channel)
520{
521	unsigned long flags;
522	int rc;
523
524	LCS_DBF_TEXT(4,trace,"clearch");
525	LCS_DBF_TEXT_(4, trace, "%s", dev_name(&channel->ccwdev->dev));
526	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
527	rc = ccw_device_clear(channel->ccwdev, (addr_t) channel);
528	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
529	if (rc) {
530		LCS_DBF_TEXT_(4, trace, "ecsc%s",
531			      dev_name(&channel->ccwdev->dev));
532		return rc;
533	}
534	wait_event(channel->wait_q, (channel->state == LCS_CH_STATE_CLEARED));
535	channel->state = LCS_CH_STATE_STOPPED;
536	return rc;
537}
538
539
540/**
541 * Stop channel.
542 */
543static int
544lcs_stop_channel(struct lcs_channel *channel)
545{
546	unsigned long flags;
547	int rc;
548
549	if (channel->state == LCS_CH_STATE_STOPPED)
550		return 0;
551	LCS_DBF_TEXT(4,trace,"haltsch");
552	LCS_DBF_TEXT_(4, trace, "%s", dev_name(&channel->ccwdev->dev));
553	channel->state = LCS_CH_STATE_INIT;
554	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
555	rc = ccw_device_halt(channel->ccwdev, (addr_t) channel);
556	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
557	if (rc) {
558		LCS_DBF_TEXT_(4, trace, "ehsc%s",
559			      dev_name(&channel->ccwdev->dev));
560		return rc;
561	}
562	/* Asynchronous halt initiated. Wait for its completion. */
563	wait_event(channel->wait_q, (channel->state == LCS_CH_STATE_HALTED));
564	lcs_clear_channel(channel);
565	return 0;
566}
567
568/**
569 * start read and write channel
570 */
571static int
572lcs_start_channels(struct lcs_card *card)
573{
574	int rc;
575
576	LCS_DBF_TEXT(2, trace, "chstart");
577	/* start read channel */
578	rc = lcs_start_channel(&card->read);
579	if (rc)
580		return rc;
581	/* start write channel */
582	rc = lcs_start_channel(&card->write);
583	if (rc)
584		lcs_stop_channel(&card->read);
585	return rc;
586}
587
588/**
589 * stop read and write channel
590 */
591static int
592lcs_stop_channels(struct lcs_card *card)
593{
594	LCS_DBF_TEXT(2, trace, "chhalt");
595	lcs_stop_channel(&card->read);
596	lcs_stop_channel(&card->write);
597	return 0;
598}
599
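/*
 * I/O buffer life cycle: EMPTY -> LOCKED (claimed by the driver) ->
 * READY (handed to the channel program) -> PROCESSED (I/O finished,
 * callback pending) -> EMPTY again.
 */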
600/**
601 * Get empty buffer.
602 */
603static struct lcs_buffer *
604__lcs_get_buffer(struct lcs_channel *channel)
605{
606	int index;
607
608	LCS_DBF_TEXT(5, trace, "_getbuff");
609	index = channel->io_idx;
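	/*
	 * Scan the buffer ring starting at io_idx; the index wraps by
	 * masking, which assumes LCS_NUM_BUFFS is a power of two.
	 */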
610	do {
611		if (channel->iob[index].state == LCS_BUF_STATE_EMPTY) {
612			channel->iob[index].state = LCS_BUF_STATE_LOCKED;
613			return channel->iob + index;
614		}
615		index = (index + 1) & (LCS_NUM_BUFFS - 1);
616	} while (index != channel->io_idx);
617	return NULL;
618}
619
620static struct lcs_buffer *
621lcs_get_buffer(struct lcs_channel *channel)
622{
623	struct lcs_buffer *buffer;
624	unsigned long flags;
625
626	LCS_DBF_TEXT(5, trace, "getbuff");
627	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
628	buffer = __lcs_get_buffer(channel);
629	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
630	return buffer;
631}
632
633/**
634 * Resume channel program if the channel is suspended.
635 */
636static int
637__lcs_resume_channel(struct lcs_channel *channel)
638{
639	int rc;
640
641	if (channel->state != LCS_CH_STATE_SUSPENDED)
642		return 0;
643	if (channel->ccws[channel->io_idx].flags & CCW_FLAG_SUSPEND)
644		return 0;
645	LCS_DBF_TEXT_(5, trace, "rsch%s", dev_name(&channel->ccwdev->dev));
646	rc = ccw_device_resume(channel->ccwdev);
647	if (rc) {
648		LCS_DBF_TEXT_(4, trace, "ersc%s",
649			      dev_name(&channel->ccwdev->dev));
650		dev_err(&channel->ccwdev->dev,
651			"Sending data from the LCS device to the LAN failed"
652			" with rc=%d\n",rc);
653	} else
654		channel->state = LCS_CH_STATE_RUNNING;
655	return rc;
656
657}
658
659/**
660 * Make a buffer ready for processing.
661 */
662static inline void
663__lcs_ready_buffer_bits(struct lcs_channel *channel, int index)
664{
665	int prev, next;
666
667	LCS_DBF_TEXT(5, trace, "rdybits");
668	prev = (index - 1) & (LCS_NUM_BUFFS - 1);
669	next = (index + 1) & (LCS_NUM_BUFFS - 1);
670	/* Check if we may clear the suspend bit of this buffer. */
671	if (channel->ccws[next].flags & CCW_FLAG_SUSPEND) {
672		/* Check if we have to set the PCI bit. */
673		if (!(channel->ccws[prev].flags & CCW_FLAG_SUSPEND))
674			/* Suspend bit of the previous buffer is not set. */
675			channel->ccws[index].flags |= CCW_FLAG_PCI;
676		/* Suspend bit of the next buffer is set. */
677		channel->ccws[index].flags &= ~CCW_FLAG_SUSPEND;
678	}
679}
680
681static int
682lcs_ready_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
683{
684	unsigned long flags;
685	int index, rc;
686
687	LCS_DBF_TEXT(5, trace, "rdybuff");
688	BUG_ON(buffer->state != LCS_BUF_STATE_LOCKED &&
689	       buffer->state != LCS_BUF_STATE_PROCESSED);
690	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
691	buffer->state = LCS_BUF_STATE_READY;
692	index = buffer - channel->iob;
693	/* Set length. */
694	channel->ccws[index].count = buffer->count;
695	/* Check relevant PCI/suspend bits. */
696	__lcs_ready_buffer_bits(channel, index);
697	rc = __lcs_resume_channel(channel);
698	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
699	return rc;
700}
701
702/**
703 * Mark the buffer as processed. Take care of the suspend bit
704 * of the previous buffer. This function is called from
705 * interrupt context, so the lock must not be taken.
706 */
707static int
708__lcs_processed_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
709{
710	int index, prev, next;
711
712	LCS_DBF_TEXT(5, trace, "prcsbuff");
713	BUG_ON(buffer->state != LCS_BUF_STATE_READY);
714	buffer->state = LCS_BUF_STATE_PROCESSED;
715	index = buffer - channel->iob;
716	prev = (index - 1) & (LCS_NUM_BUFFS - 1);
717	next = (index + 1) & (LCS_NUM_BUFFS - 1);
718	/* Set the suspend bit and clear the PCI bit of this buffer. */
719	channel->ccws[index].flags |= CCW_FLAG_SUSPEND;
720	channel->ccws[index].flags &= ~CCW_FLAG_PCI;
721	/* Check the suspend bit of the previous buffer. */
722	if (channel->iob[prev].state == LCS_BUF_STATE_READY) {
723		/*
724		 * Previous buffer is in state ready. It might have
725		 * happened in lcs_ready_buffer that the suspend bit
726		 * has not been cleared to avoid an endless loop.
727		 * Do it now.
728		 */
729		__lcs_ready_buffer_bits(channel, prev);
730	}
731	/* Clear PCI bit of next buffer. */
732	channel->ccws[next].flags &= ~CCW_FLAG_PCI;
733	return __lcs_resume_channel(channel);
734}
735
736/**
737 * Put a processed buffer back to state empty.
738 */
739static void
740lcs_release_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
741{
742	unsigned long flags;
743
744	LCS_DBF_TEXT(5, trace, "relbuff");
745	BUG_ON(buffer->state != LCS_BUF_STATE_LOCKED &&
746	       buffer->state != LCS_BUF_STATE_PROCESSED);
747	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
748	buffer->state = LCS_BUF_STATE_EMPTY;
749	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
750}
751
752/**
753 * Get buffer for a lan command.
754 */
755static struct lcs_buffer *
756lcs_get_lancmd(struct lcs_card *card, int count)
757{
758	struct lcs_buffer *buffer;
759	struct lcs_cmd *cmd;
760
761	LCS_DBF_TEXT(4, trace, "getlncmd");
762	/* Get buffer and wait if none is available. */
763	wait_event(card->write.wait_q,
764		   ((buffer = lcs_get_buffer(&card->write)) != NULL));
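	/*
	 * A lan command frame is an lcs_header followed by the command data;
	 * a trailing 16-bit zero offset word terminates the frame chain.
	 */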
765	count += sizeof(struct lcs_header);
766	*(__u16 *)(buffer->data + count) = 0;
767	buffer->count = count + sizeof(__u16);
768	buffer->callback = lcs_release_buffer;
769	cmd = (struct lcs_cmd *) buffer->data;
770	cmd->offset = count;
771	cmd->type = LCS_FRAME_TYPE_CONTROL;
772	cmd->slot = 0;
773	return buffer;
774}
775
776
777static void
778lcs_get_reply(struct lcs_reply *reply)
779{
780	WARN_ON(atomic_read(&reply->refcnt) <= 0);
781	atomic_inc(&reply->refcnt);
782}
783
784static void
785lcs_put_reply(struct lcs_reply *reply)
786{
787        WARN_ON(atomic_read(&reply->refcnt) <= 0);
788        if (atomic_dec_and_test(&reply->refcnt)) {
789		kfree(reply);
790	}
791
792}
793
794static struct lcs_reply *
795lcs_alloc_reply(struct lcs_cmd *cmd)
796{
797	struct lcs_reply *reply;
798
799	LCS_DBF_TEXT(4, trace, "getreply");
800
801	reply = kzalloc(sizeof(struct lcs_reply), GFP_ATOMIC);
802	if (!reply)
803		return NULL;
804	atomic_set(&reply->refcnt,1);
805	reply->sequence_no = cmd->sequence_no;
806	reply->received = 0;
807	reply->rc = 0;
808	init_waitqueue_head(&reply->wait_q);
809
810	return reply;
811}
812
813/**
814 * Notifier function for lancmd replies. Called from read irq.
815 */
816static void
817lcs_notify_lancmd_waiters(struct lcs_card *card, struct lcs_cmd *cmd)
818{
819	struct list_head *l, *n;
820	struct lcs_reply *reply;
821
822	LCS_DBF_TEXT(4, trace, "notiwait");
823	spin_lock(&card->lock);
824	list_for_each_safe(l, n, &card->lancmd_waiters) {
825		reply = list_entry(l, struct lcs_reply, list);
826		if (reply->sequence_no == cmd->sequence_no) {
827			lcs_get_reply(reply);
828			list_del_init(&reply->list);
829			if (reply->callback != NULL)
830				reply->callback(card, cmd);
831			reply->received = 1;
832			reply->rc = cmd->return_code;
833			wake_up(&reply->wait_q);
834			lcs_put_reply(reply);
835			break;
836		}
837	}
838	spin_unlock(&card->lock);
839}
840
841/**
842 * Emit buffer of a lan command.
843 */
844static void
845lcs_lancmd_timeout(unsigned long data)
846{
847	struct lcs_reply *reply, *list_reply, *r;
848	unsigned long flags;
849
850	LCS_DBF_TEXT(4, trace, "timeout");
851	reply = (struct lcs_reply *) data;
852	spin_lock_irqsave(&reply->card->lock, flags);
853	list_for_each_entry_safe(list_reply, r,
854				 &reply->card->lancmd_waiters,list) {
855		if (reply == list_reply) {
856			lcs_get_reply(reply);
857			list_del_init(&reply->list);
858			spin_unlock_irqrestore(&reply->card->lock, flags);
859			reply->received = 1;
860			reply->rc = -ETIME;
861			wake_up(&reply->wait_q);
862			lcs_put_reply(reply);
863			return;
864		}
865	}
866	spin_unlock_irqrestore(&reply->card->lock, flags);
867}
868
869static int
870lcs_send_lancmd(struct lcs_card *card, struct lcs_buffer *buffer,
871		void (*reply_callback)(struct lcs_card *, struct lcs_cmd *))
872{
873	struct lcs_reply *reply;
874	struct lcs_cmd *cmd;
875	struct timer_list timer;
876	unsigned long flags;
877	int rc;
878
879	LCS_DBF_TEXT(4, trace, "sendcmd");
880	cmd = (struct lcs_cmd *) buffer->data;
881	cmd->return_code = 0;
882	cmd->sequence_no = card->sequence_no++;
883	reply = lcs_alloc_reply(cmd);
884	if (!reply)
885		return -ENOMEM;
886	reply->callback = reply_callback;
887	reply->card = card;
888	spin_lock_irqsave(&card->lock, flags);
889	list_add_tail(&reply->list, &card->lancmd_waiters);
890	spin_unlock_irqrestore(&card->lock, flags);
891
892	buffer->callback = lcs_release_buffer;
893	rc = lcs_ready_buffer(&card->write, buffer);
894	if (rc)
895		return rc;
896	init_timer_on_stack(&timer);
897	timer.function = lcs_lancmd_timeout;
898	timer.data = (unsigned long) reply;
899	timer.expires = jiffies + HZ*card->lancmd_timeout;
900	add_timer(&timer);
901	wait_event(reply->wait_q, reply->received);
902	del_timer_sync(&timer);
903	LCS_DBF_TEXT_(4, trace, "rc:%d",reply->rc);
904	rc = reply->rc;
905	lcs_put_reply(reply);
906	return rc ? -EIO : 0;
907}
908
909/**
910 * LCS startup command
911 */
912static int
913lcs_send_startup(struct lcs_card *card, __u8 initiator)
914{
915	struct lcs_buffer *buffer;
916	struct lcs_cmd *cmd;
917
918	LCS_DBF_TEXT(2, trace, "startup");
919	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
920	cmd = (struct lcs_cmd *) buffer->data;
921	cmd->cmd_code = LCS_CMD_STARTUP;
922	cmd->initiator = initiator;
923	cmd->cmd.lcs_startup.buff_size = LCS_IOBUFFERSIZE;
924	return lcs_send_lancmd(card, buffer, NULL);
925}
926
927/**
928 * LCS shutdown command
929 */
930static int
931lcs_send_shutdown(struct lcs_card *card)
932{
933	struct lcs_buffer *buffer;
934	struct lcs_cmd *cmd;
935
936	LCS_DBF_TEXT(2, trace, "shutdown");
937	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
938	cmd = (struct lcs_cmd *) buffer->data;
939	cmd->cmd_code = LCS_CMD_SHUTDOWN;
940	cmd->initiator = LCS_INITIATOR_TCPIP;
941	return lcs_send_lancmd(card, buffer, NULL);
942}
943
944/**
945 * LCS lanstat command
946 */
947static void
948__lcs_lanstat_cb(struct lcs_card *card, struct lcs_cmd *cmd)
949{
950	LCS_DBF_TEXT(2, trace, "statcb");
951	memcpy(card->mac, cmd->cmd.lcs_lanstat_cmd.mac_addr, LCS_MAC_LENGTH);
952}
953
954static int
955lcs_send_lanstat(struct lcs_card *card)
956{
957	struct lcs_buffer *buffer;
958	struct lcs_cmd *cmd;
959
960	LCS_DBF_TEXT(2,trace, "cmdstat");
961	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
962	cmd = (struct lcs_cmd *) buffer->data;
963	/* Setup lanstat command. */
964	cmd->cmd_code = LCS_CMD_LANSTAT;
965	cmd->initiator = LCS_INITIATOR_TCPIP;
966	cmd->cmd.lcs_std_cmd.lan_type = card->lan_type;
967	cmd->cmd.lcs_std_cmd.portno = card->portno;
968	return lcs_send_lancmd(card, buffer, __lcs_lanstat_cb);
969}
970
971/**
972 * send stoplan command
973 */
974static int
975lcs_send_stoplan(struct lcs_card *card, __u8 initiator)
976{
977	struct lcs_buffer *buffer;
978	struct lcs_cmd *cmd;
979
980	LCS_DBF_TEXT(2, trace, "cmdstpln");
981	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
982	cmd = (struct lcs_cmd *) buffer->data;
983	cmd->cmd_code = LCS_CMD_STOPLAN;
984	cmd->initiator = initiator;
985	cmd->cmd.lcs_std_cmd.lan_type = card->lan_type;
986	cmd->cmd.lcs_std_cmd.portno = card->portno;
987	return lcs_send_lancmd(card, buffer, NULL);
988}
989
990/**
991 * send startlan command
992 */
993static void
994__lcs_send_startlan_cb(struct lcs_card *card, struct lcs_cmd *cmd)
995{
996	LCS_DBF_TEXT(2, trace, "srtlancb");
997	card->lan_type = cmd->cmd.lcs_std_cmd.lan_type;
998	card->portno = cmd->cmd.lcs_std_cmd.portno;
999}
1000
1001static int
1002lcs_send_startlan(struct lcs_card *card, __u8 initiator)
1003{
1004	struct lcs_buffer *buffer;
1005	struct lcs_cmd *cmd;
1006
1007	LCS_DBF_TEXT(2, trace, "cmdstaln");
1008	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
1009	cmd = (struct lcs_cmd *) buffer->data;
1010	cmd->cmd_code = LCS_CMD_STARTLAN;
1011	cmd->initiator = initiator;
1012	cmd->cmd.lcs_std_cmd.lan_type = card->lan_type;
1013	cmd->cmd.lcs_std_cmd.portno = card->portno;
1014	return lcs_send_lancmd(card, buffer, __lcs_send_startlan_cb);
1015}
1016
1017#ifdef CONFIG_IP_MULTICAST
1018/**
1019 * send setipm command (Multicast)
1020 */
1021static int
1022lcs_send_setipm(struct lcs_card *card,struct lcs_ipm_list *ipm_list)
1023{
1024	struct lcs_buffer *buffer;
1025	struct lcs_cmd *cmd;
1026
1027	LCS_DBF_TEXT(2, trace, "cmdsetim");
1028	buffer = lcs_get_lancmd(card, LCS_MULTICAST_CMD_SIZE);
1029	cmd = (struct lcs_cmd *) buffer->data;
1030	cmd->cmd_code = LCS_CMD_SETIPM;
1031	cmd->initiator = LCS_INITIATOR_TCPIP;
1032	cmd->cmd.lcs_qipassist.lan_type = card->lan_type;
1033	cmd->cmd.lcs_qipassist.portno = card->portno;
1034	cmd->cmd.lcs_qipassist.version = 4;
1035	cmd->cmd.lcs_qipassist.num_ip_pairs = 1;
1036	memcpy(cmd->cmd.lcs_qipassist.lcs_ipass_ctlmsg.ip_mac_pair,
1037	       &ipm_list->ipm, sizeof (struct lcs_ip_mac_pair));
1038	LCS_DBF_TEXT_(2, trace, "%x",ipm_list->ipm.ip_addr);
1039	return lcs_send_lancmd(card, buffer, NULL);
1040}
1041
1042/**
1043 * send delipm command (Multicast)
1044 */
1045static int
1046lcs_send_delipm(struct lcs_card *card,struct lcs_ipm_list *ipm_list)
1047{
1048	struct lcs_buffer *buffer;
1049	struct lcs_cmd *cmd;
1050
1051	LCS_DBF_TEXT(2, trace, "cmddelim");
1052	buffer = lcs_get_lancmd(card, LCS_MULTICAST_CMD_SIZE);
1053	cmd = (struct lcs_cmd *) buffer->data;
1054	cmd->cmd_code = LCS_CMD_DELIPM;
1055	cmd->initiator = LCS_INITIATOR_TCPIP;
1056	cmd->cmd.lcs_qipassist.lan_type = card->lan_type;
1057	cmd->cmd.lcs_qipassist.portno = card->portno;
1058	cmd->cmd.lcs_qipassist.version = 4;
1059	cmd->cmd.lcs_qipassist.num_ip_pairs = 1;
1060	memcpy(cmd->cmd.lcs_qipassist.lcs_ipass_ctlmsg.ip_mac_pair,
1061	       &ipm_list->ipm, sizeof (struct lcs_ip_mac_pair));
1062	LCS_DBF_TEXT_(2, trace, "%x",ipm_list->ipm.ip_addr);
1063	return lcs_send_lancmd(card, buffer, NULL);
1064}
1065
1066/**
1067 * check if multicast is supported by LCS
1068 */
1069static void
1070__lcs_check_multicast_cb(struct lcs_card *card, struct lcs_cmd *cmd)
1071{
1072	LCS_DBF_TEXT(2, trace, "chkmccb");
1073	card->ip_assists_supported =
1074		cmd->cmd.lcs_qipassist.ip_assists_supported;
1075	card->ip_assists_enabled =
1076		cmd->cmd.lcs_qipassist.ip_assists_enabled;
1077}
1078
1079static int
1080lcs_check_multicast_support(struct lcs_card *card)
1081{
1082	struct lcs_buffer *buffer;
1083	struct lcs_cmd *cmd;
1084	int rc;
1085
1086	LCS_DBF_TEXT(2, trace, "cmdqipa");
1087	/* Send query ipassist. */
1088	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
1089	cmd = (struct lcs_cmd *) buffer->data;
1090	cmd->cmd_code = LCS_CMD_QIPASSIST;
1091	cmd->initiator = LCS_INITIATOR_TCPIP;
1092	cmd->cmd.lcs_qipassist.lan_type = card->lan_type;
1093	cmd->cmd.lcs_qipassist.portno = card->portno;
1094	cmd->cmd.lcs_qipassist.version = 4;
1095	cmd->cmd.lcs_qipassist.num_ip_pairs = 1;
1096	rc = lcs_send_lancmd(card, buffer, __lcs_check_multicast_cb);
1097	if (rc != 0) {
1098		pr_err("Query IPAssist failed. Assuming unsupported!\n");
1099		return -EOPNOTSUPP;
1100	}
1101	if (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT)
1102		return 0;
1103	return -EOPNOTSUPP;
1104}
1105
1106/**
1107 * set or del multicast address on LCS card
1108 */
1109static void
1110lcs_fix_multicast_list(struct lcs_card *card)
1111{
1112	struct list_head failed_list;
1113	struct lcs_ipm_list *ipm, *tmp;
1114	unsigned long flags;
1115	int rc;
1116
1117	LCS_DBF_TEXT(4,trace, "fixipm");
1118	INIT_LIST_HEAD(&failed_list);
1119	spin_lock_irqsave(&card->ipm_lock, flags);
1120list_modified:
1121	list_for_each_entry_safe(ipm, tmp, &card->ipm_list, list){
1122		switch (ipm->ipm_state) {
1123		case LCS_IPM_STATE_SET_REQUIRED:
1124			/* del from ipm_list so no one else can tamper with
1125			 * this entry */
1126			list_del_init(&ipm->list);
1127			spin_unlock_irqrestore(&card->ipm_lock, flags);
1128			rc = lcs_send_setipm(card, ipm);
1129			spin_lock_irqsave(&card->ipm_lock, flags);
1130			if (rc) {
1131				pr_info("Adding multicast address failed."
1132					" Table possibly full!\n");
1133				/* store ipm in failed list -> will be added
1134				 * to ipm_list again, so a retry will be done
1135				 * during the next call of this function */
1136				list_add_tail(&ipm->list, &failed_list);
1137			} else {
1138				ipm->ipm_state = LCS_IPM_STATE_ON_CARD;
1139				/* re-insert into ipm_list */
1140				list_add_tail(&ipm->list, &card->ipm_list);
1141			}
1142			goto list_modified;
1143		case LCS_IPM_STATE_DEL_REQUIRED:
1144			list_del(&ipm->list);
1145			spin_unlock_irqrestore(&card->ipm_lock, flags);
1146			lcs_send_delipm(card, ipm);
1147			spin_lock_irqsave(&card->ipm_lock, flags);
1148			kfree(ipm);
1149			goto list_modified;
1150		case LCS_IPM_STATE_ON_CARD:
1151			break;
1152		}
1153	}
1154	/* re-insert all entries from the failed_list into ipm_list */
1155	list_for_each_entry_safe(ipm, tmp, &failed_list, list)
1156		list_move_tail(&ipm->list, &card->ipm_list);
1157
1158	spin_unlock_irqrestore(&card->ipm_lock, flags);
1159}
1160
1161/**
1162 * get mac address for the relevant Multicast address
1163 */
1164static void
1165lcs_get_mac_for_ipm(__be32 ipm, char *mac, struct net_device *dev)
1166{
1167	LCS_DBF_TEXT(4,trace, "getmac");
1168	if (dev->type == ARPHRD_IEEE802_TR)
1169		ip_tr_mc_map(ipm, mac);
1170	else
1171		ip_eth_mc_map(ipm, mac);
1172}
1173
1174/**
1175 * mark entries for deletion that have left the net device's multicast list
1176 */
1177static inline void
1178lcs_remove_mc_addresses(struct lcs_card *card, struct in_device *in4_dev)
1179{
1180	struct ip_mc_list *im4;
1181	struct list_head *l;
1182	struct lcs_ipm_list *ipm;
1183	unsigned long flags;
1184	char buf[MAX_ADDR_LEN];
1185
1186	LCS_DBF_TEXT(4, trace, "remmclst");
1187	spin_lock_irqsave(&card->ipm_lock, flags);
1188	list_for_each(l, &card->ipm_list) {
1189		ipm = list_entry(l, struct lcs_ipm_list, list);
1190		for (im4 = in4_dev->mc_list; im4 != NULL; im4 = im4->next) {
1191			lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev);
1192			if ( (ipm->ipm.ip_addr == im4->multiaddr) &&
1193			     (memcmp(buf, &ipm->ipm.mac_addr,
1194				     LCS_MAC_LENGTH) == 0) )
1195				break;
1196		}
1197		if (im4 == NULL)
1198			ipm->ipm_state = LCS_IPM_STATE_DEL_REQUIRED;
1199	}
1200	spin_unlock_irqrestore(&card->ipm_lock, flags);
1201}
1202
1203static inline struct lcs_ipm_list *
1204lcs_check_addr_entry(struct lcs_card *card, struct ip_mc_list *im4, char *buf)
1205{
1206	struct lcs_ipm_list *tmp, *ipm = NULL;
1207	struct list_head *l;
1208	unsigned long flags;
1209
1210	LCS_DBF_TEXT(4, trace, "chkmcent");
1211	spin_lock_irqsave(&card->ipm_lock, flags);
1212	list_for_each(l, &card->ipm_list) {
1213		tmp = list_entry(l, struct lcs_ipm_list, list);
1214		if ( (tmp->ipm.ip_addr == im4->multiaddr) &&
1215		     (memcmp(buf, &tmp->ipm.mac_addr,
1216			     LCS_MAC_LENGTH) == 0) ) {
1217			ipm = tmp;
1218			break;
1219		}
1220	}
1221	spin_unlock_irqrestore(&card->ipm_lock, flags);
1222	return ipm;
1223}
1224
1225static inline void
1226lcs_set_mc_addresses(struct lcs_card *card, struct in_device *in4_dev)
1227{
1228
1229	struct ip_mc_list *im4;
1230	struct lcs_ipm_list *ipm;
1231	char buf[MAX_ADDR_LEN];
1232	unsigned long flags;
1233
1234	LCS_DBF_TEXT(4, trace, "setmclst");
1235	for (im4 = in4_dev->mc_list; im4; im4 = im4->next) {
1236		lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev);
1237		ipm = lcs_check_addr_entry(card, im4, buf);
1238		if (ipm != NULL)
1239			continue;	/* Address already in list. */
1240		ipm = kzalloc(sizeof(struct lcs_ipm_list), GFP_ATOMIC);
1241		if (ipm == NULL) {
1242			pr_info("Not enough memory to add"
1243				" new multicast entry!\n");
1244			break;
1245		}
1246		memcpy(&ipm->ipm.mac_addr, buf, LCS_MAC_LENGTH);
1247		ipm->ipm.ip_addr = im4->multiaddr;
1248		ipm->ipm_state = LCS_IPM_STATE_SET_REQUIRED;
1249		spin_lock_irqsave(&card->ipm_lock, flags);
1250		LCS_DBF_HEX(2,trace,&ipm->ipm.ip_addr,4);
1251		list_add(&ipm->list, &card->ipm_list);
1252		spin_unlock_irqrestore(&card->ipm_lock, flags);
1253	}
1254}
1255
1256static int
1257lcs_register_mc_addresses(void *data)
1258{
1259	struct lcs_card *card;
1260	struct in_device *in4_dev;
1261
1262	card = (struct lcs_card *) data;
1263
1264	if (!lcs_do_run_thread(card, LCS_SET_MC_THREAD))
1265		return 0;
1266	LCS_DBF_TEXT(4, trace, "regmulti");
1267
1268	in4_dev = in_dev_get(card->dev);
1269	if (in4_dev == NULL)
1270		goto out;
1271	read_lock(&in4_dev->mc_list_lock);
1272	lcs_remove_mc_addresses(card,in4_dev);
1273	lcs_set_mc_addresses(card, in4_dev);
1274	read_unlock(&in4_dev->mc_list_lock);
1275	in_dev_put(in4_dev);
1276
1277	netif_carrier_off(card->dev);
1278	netif_tx_disable(card->dev);
1279	wait_event(card->write.wait_q,
1280			(card->write.state != LCS_CH_STATE_RUNNING));
1281	lcs_fix_multicast_list(card);
1282	if (card->state == DEV_STATE_UP) {
1283		netif_carrier_on(card->dev);
1284		netif_wake_queue(card->dev);
1285	}
1286out:
1287	lcs_clear_thread_running_bit(card, LCS_SET_MC_THREAD);
1288	return 0;
1289}
1290#endif /* CONFIG_IP_MULTICAST */
1291
1292/**
1293 * function called by net device to
1294 * handle multicast address relevant things
1295 */
1296static void
1297lcs_set_multicast_list(struct net_device *dev)
1298{
1299#ifdef CONFIG_IP_MULTICAST
1300        struct lcs_card *card;
1301
1302        LCS_DBF_TEXT(4, trace, "setmulti");
1303        card = (struct lcs_card *) dev->ml_priv;
1304
1305        if (!lcs_set_thread_start_bit(card, LCS_SET_MC_THREAD))
1306		schedule_work(&card->kernel_thread_starter);
1307#endif /* CONFIG_IP_MULTICAST */
1308}
1309
1310static long
1311lcs_check_irb_error(struct ccw_device *cdev, struct irb *irb)
1312{
1313	if (!IS_ERR(irb))
1314		return 0;
1315
1316	switch (PTR_ERR(irb)) {
1317	case -EIO:
1318		dev_warn(&cdev->dev,
1319			"An I/O-error occurred on the LCS device\n");
1320		LCS_DBF_TEXT(2, trace, "ckirberr");
1321		LCS_DBF_TEXT_(2, trace, "  rc%d", -EIO);
1322		break;
1323	case -ETIMEDOUT:
1324		dev_warn(&cdev->dev,
1325			"A command timed out on the LCS device\n");
1326		LCS_DBF_TEXT(2, trace, "ckirberr");
1327		LCS_DBF_TEXT_(2, trace, "  rc%d", -ETIMEDOUT);
1328		break;
1329	default:
1330		dev_warn(&cdev->dev,
1331			"An error occurred on the LCS device, rc=%ld\n",
1332			PTR_ERR(irb));
1333		LCS_DBF_TEXT(2, trace, "ckirberr");
1334		LCS_DBF_TEXT(2, trace, "  rc???");
1335	}
1336	return PTR_ERR(irb);
1337}
1338
1339static int
1340lcs_get_problem(struct ccw_device *cdev, struct irb *irb)
1341{
1342	int dstat, cstat;
1343	char *sense;
1344
1345	sense = (char *) irb->ecw;
1346	cstat = irb->scsw.cmd.cstat;
1347	dstat = irb->scsw.cmd.dstat;
1348
1349	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
1350		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
1351		     SCHN_STAT_PROT_CHECK   | SCHN_STAT_PROG_CHECK)) {
1352		LCS_DBF_TEXT(2, trace, "CGENCHK");
1353		return 1;
1354	}
1355	if (dstat & DEV_STAT_UNIT_CHECK) {
1356		if (sense[LCS_SENSE_BYTE_1] &
1357		    LCS_SENSE_RESETTING_EVENT) {
1358			LCS_DBF_TEXT(2, trace, "REVIND");
1359			return 1;
1360		}
1361		if (sense[LCS_SENSE_BYTE_0] &
1362		    LCS_SENSE_CMD_REJECT) {
1363			LCS_DBF_TEXT(2, trace, "CMDREJ");
1364			return 0;
1365		}
1366		if ((!sense[LCS_SENSE_BYTE_0]) &&
1367		    (!sense[LCS_SENSE_BYTE_1]) &&
1368		    (!sense[LCS_SENSE_BYTE_2]) &&
1369		    (!sense[LCS_SENSE_BYTE_3])) {
1370			LCS_DBF_TEXT(2, trace, "ZEROSEN");
1371			return 0;
1372		}
1373		LCS_DBF_TEXT(2, trace, "DGENCHK");
1374		return 1;
1375	}
1376	return 0;
1377}
1378
1379static void
1380lcs_schedule_recovery(struct lcs_card *card)
1381{
1382	LCS_DBF_TEXT(2, trace, "startrec");
1383	if (!lcs_set_thread_start_bit(card, LCS_RECOVERY_THREAD))
1384		schedule_work(&card->kernel_thread_starter);
1385}
1386
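/*
 * Interrupt handler overview: map the ccw device to its read or write
 * channel, translate channel/device status into a channel state, mark the
 * CCWs completed so far as processed and leave the buffer callbacks to the
 * irq tasklet.
 */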
1387/**
1388 * IRQ Handler for LCS channels
1389 */
1390static void
1391lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1392{
1393	struct lcs_card *card;
1394	struct lcs_channel *channel;
1395	int rc, index;
1396	int cstat, dstat;
1397
1398	if (lcs_check_irb_error(cdev, irb))
1399		return;
1400
1401	card = CARD_FROM_DEV(cdev);
1402	if (card->read.ccwdev == cdev)
1403		channel = &card->read;
1404	else
1405		channel = &card->write;
1406
1407	cstat = irb->scsw.cmd.cstat;
1408	dstat = irb->scsw.cmd.dstat;
1409	LCS_DBF_TEXT_(5, trace, "Rint%s", dev_name(&cdev->dev));
1410	LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.cstat,
1411		      irb->scsw.cmd.dstat);
1412	LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.fctl,
1413		      irb->scsw.cmd.actl);
1414
1415	/* Check for channel and device errors presented */
1416	rc = lcs_get_problem(cdev, irb);
1417	if (rc || (dstat & DEV_STAT_UNIT_EXCEP)) {
1418		dev_warn(&cdev->dev,
1419			"The LCS device stopped because of an error,"
1420			" dstat=0x%X, cstat=0x%X \n",
1421			    dstat, cstat);
1422		if (rc) {
1423			channel->state = LCS_CH_STATE_ERROR;
1424		}
1425	}
1426	if (channel->state == LCS_CH_STATE_ERROR) {
1427		lcs_schedule_recovery(card);
1428		wake_up(&card->wait_q);
1429		return;
1430	}
1431	/* How far in the ccw chain have we processed? */
1432	if ((channel->state != LCS_CH_STATE_INIT) &&
1433	    (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
1434	    (irb->scsw.cmd.cpa != 0)) {
1435		index = (struct ccw1 *) __va((addr_t) irb->scsw.cmd.cpa)
1436			- channel->ccws;
1437		if ((irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED) ||
1438		    (irb->scsw.cmd.cstat & SCHN_STAT_PCI))
1439			/* Bloody io subsystem tells us lies about cpa... */
1440			index = (index - 1) & (LCS_NUM_BUFFS - 1);
1441		while (channel->io_idx != index) {
1442			__lcs_processed_buffer(channel,
1443					       channel->iob + channel->io_idx);
1444			channel->io_idx =
1445				(channel->io_idx + 1) & (LCS_NUM_BUFFS - 1);
1446		}
1447	}
1448
1449	if ((irb->scsw.cmd.dstat & DEV_STAT_DEV_END) ||
1450	    (irb->scsw.cmd.dstat & DEV_STAT_CHN_END) ||
1451	    (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK))
1452		/* Mark channel as stopped. */
1453		channel->state = LCS_CH_STATE_STOPPED;
1454	else if (irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED)
1455		/* CCW execution stopped on a suspend bit. */
1456		channel->state = LCS_CH_STATE_SUSPENDED;
1457	if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
1458		if (irb->scsw.cmd.cc != 0) {
1459			ccw_device_halt(channel->ccwdev, (addr_t) channel);
1460			return;
1461		}
1462		/* The channel has been stopped by halt_IO. */
1463		channel->state = LCS_CH_STATE_HALTED;
1464	}
1465	if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC)
1466		channel->state = LCS_CH_STATE_CLEARED;
1467	/* Do the rest in the tasklet. */
1468	tasklet_schedule(&channel->irq_tasklet);
1469}
1470
1471/**
1472 * Tasklet for IRQ handler
1473 */
1474static void
1475lcs_tasklet(unsigned long data)
1476{
1477	unsigned long flags;
1478	struct lcs_channel *channel;
1479	struct lcs_buffer *iob;
1480	int buf_idx;
1481	int rc;
1482
1483	channel = (struct lcs_channel *) data;
1484	LCS_DBF_TEXT_(5, trace, "tlet%s", dev_name(&channel->ccwdev->dev));
1485
1486	/* Check for processed buffers. */
1487	iob = channel->iob;
1488	buf_idx = channel->buf_idx;
1489	while (iob[buf_idx].state == LCS_BUF_STATE_PROCESSED) {
1490		/* Do the callback thing. */
1491		if (iob[buf_idx].callback != NULL)
1492			iob[buf_idx].callback(channel, iob + buf_idx);
1493		buf_idx = (buf_idx + 1) & (LCS_NUM_BUFFS - 1);
1494	}
1495	channel->buf_idx = buf_idx;
1496
1497	if (channel->state == LCS_CH_STATE_STOPPED)
1498		rc = lcs_start_channel(channel);
1499	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1500	if (channel->state == LCS_CH_STATE_SUSPENDED &&
1501	    channel->iob[channel->io_idx].state == LCS_BUF_STATE_READY) {
1502		rc = __lcs_resume_channel(channel);
1503	}
1504	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
1505
1506	/* Something happened on the channel. Wake up waiters. */
1507	wake_up(&channel->wait_q);
1508}
1509
1510/**
1511 * Finish current tx buffer and make it ready for transmit.
1512 */
1513static void
1514__lcs_emit_txbuffer(struct lcs_card *card)
1515{
1516	LCS_DBF_TEXT(5, trace, "emittx");
1517	*(__u16 *)(card->tx_buffer->data + card->tx_buffer->count) = 0;
1518	card->tx_buffer->count += 2;
1519	lcs_ready_buffer(&card->write, card->tx_buffer);
1520	card->tx_buffer = NULL;
1521	card->tx_emitted++;
1522}
1523
1524/**
1525 * Callback for finished tx buffers.
1526 */
1527static void
1528lcs_txbuffer_cb(struct lcs_channel *channel, struct lcs_buffer *buffer)
1529{
1530	struct lcs_card *card;
1531
1532	LCS_DBF_TEXT(5, trace, "txbuffcb");
1533	/* Put buffer back to pool. */
1534	lcs_release_buffer(channel, buffer);
1535	card = container_of(channel, struct lcs_card, write);
1536	if (netif_queue_stopped(card->dev) && netif_carrier_ok(card->dev))
1537		netif_wake_queue(card->dev);
1538	spin_lock(&card->lock);
1539	card->tx_emitted--;
1540	if (card->tx_emitted <= 0 && card->tx_buffer != NULL)
1541		/*
1542		 * Last running tx buffer has finished. Submit partially
1543		 * filled current buffer.
1544		 */
1545		__lcs_emit_txbuffer(card);
1546	spin_unlock(&card->lock);
1547}
1548
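/*
 * Transmit path: skbs are packed back to back into the current tx buffer.
 * The buffer is handed to the write channel either when the next skb would
 * no longer fit or, if no other buffer is in flight, right after queueing
 * the frame (see __lcs_start_xmit() and lcs_txbuffer_cb()).
 */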
1549/**
1550 * Packet transmit function called by network stack
1551 */
1552static int
1553__lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb,
1554		 struct net_device *dev)
1555{
1556	struct lcs_header *header;
1557	int rc = NETDEV_TX_OK;
1558
1559	LCS_DBF_TEXT(5, trace, "hardxmit");
1560	if (skb == NULL) {
1561		card->stats.tx_dropped++;
1562		card->stats.tx_errors++;
1563		return NETDEV_TX_OK;
1564	}
1565	if (card->state != DEV_STATE_UP) {
1566		dev_kfree_skb(skb);
1567		card->stats.tx_dropped++;
1568		card->stats.tx_errors++;
1569		card->stats.tx_carrier_errors++;
1570		return NETDEV_TX_OK;
1571	}
1572	if (skb->protocol == htons(ETH_P_IPV6)) {
1573		dev_kfree_skb(skb);
1574		return NETDEV_TX_OK;
1575	}
1576	netif_stop_queue(card->dev);
1577	spin_lock(&card->lock);
1578	if (card->tx_buffer != NULL &&
1579	    card->tx_buffer->count + sizeof(struct lcs_header) +
1580	    skb->len + sizeof(u16) > LCS_IOBUFFERSIZE)
1581		/* skb too big for current tx buffer. */
1582		__lcs_emit_txbuffer(card);
1583	if (card->tx_buffer == NULL) {
1584		/* Get new tx buffer */
1585		card->tx_buffer = lcs_get_buffer(&card->write);
1586		if (card->tx_buffer == NULL) {
1587			card->stats.tx_dropped++;
1588			rc = NETDEV_TX_BUSY;
1589			goto out;
1590		}
1591		card->tx_buffer->callback = lcs_txbuffer_cb;
1592		card->tx_buffer->count = 0;
1593	}
1594	header = (struct lcs_header *)
1595		(card->tx_buffer->data + card->tx_buffer->count);
1596	card->tx_buffer->count += skb->len + sizeof(struct lcs_header);
1597	header->offset = card->tx_buffer->count;
1598	header->type = card->lan_type;
1599	header->slot = card->portno;
1600	skb_copy_from_linear_data(skb, header + 1, skb->len);
1601	spin_unlock(&card->lock);
1602	card->stats.tx_bytes += skb->len;
1603	card->stats.tx_packets++;
1604	dev_kfree_skb(skb);
1605	netif_wake_queue(card->dev);
1606	spin_lock(&card->lock);
1607	if (card->tx_emitted <= 0 && card->tx_buffer != NULL)
1608		/* If this is the first tx buffer emit it immediately. */
1609		__lcs_emit_txbuffer(card);
1610out:
1611	spin_unlock(&card->lock);
1612	return rc;
1613}
1614
1615static int
1616lcs_start_xmit(struct sk_buff *skb, struct net_device *dev)
1617{
1618	struct lcs_card *card;
1619	int rc;
1620
1621	LCS_DBF_TEXT(5, trace, "pktxmit");
1622	card = (struct lcs_card *) dev->ml_priv;
1623	rc = __lcs_start_xmit(card, skb, dev);
1624	return rc;
1625}
1626
1627/**
1628 * send startlan and lanstat command to make LCS device ready
1629 */
1630static int
1631lcs_startlan_auto(struct lcs_card *card)
1632{
1633	int rc;
1634
1635	LCS_DBF_TEXT(2, trace, "strtauto");
1636#ifdef CONFIG_NET_ETHERNET
1637	card->lan_type = LCS_FRAME_TYPE_ENET;
1638	rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
1639	if (rc == 0)
1640		return 0;
1641
1642#endif
1643#ifdef CONFIG_TR
1644	card->lan_type = LCS_FRAME_TYPE_TR;
1645	rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
1646	if (rc == 0)
1647		return 0;
1648#endif
1649#ifdef CONFIG_FDDI
1650	card->lan_type = LCS_FRAME_TYPE_FDDI;
1651	rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
1652	if (rc == 0)
1653		return 0;
1654#endif
1655	return -EIO;
1656}
1657
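/*
 * If no port number has been configured (LCS_INVALID_PORT_NO), probe
 * ports 0..16 until a STARTLAN succeeds; otherwise use the configured
 * port directly.
 */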
1658static int
1659lcs_startlan(struct lcs_card *card)
1660{
1661	int rc, i;
1662
1663	LCS_DBF_TEXT(2, trace, "startlan");
1664	rc = 0;
1665	if (card->portno != LCS_INVALID_PORT_NO) {
1666		if (card->lan_type == LCS_FRAME_TYPE_AUTO)
1667			rc = lcs_startlan_auto(card);
1668		else
1669			rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
1670	} else {
1671                for (i = 0; i <= 16; i++) {
1672                        card->portno = i;
1673                        if (card->lan_type != LCS_FRAME_TYPE_AUTO)
1674                                rc = lcs_send_startlan(card,
1675                                                       LCS_INITIATOR_TCPIP);
1676                        else
1677                                /* autodetecting lan type */
1678                                rc = lcs_startlan_auto(card);
1679                        if (rc == 0)
1680                                break;
1681                }
1682        }
1683	if (rc == 0)
1684		return lcs_send_lanstat(card);
1685	return rc;
1686}
1687
1688/**
1689 * LCS detect function
1690 * setup channels and make them I/O ready
1691 */
1692static int
1693lcs_detect(struct lcs_card *card)
1694{
1695	int rc = 0;
1696
1697	LCS_DBF_TEXT(2, setup, "lcsdetct");
1698	/* start/reset card */
1699	if (card->dev)
1700		netif_stop_queue(card->dev);
1701	rc = lcs_stop_channels(card);
1702	if (rc == 0) {
1703		rc = lcs_start_channels(card);
1704		if (rc == 0) {
1705			rc = lcs_send_startup(card, LCS_INITIATOR_TCPIP);
1706			if (rc == 0)
1707				rc = lcs_startlan(card);
1708		}
1709	}
1710	if (rc == 0) {
1711		card->state = DEV_STATE_UP;
1712	} else {
1713		card->state = DEV_STATE_DOWN;
1714		card->write.state = LCS_CH_STATE_INIT;
1715		card->read.state =  LCS_CH_STATE_INIT;
1716	}
1717	return rc;
1718}
1719
1720/**
1721 * LCS Stop card
1722 */
1723static int
1724lcs_stopcard(struct lcs_card *card)
1725{
1726	int rc;
1727
1728	LCS_DBF_TEXT(3, setup, "stopcard");
1729
1730	if (card->read.state != LCS_CH_STATE_STOPPED &&
1731	    card->write.state != LCS_CH_STATE_STOPPED &&
1732	    card->read.state != LCS_CH_STATE_ERROR &&
1733	    card->write.state != LCS_CH_STATE_ERROR &&
1734	    card->state == DEV_STATE_UP) {
1735		lcs_clear_multicast_list(card);
1736		rc = lcs_send_stoplan(card,LCS_INITIATOR_TCPIP);
1737		rc = lcs_send_shutdown(card);
1738	}
1739	rc = lcs_stop_channels(card);
1740	card->state = DEV_STATE_DOWN;
1741
1742	return rc;
1743}
1744
1745/**
1746 * Kernel Thread helper functions for LGW initiated commands
1747 */
1748static void
1749lcs_start_kernel_thread(struct work_struct *work)
1750{
1751	struct lcs_card *card = container_of(work, struct lcs_card, kernel_thread_starter);
1752	LCS_DBF_TEXT(5, trace, "krnthrd");
1753	if (lcs_do_start_thread(card, LCS_RECOVERY_THREAD))
1754		kthread_run(lcs_recovery, card, "lcs_recover");
1755#ifdef CONFIG_IP_MULTICAST
1756	if (lcs_do_start_thread(card, LCS_SET_MC_THREAD))
1757		kthread_run(lcs_register_mc_addresses, card, "regipm");
1758#endif
1759}
1760
1761/**
1762 * Process control frames.
1763 */
1764static void
1765lcs_get_control(struct lcs_card *card, struct lcs_cmd *cmd)
1766{
1767	LCS_DBF_TEXT(5, trace, "getctrl");
1768	if (cmd->initiator == LCS_INITIATOR_LGW) {
1769		switch(cmd->cmd_code) {
1770		case LCS_CMD_STARTUP:
1771		case LCS_CMD_STARTLAN:
1772			lcs_schedule_recovery(card);
1773			break;
1774		case LCS_CMD_STOPLAN:
1775			if (card->dev) {
1776				pr_warning("Stoplan for %s initiated by LGW.\n", card->dev->name);
1777				netif_carrier_off(card->dev);
1778			}
1779			break;
1780		default:
1781			LCS_DBF_TEXT(5, trace, "noLGWcmd");
1782			break;
1783		}
1784	} else
1785		lcs_notify_lancmd_waiters(card, cmd);
1786}
1787
1788/**
1789 * Unpack network packet.
1790 */
1791static void
1792lcs_get_skb(struct lcs_card *card, char *skb_data, unsigned int skb_len)
1793{
1794	struct sk_buff *skb;
1795
1796	LCS_DBF_TEXT(5, trace, "getskb");
1797	if (card->dev == NULL ||
1798	    card->state != DEV_STATE_UP)
1799		/* The card isn't up. Ignore the packet. */
1800		return;
1801
1802	skb = dev_alloc_skb(skb_len);
1803	if (skb == NULL) {
1804		dev_err(&card->dev->dev,
1805			" Allocating a socket buffer to interface %s failed\n",
1806			  card->dev->name);
1807		card->stats.rx_dropped++;
1808		return;
1809	}
1810	memcpy(skb_put(skb, skb_len), skb_data, skb_len);
1811	skb->protocol =	card->lan_type_trans(skb, card->dev);
1812	card->stats.rx_bytes += skb_len;
1813	card->stats.rx_packets++;
1814	if (skb->protocol == htons(ETH_P_802_2))
1815		*((__u32 *)skb->cb) = ++card->pkt_seq;
1816	netif_rx(skb);
1817}
1818
1819/**
1820 * LCS main routine to get packets and lancmd replies from the buffers
1821 */
1822static void
1823lcs_get_frames_cb(struct lcs_channel *channel, struct lcs_buffer *buffer)
1824{
1825	struct lcs_card *card;
1826	struct lcs_header *lcs_hdr;
1827	__u16 offset;
1828
1829	LCS_DBF_TEXT(5, trace, "lcsgtpkt");
1830	lcs_hdr = (struct lcs_header *) buffer->data;
1831	if (lcs_hdr->offset == LCS_ILLEGAL_OFFSET) {
1832		LCS_DBF_TEXT(4, trace, "-eiogpkt");
1833		return;
1834	}
1835	card = container_of(channel, struct lcs_card, read);
1836	offset = 0;
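	/*
	 * A read buffer contains a chain of frames; each lcs_header.offset
	 * points to the start of the next frame and a zero offset ends the
	 * chain.
	 */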
1837	while (lcs_hdr->offset != 0) {
1838		if (lcs_hdr->offset <= 0 ||
1839		    lcs_hdr->offset > LCS_IOBUFFERSIZE ||
1840		    lcs_hdr->offset < offset) {
1841			/* Offset invalid. */
1842			card->stats.rx_length_errors++;
1843			card->stats.rx_errors++;
1844			return;
1845		}
1846		/* What kind of frame is it? */
1847		if (lcs_hdr->type == LCS_FRAME_TYPE_CONTROL)
1848			/* Control frame. */
1849			lcs_get_control(card, (struct lcs_cmd *) lcs_hdr);
1850		else if (lcs_hdr->type == LCS_FRAME_TYPE_ENET ||
1851			 lcs_hdr->type == LCS_FRAME_TYPE_TR ||
1852			 lcs_hdr->type == LCS_FRAME_TYPE_FDDI)
1853			/* Normal network packet. */
1854			lcs_get_skb(card, (char *)(lcs_hdr + 1),
1855				    lcs_hdr->offset - offset -
1856				    sizeof(struct lcs_header));
1857		else
1858			/* Unknown frame type. */
1859			;
1860		/* Proceed to next frame. */
1861		offset = lcs_hdr->offset;
1862		lcs_hdr->offset = LCS_ILLEGAL_OFFSET;
1863		lcs_hdr = (struct lcs_header *) (buffer->data + offset);
1864	}
1865	/* The buffer is now empty. Make it ready again. */
1866	lcs_ready_buffer(&card->read, buffer);
1867}
1868
1869/**
1870 * get network statistics for ifconfig and other user programs
1871 */
1872static struct net_device_stats *
1873lcs_getstats(struct net_device *dev)
1874{
1875	struct lcs_card *card;
1876
1877	LCS_DBF_TEXT(4, trace, "netstats");
1878	card = (struct lcs_card *) dev->ml_priv;
1879	return &card->stats;
1880}
1881
1882static int
1883lcs_stop_device(struct net_device *dev)
1884{
1885	struct lcs_card *card;
1886	int rc;
1887
1888	LCS_DBF_TEXT(2, trace, "stopdev");
1889	card   = (struct lcs_card *) dev->ml_priv;
1890	netif_carrier_off(dev);
1891	netif_tx_disable(dev);
1892	dev->flags &= ~IFF_UP;
1893	wait_event(card->write.wait_q,
1894		(card->write.state != LCS_CH_STATE_RUNNING));
1895	rc = lcs_stopcard(card);
1896	if (rc)
1897		dev_err(&card->dev->dev,
1898			" Shutting down the LCS device failed\n ");
1899	return rc;
1900}
1901
1902static int
1903lcs_open_device(struct net_device *dev)
1904{
1905	struct lcs_card *card;
1906	int rc;
1907
1908	LCS_DBF_TEXT(2, trace, "opendev");
1909	card = (struct lcs_card *) dev->ml_priv;
1910	/* Start the LCS card. */
1911	rc = lcs_detect(card);
1912	if (rc) {
1913		pr_err("Error in opening device!\n");
1914
1915	} else {
1916		dev->flags |= IFF_UP;
1917		netif_carrier_on(dev);
1918		netif_wake_queue(dev);
1919		card->state = DEV_STATE_UP;
1920	}
1921	return rc;
1922}
1923
1924/**
1925 * Show function for the portno attribute, read e.g. via cat.
1926 */
1927static ssize_t
1928lcs_portno_show(struct device *dev, struct device_attribute *attr, char *buf)
1929{
1930	struct lcs_card *card;
1931
1932	card = dev_get_drvdata(dev);
1933
1934	if (!card)
1935		return 0;
1936
1937	return sprintf(buf, "%d\n", card->portno);
1938}
1939
1940/**
1941 * Store a new relative port number written to the portno attribute.
1942 */
1943static ssize_t
1944lcs_portno_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1945{
1946	struct lcs_card *card;
1947	unsigned int value;
1948
1949	card = dev_get_drvdata(dev);
1950
1951	if (!card)
1952		return 0;
1953
1954	if (sscanf(buf, "%u", &value) != 1)
1955		return -EINVAL;
1956	card->portno = value;
1957
1958	return count;
1959
1960}
1961
1962static DEVICE_ATTR(portno, 0644, lcs_portno_show, lcs_portno_store);
1963
1964const char *lcs_type[] = {
1965	"not a channel",
1966	"2216 parallel",
1967	"2216 channel",
1968	"OSA LCS card",
1969	"unknown channel type",
1970	"unsupported channel type",
1971};
1972
1973static ssize_t
1974lcs_type_show(struct device *dev, struct device_attribute *attr, char *buf)
1975{
1976	struct ccwgroup_device *cgdev;
1977
1978	cgdev = to_ccwgroupdev(dev);
1979	if (!cgdev)
1980		return -ENODEV;
1981
1982	return sprintf(buf, "%s\n", lcs_type[cgdev->cdev[0]->id.driver_info]);
1983}
1984
1985static DEVICE_ATTR(type, 0444, lcs_type_show, NULL);
1986
1987static ssize_t
1988lcs_timeout_show(struct device *dev, struct device_attribute *attr, char *buf)
1989{
1990	struct lcs_card *card;
1991
1992	card = dev_get_drvdata(dev);
1993
1994	return card ? sprintf(buf, "%u\n", card->lancmd_timeout) : 0;
1995}
1996
1997static ssize_t
1998lcs_timeout_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1999{
2000	struct lcs_card *card;
2001	unsigned int value;
2002
2003	card = dev_get_drvdata(dev);
2004
2005	if (!card)
2006		return 0;
2007
2008	if (sscanf(buf, "%u", &value) != 1)
2009		return -EINVAL;
2010	card->lancmd_timeout = value;
2011
2012	return count;
2013
2014}
2015
2016static DEVICE_ATTR(lancmd_timeout, 0644, lcs_timeout_show, lcs_timeout_store);
2017
2018static ssize_t
2019lcs_dev_recover_store(struct device *dev, struct device_attribute *attr,
2020		      const char *buf, size_t count)
2021{
2022	struct lcs_card *card = dev_get_drvdata(dev);
2023	char *tmp;
2024	int i;
2025
2026	if (!card)
2027		return -EINVAL;
2028	if (card->state != DEV_STATE_UP)
2029		return -EPERM;
2030	i = simple_strtoul(buf, &tmp, 16);
2031	if (i == 1)
2032		lcs_schedule_recovery(card);
2033	return count;
2034}
2035
2036static DEVICE_ATTR(recover, 0200, NULL, lcs_dev_recover_store);
2037
2038static struct attribute * lcs_attrs[] = {
2039	&dev_attr_portno.attr,
2040	&dev_attr_type.attr,
2041	&dev_attr_lancmd_timeout.attr,
2042	&dev_attr_recover.attr,
2043	NULL,
2044};
2045
2046static struct attribute_group lcs_attr_group = {
2047	.attrs = lcs_attrs,
2048};
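/*
 * These attributes appear per LCS group device in sysfs. Example usage,
 * assuming a hypothetical device bus ID of 0.0.0800:
 *
 *   cat /sys/bus/ccwgroup/devices/0.0.0800/portno
 *   echo 20 > /sys/bus/ccwgroup/devices/0.0.0800/lancmd_timeout
 *   echo 1 > /sys/bus/ccwgroup/devices/0.0.0800/recover
 */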
2049
2050/**
2051 * lcs_probe_device is called on establishing a new ccwgroup_device.
2052 */
2053static int
2054lcs_probe_device(struct ccwgroup_device *ccwgdev)
2055{
2056	struct lcs_card *card;
2057	int ret;
2058
2059	if (!get_device(&ccwgdev->dev))
2060		return -ENODEV;
2061
2062	LCS_DBF_TEXT(2, setup, "add_dev");
2063	card = lcs_alloc_card();
2064	if (!card) {
2065		LCS_DBF_TEXT_(2, setup, "  rc%d", -ENOMEM);
2066		put_device(&ccwgdev->dev);
2067		return -ENOMEM;
2068	}
2069	ret = sysfs_create_group(&ccwgdev->dev.kobj, &lcs_attr_group);
2070	if (ret) {
2071		lcs_free_card(card);
2072		put_device(&ccwgdev->dev);
2073		return ret;
2074	}
2075	dev_set_drvdata(&ccwgdev->dev, card);
2076	ccwgdev->cdev[0]->handler = lcs_irq;
2077	ccwgdev->cdev[1]->handler = lcs_irq;
2078	card->gdev = ccwgdev;
2079	INIT_WORK(&card->kernel_thread_starter, lcs_start_kernel_thread);
2080	card->thread_start_mask = 0;
2081	card->thread_allowed_mask = 0;
2082	card->thread_running_mask = 0;
2083	return 0;
2084}
2085
2086static int
2087lcs_register_netdev(struct ccwgroup_device *ccwgdev)
2088{
2089	struct lcs_card *card;
2090
2091	LCS_DBF_TEXT(2, setup, "regnetdv");
2092	card = dev_get_drvdata(&ccwgdev->dev);
2093	if (card->dev->reg_state != NETREG_UNINITIALIZED)
2094		return 0;
2095	SET_NETDEV_DEV(card->dev, &ccwgdev->dev);
2096	return register_netdev(card->dev);
2097}
2098
2099/**
2100 * lcs_new_device is called when the ccwgroup device is set online.
2101 */
2102static const struct net_device_ops lcs_netdev_ops = {
2103	.ndo_open		= lcs_open_device,
2104	.ndo_stop		= lcs_stop_device,
2105	.ndo_get_stats		= lcs_getstats,
2106	.ndo_start_xmit		= lcs_start_xmit,
2107};
2108
2109static const struct net_device_ops lcs_mc_netdev_ops = {
2110	.ndo_open		= lcs_open_device,
2111	.ndo_stop		= lcs_stop_device,
2112	.ndo_get_stats		= lcs_getstats,
2113	.ndo_start_xmit		= lcs_start_xmit,
2114	.ndo_set_multicast_list = lcs_set_multicast_list,
2115};
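/*
 * lcs_mc_netdev_ops additionally wires up .ndo_set_multicast_list and is
 * installed instead of lcs_netdev_ops when the adapter reports multicast
 * support (see the lcs_check_multicast_support() call in lcs_new_device()).
 */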
2116
2117static int
2118lcs_new_device(struct ccwgroup_device *ccwgdev)
2119{
2120	struct lcs_card *card;
2121	struct net_device *dev = NULL;
2122	enum lcs_dev_states recover_state;
2123	int rc;
2124
2125	card = dev_get_drvdata(&ccwgdev->dev);
2126	if (!card)
2127		return -ENODEV;
2128
2129	LCS_DBF_TEXT(2, setup, "newdev");
2130	LCS_DBF_HEX(3, setup, &card, sizeof(void*));
2131	card->read.ccwdev  = ccwgdev->cdev[0];
2132	card->write.ccwdev = ccwgdev->cdev[1];
2133
2134	recover_state = card->state;
2135	rc = ccw_device_set_online(card->read.ccwdev);
2136	if (rc)
2137		goto out_err;
2138	rc = ccw_device_set_online(card->write.ccwdev);
2139	if (rc)
2140		goto out_werr;
2141
2142	LCS_DBF_TEXT(3, setup, "lcsnewdv");
2143
2144	lcs_setup_card(card);
2145	rc = lcs_detect(card);
2146	if (rc) {
2147		LCS_DBF_TEXT(2, setup, "dtctfail");
2148		dev_err(&card->dev->dev,
2149			"Detecting a network adapter for LCS devices"
2150			" failed with rc=%d (0x%x)\n", rc, rc);
2151		lcs_stopcard(card);
2152		goto out;
2153	}
2154	if (card->dev) {
2155		LCS_DBF_TEXT(2, setup, "samedev");
2156		LCS_DBF_HEX(3, setup, &card, sizeof(void*));
2157		goto netdev_out;
2158	}
2159	switch (card->lan_type) {
2160#ifdef CONFIG_NET_ETHERNET
2161	case LCS_FRAME_TYPE_ENET:
2162		card->lan_type_trans = eth_type_trans;
2163		dev = alloc_etherdev(0);
2164		break;
2165#endif
2166#ifdef CONFIG_TR
2167	case LCS_FRAME_TYPE_TR:
2168		card->lan_type_trans = tr_type_trans;
2169		dev = alloc_trdev(0);
2170		break;
2171#endif
2172#ifdef CONFIG_FDDI
2173	case LCS_FRAME_TYPE_FDDI:
2174		card->lan_type_trans = fddi_type_trans;
2175		dev = alloc_fddidev(0);
2176		break;
2177#endif
2178	default:
2179		LCS_DBF_TEXT(3, setup, "errinit");
2180		pr_err("Initialization failed\n");
2181		goto out;
2182	}
2183	if (!dev)
2184		goto out;
2185	card->dev = dev;
2186	card->dev->ml_priv = card;
2187	card->dev->netdev_ops = &lcs_netdev_ops;
2188	memcpy(card->dev->dev_addr, card->mac, LCS_MAC_LENGTH);
2189#ifdef CONFIG_IP_MULTICAST
2190	if (!lcs_check_multicast_support(card))
2191		card->dev->netdev_ops = &lcs_mc_netdev_ops;
2192#endif
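	/*
	 * netdev_out is reached either with a newly allocated net_device or,
	 * in the recovery case, with the already existing one; recovery
	 * brings the interface back up without re-registering it.
	 */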
2193netdev_out:
2194	lcs_set_allowed_threads(card, 0xffffffff);
2195	if (recover_state == DEV_STATE_RECOVER) {
2196		lcs_set_multicast_list(card->dev);
2197		card->dev->flags |= IFF_UP;
2198		netif_carrier_on(card->dev);
2199		netif_wake_queue(card->dev);
2200		card->state = DEV_STATE_UP;
2201	} else {
2202		lcs_stopcard(card);
2203	}
2204
2205	if (lcs_register_netdev(ccwgdev) != 0)
2206		goto out;
2207
2208	/* Print out supported assists: IPv6 */
2209	pr_info("LCS device %s %s IPv6 support\n", card->dev->name,
2210		(card->ip_assists_supported & LCS_IPASS_IPV6_SUPPORT) ?
2211		"with" : "without");
2212	/* Print out supported assists: Multicast */
2213	pr_info("LCS device %s %s Multicast support\n", card->dev->name,
2214		(card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) ?
2215		"with" : "without");
2216	return 0;
2217out:
2218
2219	ccw_device_set_offline(card->write.ccwdev);
2220out_werr:
2221	ccw_device_set_offline(card->read.ccwdev);
2222out_err:
2223	return -ENODEV;
2224}
2225
2226/**
2227 * lcs_shutdown_device, called when setting the group device offline.
2228 */
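/*
 * recovery_mode == 0 means a regular offline request: all driver threads
 * are disallowed and a running LCS_SET_MC_THREAD is waited for first.
 * recovery_mode == 1 is used by the recovery path, which leaves the thread
 * masks untouched.
 */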
2229static int
2230__lcs_shutdown_device(struct ccwgroup_device *ccwgdev, int recovery_mode)
2231{
2232	struct lcs_card *card;
2233	enum lcs_dev_states recover_state;
2234	int ret;
2235
2236	LCS_DBF_TEXT(3, setup, "shtdndev");
2237	card = dev_get_drvdata(&ccwgdev->dev);
2238	if (!card)
2239		return -ENODEV;
2240	if (recovery_mode == 0) {
2241		lcs_set_allowed_threads(card, 0);
2242		if (lcs_wait_for_threads(card, LCS_SET_MC_THREAD))
2243			return -ERESTARTSYS;
2244	}
2245	LCS_DBF_HEX(3, setup, &card, sizeof(void*));
2246	recover_state = card->state;
2247
2248	ret = lcs_stop_device(card->dev);
2249	ret = ccw_device_set_offline(card->read.ccwdev);
2250	ret = ccw_device_set_offline(card->write.ccwdev);
2251	if (recover_state == DEV_STATE_UP) {
2252		card->state = DEV_STATE_RECOVER;
2253	}
2254	if (ret)
2255		return ret;
2256	return 0;
2257}
2258
2259static int
2260lcs_shutdown_device(struct ccwgroup_device *ccwgdev)
2261{
2262	return __lcs_shutdown_device(ccwgdev, 0);
2263}
2264
2265/**
2266 * Drive LCS recovery after a startup/startlan initiated by the LAN gateway.
2267 */
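/*
 * The recovery thread takes the group device offline in recovery mode and
 * brings it back online via lcs_new_device(); the LCS_RECOVERY_THREAD
 * running bit is cleared when it is done.
 */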
2268static int
2269lcs_recovery(void *ptr)
2270{
2271	struct lcs_card *card;
2272	struct ccwgroup_device *gdev;
2273	int rc;
2274
2275	card = (struct lcs_card *) ptr;
2276
2277	LCS_DBF_TEXT(4, trace, "recover1");
2278	if (!lcs_do_run_thread(card, LCS_RECOVERY_THREAD))
2279		return 0;
2280	LCS_DBF_TEXT(4, trace, "recover2");
2281	gdev = card->gdev;
2282	dev_warn(&gdev->dev,
2283		"A recovery process has been started for the LCS device\n");
2284	rc = __lcs_shutdown_device(gdev, 1);
2285	rc = lcs_new_device(gdev);
2286	if (!rc)
2287		pr_info("Device %s successfully recovered!\n",
2288			card->dev->name);
2289	else
2290		pr_info("Device %s could not be recovered!\n",
2291			card->dev->name);
2292	lcs_clear_thread_running_bit(card, LCS_RECOVERY_THREAD);
2293	return 0;
2294}
2295
2296/**
2297 * lcs_remove_device, free buffers and card
2298 */
2299static void
2300lcs_remove_device(struct ccwgroup_device *ccwgdev)
2301{
2302	struct lcs_card *card;
2303
2304	card = dev_get_drvdata(&ccwgdev->dev);
2305	if (!card)
2306		return;
2307
2308	LCS_DBF_TEXT(3, setup, "remdev");
2309	LCS_DBF_HEX(3, setup, &card, sizeof(void*));
2310	if (ccwgdev->state == CCWGROUP_ONLINE) {
2311		lcs_shutdown_device(ccwgdev);
2312	}
2313	if (card->dev)
2314		unregister_netdev(card->dev);
2315	sysfs_remove_group(&ccwgdev->dev.kobj, &lcs_attr_group);
2316	lcs_cleanup_card(card);
2317	lcs_free_card(card);
2318	put_device(&ccwgdev->dev);
2319}
2320
2321static int lcs_pm_suspend(struct lcs_card *card)
2322{
2323	if (card->dev)
2324		netif_device_detach(card->dev);
2325	lcs_set_allowed_threads(card, 0);
2326	lcs_wait_for_threads(card, 0xffffffff);
2327	if (card->state != DEV_STATE_DOWN)
2328		__lcs_shutdown_device(card->gdev, 1);
2329	return 0;
2330}
2331
2332static int lcs_pm_resume(struct lcs_card *card)
2333{
2334	int rc = 0;
2335
2336	if (card->state == DEV_STATE_RECOVER)
2337		rc = lcs_new_device(card->gdev);
2338	if (card->dev)
2339		netif_device_attach(card->dev);
2340	if (rc) {
2341		dev_warn(&card->gdev->dev, "The lcs device driver "
2342			"failed to recover the device\n");
2343	}
2344	return rc;
2345}
2346
2347static int lcs_prepare(struct ccwgroup_device *gdev)
2348{
2349	return 0;
2350}
2351
2352static void lcs_complete(struct ccwgroup_device *gdev)
2353{
2354	return;
2355}
2356
2357static int lcs_freeze(struct ccwgroup_device *gdev)
2358{
2359	struct lcs_card *card = dev_get_drvdata(&gdev->dev);
2360	return lcs_pm_suspend(card);
2361}
2362
2363static int lcs_thaw(struct ccwgroup_device *gdev)
2364{
2365	struct lcs_card *card = dev_get_drvdata(&gdev->dev);
2366	return lcs_pm_resume(card);
2367}
2368
2369static int lcs_restore(struct ccwgroup_device *gdev)
2370{
2371	struct lcs_card *card = dev_get_drvdata(&gdev->dev);
2372	return lcs_pm_resume(card);
2373}
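/*
 * The power management callbacks map the ccwgroup freeze/thaw/restore
 * events onto the suspend/resume helpers above; prepare and complete are
 * no-ops.
 */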
2374
2375static struct ccw_device_id lcs_ids[] = {
2376	{CCW_DEVICE(0x3088, 0x08), .driver_info = lcs_channel_type_parallel},
2377	{CCW_DEVICE(0x3088, 0x1f), .driver_info = lcs_channel_type_2216},
2378	{CCW_DEVICE(0x3088, 0x60), .driver_info = lcs_channel_type_osa2},
2379	{},
2380};
2381MODULE_DEVICE_TABLE(ccw, lcs_ids);
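/*
 * The driver_info value of the matching entry doubles as an index into the
 * lcs_type[] string table used by the sysfs "type" attribute above.
 */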
2382
2383static struct ccw_driver lcs_ccw_driver = {
2384	.owner	= THIS_MODULE,
2385	.name	= "lcs",
2386	.ids	= lcs_ids,
2387	.probe	= ccwgroup_probe_ccwdev,
2388	.remove	= ccwgroup_remove_ccwdev,
2389};
2390
2391/**
2392 * LCS ccwgroup driver registration
2393 */
2394static struct ccwgroup_driver lcs_group_driver = {
2395	.owner       = THIS_MODULE,
2396	.name        = "lcs",
2397	.max_slaves  = 2,
2398	.driver_id   = 0xD3C3E2,
2399	.probe       = lcs_probe_device,
2400	.remove      = lcs_remove_device,
2401	.set_online  = lcs_new_device,
2402	.set_offline = lcs_shutdown_device,
2403	.prepare     = lcs_prepare,
2404	.complete    = lcs_complete,
2405	.freeze	     = lcs_freeze,
2406	.thaw	     = lcs_thaw,
2407	.restore     = lcs_restore,
2408};
2409
2410static ssize_t
2411lcs_driver_group_store(struct device_driver *ddrv, const char *buf,
2412		       size_t count)
2413{
2414	int err;
2415	err = ccwgroup_create_from_string(lcs_root_dev,
2416					  lcs_group_driver.driver_id,
2417					  &lcs_ccw_driver, 2, buf);
2418	return err ? err : count;
2419}
2420
2421static DRIVER_ATTR(group, 0200, NULL, lcs_driver_group_store);
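/*
 * Writing the bus IDs of the read and write subchannels to the driver's
 * "group" attribute creates a new LCS group device. Example, assuming
 * hypothetical bus IDs:
 *
 *   echo 0.0.0800,0.0.0801 > /sys/bus/ccwgroup/drivers/lcs/group
 */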
2422
2423static struct attribute *lcs_group_attrs[] = {
2424	&driver_attr_group.attr,
2425	NULL,
2426};
2427
2428static struct attribute_group lcs_group_attr_group = {
2429	.attrs = lcs_group_attrs,
2430};
2431
2432static const struct attribute_group *lcs_group_attr_groups[] = {
2433	&lcs_group_attr_group,
2434	NULL,
2435};
2436
2437/**
2438 *  LCS Module/Kernel initialization function
2439 */
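/*
 * Registration order: debug facility, root device, ccw driver, ccwgroup
 * driver. The error path below unwinds in reverse order.
 */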
2440static int
2441__init lcs_init_module(void)
2442{
2443	int rc;
2444
2445	pr_info("Loading %s\n", version);
2446	rc = lcs_register_debug_facility();
2447	LCS_DBF_TEXT(0, setup, "lcsinit");
2448	if (rc)
2449		goto out_err;
2450	lcs_root_dev = root_device_register("lcs");
2451	rc = IS_ERR(lcs_root_dev) ? PTR_ERR(lcs_root_dev) : 0;
2452	if (rc)
2453		goto register_err;
2454	rc = ccw_driver_register(&lcs_ccw_driver);
2455	if (rc)
2456		goto ccw_err;
2457	lcs_group_driver.driver.groups = lcs_group_attr_groups;
2458	rc = ccwgroup_driver_register(&lcs_group_driver);
2459	if (rc)
2460		goto ccwgroup_err;
2461	return 0;
2462
2463ccwgroup_err:
2464	ccw_driver_unregister(&lcs_ccw_driver);
2465ccw_err:
2466	root_device_unregister(lcs_root_dev);
2467register_err:
2468	lcs_unregister_debug_facility();
2469out_err:
2470	pr_err("Initializing the lcs device driver failed\n");
2471	return rc;
2472}
2473
2474
2475/**
2476 *  LCS module cleanup function
2477 */
2478static void
2479__exit lcs_cleanup_module(void)
2480{
2481	pr_info("Terminating lcs module.\n");
2482	LCS_DBF_TEXT(0, trace, "cleanup");
2483	driver_remove_file(&lcs_group_driver.driver,
2484			   &driver_attr_group);
2485	ccwgroup_driver_unregister(&lcs_group_driver);
2486	ccw_driver_unregister(&lcs_ccw_driver);
2487	root_device_unregister(lcs_root_dev);
2488	lcs_unregister_debug_facility();
2489}
2490
2491module_init(lcs_init_module);
2492module_exit(lcs_cleanup_module);
2493
2494MODULE_AUTHOR("Frank Pavlic <fpavlic@de.ibm.com>");
2495MODULE_LICENSE("GPL");
2496