/*
 * drivers/net/ibm_newemac/mal.c
 *
 * Memory Access Layer (MAL) support
 *
 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
 *                <benh@kernel.crashing.org>
 *
 * Based on the arch/ppc version of the driver:
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 *      Benjamin Herrenschmidt <benh@kernel.crashing.org>,
 *      David Gibson <hermes@gibson.dropbear.id.au>,
 *
 *      Armin Kuster <akuster@mvista.com>
 *      Copyright 2002 MontaVista Software Inc.
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 */

#include <linux/delay.h>
#include <linux/slab.h>

#include "core.h"
#include <asm/dcr-regs.h>

static int mal_count;

int __devinit mal_register_commac(struct mal_instance	*mal,
				  struct mal_commac	*commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "reg(%08x, %08x)" NL,
		commac->tx_chan_mask, commac->rx_chan_mask);

	/* Don't let multiple commacs claim the same channel(s) */
	if ((mal->tx_chan_mask & commac->tx_chan_mask) ||
	    (mal->rx_chan_mask & commac->rx_chan_mask)) {
		spin_unlock_irqrestore(&mal->lock, flags);
		printk(KERN_WARNING "mal%d: COMMAC channels conflict!\n",
		       mal->index);
		return -EBUSY;
	}

	if (list_empty(&mal->list))
		napi_enable(&mal->napi);
	mal->tx_chan_mask |= commac->tx_chan_mask;
	mal->rx_chan_mask |= commac->rx_chan_mask;
	list_add(&commac->list, &mal->list);

	spin_unlock_irqrestore(&mal->lock, flags);

	return 0;
}

void mal_unregister_commac(struct mal_instance	*mal,
		struct mal_commac *commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "unreg(%08x, %08x)" NL,
		commac->tx_chan_mask, commac->rx_chan_mask);

	mal->tx_chan_mask &= ~commac->tx_chan_mask;
	mal->rx_chan_mask &= ~commac->rx_chan_mask;
	list_del_init(&commac->list);
	if (list_empty(&mal->list))
		napi_disable(&mal->napi);

	spin_unlock_irqrestore(&mal->lock, flags);
}

int mal_set_rcbs(struct mal_instance *mal, int channel, unsigned long size)
{
	BUG_ON(channel < 0 || channel >= mal->num_rx_chans ||
	       size > MAL_MAX_RX_SIZE);

	MAL_DBG(mal, "set_rcbs(%d, %lu)" NL, channel, size);

	if (size & 0xf) {
		printk(KERN_WARNING
		       "mal%d: incorrect RX size %lu for channel %d\n",
		       mal->index, size, channel);
		return -EINVAL;
	}

	set_mal_dcrn(mal, MAL_RCBS(channel), size >> 4);
	return 0;
}
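/*
 * For illustration: a receive buffer size of 1536 bytes passes the 16-byte
 * alignment check above (1536 & 0xf == 0) and is programmed into the
 * channel's MAL_RCBS register as 1536 >> 4 == 96.
 */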

int mal_tx_bd_offset(struct mal_instance *mal, int channel)
{
	BUG_ON(channel < 0 || channel >= mal->num_tx_chans);

	return channel * NUM_TX_BUFF;
}

int mal_rx_bd_offset(struct mal_instance *mal, int channel)
{
	BUG_ON(channel < 0 || channel >= mal->num_rx_chans);
	return mal->num_tx_chans * NUM_TX_BUFF + channel * NUM_RX_BUFF;
}
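/*
 * The two helpers above imply the layout of the single descriptor table (in
 * units of struct mal_descriptor): all TX rings come first, one per channel
 * with NUM_TX_BUFF entries each, followed by the RX rings with NUM_RX_BUFF
 * entries each. E.g. with two TX channels, RX channel 0 starts at entry
 * 2 * NUM_TX_BUFF.
 */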

void mal_enable_tx_channel(struct mal_instance *mal, int channel)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "enable_tx(%d)" NL, channel);

	set_mal_dcrn(mal, MAL_TXCASR,
		     get_mal_dcrn(mal, MAL_TXCASR) | MAL_CHAN_MASK(channel));

	spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_disable_tx_channel(struct mal_instance *mal, int channel)
{
	set_mal_dcrn(mal, MAL_TXCARR, MAL_CHAN_MASK(channel));

	MAL_DBG(mal, "disable_tx(%d)" NL, channel);
}

void mal_enable_rx_channel(struct mal_instance *mal, int channel)
{
	unsigned long flags;

	/*
	 * On some 4xx PPCs (e.g. 460EX/GT) the RX channel number is a
	 * multiple of 8, but MAL_RXCASR expects the value divided by 8
	 * when building the bitmask.
	 */
	if (!(channel % 8))
		channel >>= 3;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "enable_rx(%d)" NL, channel);

	set_mal_dcrn(mal, MAL_RXCASR,
		     get_mal_dcrn(mal, MAL_RXCASR) | MAL_CHAN_MASK(channel));

	spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_disable_rx_channel(struct mal_instance *mal, int channel)
{
	/*
	 * On some 4xx PPCs (e.g. 460EX/GT) the RX channel number is a
	 * multiple of 8, but MAL_RXCARR expects the value divided by 8
	 * when building the bitmask.
	 */
	if (!(channel % 8))
		channel >>= 3;

	set_mal_dcrn(mal, MAL_RXCARR, MAL_CHAN_MASK(channel));

	MAL_DBG(mal, "disable_rx(%d)" NL, channel);
}
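/*
 * Worked example for the 460EX/GT case handled above: RX channel 8 is a
 * multiple of 8, so it is shifted down to 1 before the mask is built, i.e.
 * the register is written with MAL_CHAN_MASK(1) rather than MAL_CHAN_MASK(8).
 * Channel numbers that are not a multiple of 8 are used as-is.
 */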

void mal_poll_add(struct mal_instance *mal, struct mal_commac *commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "poll_add(%p)" NL, commac);

	/* starts disabled */
	set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);

	list_add_tail(&commac->poll_list, &mal->poll_list);

	spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_poll_del(struct mal_instance *mal, struct mal_commac *commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "poll_del(%p)" NL, commac);

	list_del(&commac->poll_list);

	spin_unlock_irqrestore(&mal->lock, flags);
}

/* synchronized by mal_poll() */
static inline void mal_enable_eob_irq(struct mal_instance *mal)
{
	MAL_DBG2(mal, "enable_irq" NL);

	set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) | MAL_CFG_EOPIE);
}

/* synchronized by NAPI state */
static inline void mal_disable_eob_irq(struct mal_instance *mal)
{
	set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) & ~MAL_CFG_EOPIE);

	MAL_DBG2(mal, "disable_irq" NL);
}

static irqreturn_t mal_serr(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 esr = get_mal_dcrn(mal, MAL_ESR);

	/* Clear the error status register */
	set_mal_dcrn(mal, MAL_ESR, esr);

	MAL_DBG(mal, "SERR %08x" NL, esr);

	if (esr & MAL_ESR_EVB) {
		if (esr & MAL_ESR_DE) {
			/* We ignore descriptor errors here; a TXDE or RXDE
			 * interrupt will be generated anyway.
			 */
			return IRQ_HANDLED;
		}

		if (esr & MAL_ESR_PEIN) {
			/* PLB error: probably buggy hardware or an
			 * incorrect physical address in a BD (i.e. a bug)
			 */
			if (net_ratelimit())
				printk(KERN_ERR
				       "mal%d: system error, "
				       "PLB (ESR = 0x%08x)\n",
				       mal->index, esr);
			return IRQ_HANDLED;
		}

		/* OPB error: probably buggy hardware or an incorrect
		 * EBC setup
		 */
		if (net_ratelimit())
			printk(KERN_ERR
			       "mal%d: system error, OPB (ESR = 0x%08x)\n",
			       mal->index, esr);
	}
	return IRQ_HANDLED;
}

static inline void mal_schedule_poll(struct mal_instance *mal)
{
	if (likely(napi_schedule_prep(&mal->napi))) {
		MAL_DBG2(mal, "schedule_poll" NL);
		mal_disable_eob_irq(mal);
		__napi_schedule(&mal->napi);
	} else
		MAL_DBG2(mal, "already in poll" NL);
}
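/*
 * This is the usual NAPI deferral pattern: the EOB interrupt handlers below
 * call mal_schedule_poll() to mask further end-of-buffer interrupts and hand
 * the completion work to mal_poll(), which re-enables them once the rings
 * have been drained within its budget.
 */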

static irqreturn_t mal_txeob(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 r = get_mal_dcrn(mal, MAL_TXEOBISR);

	MAL_DBG2(mal, "txeob %08x" NL, r);

	mal_schedule_poll(mal);
	set_mal_dcrn(mal, MAL_TXEOBISR, r);

#ifdef CONFIG_PPC_DCR_NATIVE
	if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
		mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
				(mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICTX));
#endif

	return IRQ_HANDLED;
}

static irqreturn_t mal_rxeob(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 r = get_mal_dcrn(mal, MAL_RXEOBISR);

	MAL_DBG2(mal, "rxeob %08x" NL, r);

	mal_schedule_poll(mal);
	set_mal_dcrn(mal, MAL_RXEOBISR, r);

#ifdef CONFIG_PPC_DCR_NATIVE
	if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
		mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
				(mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICRX));
#endif

	return IRQ_HANDLED;
}

static irqreturn_t mal_txde(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 deir = get_mal_dcrn(mal, MAL_TXDEIR);
	set_mal_dcrn(mal, MAL_TXDEIR, deir);

	MAL_DBG(mal, "txde %08x" NL, deir);

	if (net_ratelimit())
		printk(KERN_ERR
		       "mal%d: TX descriptor error (TXDEIR = 0x%08x)\n",
		       mal->index, deir);

	return IRQ_HANDLED;
}

static irqreturn_t mal_rxde(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;
	struct list_head *l;

	u32 deir = get_mal_dcrn(mal, MAL_RXDEIR);

	MAL_DBG(mal, "rxde %08x" NL, deir);

	list_for_each(l, &mal->list) {
		struct mal_commac *mc = list_entry(l, struct mal_commac, list);
		if (deir & mc->rx_chan_mask) {
			set_bit(MAL_COMMAC_RX_STOPPED, &mc->flags);
			mc->ops->rxde(mc->dev);
		}
	}

	mal_schedule_poll(mal);
	set_mal_dcrn(mal, MAL_RXDEIR, deir);

	return IRQ_HANDLED;
}

static irqreturn_t mal_int(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;
	u32 esr = get_mal_dcrn(mal, MAL_ESR);

	if (esr & MAL_ESR_EVB) {
		/* descriptor error */
		if (esr & MAL_ESR_DE) {
			if (esr & MAL_ESR_CIDT)
				return mal_rxde(irq, dev_instance);
			else
				return mal_txde(irq, dev_instance);
		} else { /* SERR */
			return mal_serr(irq, dev_instance);
		}
	}
	return IRQ_HANDLED;
}
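/*
 * mal_int() is only installed when MAL_FTR_COMMON_ERR_INT is set (405EZ-style
 * MALs where SERR, TXDE and RXDE share a single interrupt line, see
 * mal_probe() below); it inspects MAL_ESR to dispatch to the appropriate
 * handler above.
 */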

void mal_poll_disable(struct mal_instance *mal, struct mal_commac *commac)
{
	/* Spinlock-like semantics: only one caller can disable poll at a time */
	while (test_and_set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags))
		msleep(1);

	/* Synchronize with the MAL NAPI poller */
	napi_synchronize(&mal->napi);
}

void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac)
{
	smp_wmb();
	clear_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);

	/* Trigger a poll here to catch up with events that may have
	 * happened on this channel while it was disabled. It will most
	 * probably be delayed until the next interrupt, but that's mostly
	 * a non-issue in the context where this is called.
	 */
	napi_schedule(&mal->napi);
}

static int mal_poll(struct napi_struct *napi, int budget)
{
	struct mal_instance *mal = container_of(napi, struct mal_instance, napi);
	struct list_head *l;
	int received = 0;
	unsigned long flags;

	MAL_DBG2(mal, "poll(%d)" NL, budget);
 again:
	/* Process TX skbs */
	list_for_each(l, &mal->poll_list) {
		struct mal_commac *mc =
			list_entry(l, struct mal_commac, poll_list);
		mc->ops->poll_tx(mc->dev);
	}

	/* Process RX skbs.
	 *
	 * We _might_ need something smarter here to enforce polling
	 * fairness.
	 */
	list_for_each(l, &mal->poll_list) {
		struct mal_commac *mc =
			list_entry(l, struct mal_commac, poll_list);
		int n;
		if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
			continue;
		n = mc->ops->poll_rx(mc->dev, budget);
		if (n) {
			received += n;
			budget -= n;
			if (budget <= 0)
				goto more_work;
		}
	}

	/* We need to disable IRQs to protect from the RXDE IRQ here */
	spin_lock_irqsave(&mal->lock, flags);
	__napi_complete(napi);
	mal_enable_eob_irq(mal);
	spin_unlock_irqrestore(&mal->lock, flags);

	/* Check for "rotting" packet(s) */
	list_for_each(l, &mal->poll_list) {
		struct mal_commac *mc =
			list_entry(l, struct mal_commac, poll_list);
		if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
			continue;
		if (unlikely(mc->ops->peek_rx(mc->dev) ||
			     test_bit(MAL_COMMAC_RX_STOPPED, &mc->flags))) {
			MAL_DBG2(mal, "rotting packet" NL);
			if (napi_reschedule(napi))
				mal_disable_eob_irq(mal);
			else
				MAL_DBG2(mal, "already in poll list" NL);

			if (budget > 0)
				goto again;
			else
				goto more_work;
		}
		mc->ops->poll_tx(mc->dev);
	}

 more_work:
	MAL_DBG2(mal, "poll() %d <- %d" NL, budget, received);
	return received;
}
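/*
 * The "rotting packet" re-check above guards against frames that arrive (or
 * channels stopped by an RXDE) in the window after the rings were drained but
 * before end-of-buffer interrupts were re-enabled, which might otherwise sit
 * unprocessed until the next, unrelated interrupt.
 */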

static void mal_reset(struct mal_instance *mal)
{
	int n = 10;

	MAL_DBG(mal, "reset" NL);

	set_mal_dcrn(mal, MAL_CFG, MAL_CFG_SR);

	/* Wait for reset to complete (1 system clock) */
	while ((get_mal_dcrn(mal, MAL_CFG) & MAL_CFG_SR) && n)
		--n;

	if (unlikely(!n))
		printk(KERN_ERR "mal%d: reset timeout\n", mal->index);
}

int mal_get_regs_len(struct mal_instance *mal)
{
	return sizeof(struct emac_ethtool_regs_subhdr) +
	    sizeof(struct mal_regs);
}

void *mal_dump_regs(struct mal_instance *mal, void *buf)
{
	struct emac_ethtool_regs_subhdr *hdr = buf;
	struct mal_regs *regs = (struct mal_regs *)(hdr + 1);
	int i;

	hdr->version = mal->version;
	hdr->index = mal->index;

	regs->tx_count = mal->num_tx_chans;
	regs->rx_count = mal->num_rx_chans;

	regs->cfg = get_mal_dcrn(mal, MAL_CFG);
	regs->esr = get_mal_dcrn(mal, MAL_ESR);
	regs->ier = get_mal_dcrn(mal, MAL_IER);
	regs->tx_casr = get_mal_dcrn(mal, MAL_TXCASR);
	regs->tx_carr = get_mal_dcrn(mal, MAL_TXCARR);
	regs->tx_eobisr = get_mal_dcrn(mal, MAL_TXEOBISR);
	regs->tx_deir = get_mal_dcrn(mal, MAL_TXDEIR);
	regs->rx_casr = get_mal_dcrn(mal, MAL_RXCASR);
	regs->rx_carr = get_mal_dcrn(mal, MAL_RXCARR);
	regs->rx_eobisr = get_mal_dcrn(mal, MAL_RXEOBISR);
	regs->rx_deir = get_mal_dcrn(mal, MAL_RXDEIR);

	for (i = 0; i < regs->tx_count; ++i)
		regs->tx_ctpr[i] = get_mal_dcrn(mal, MAL_TXCTPR(i));

	for (i = 0; i < regs->rx_count; ++i) {
		regs->rx_ctpr[i] = get_mal_dcrn(mal, MAL_RXCTPR(i));
		regs->rcbs[i] = get_mal_dcrn(mal, MAL_RCBS(i));
	}
	return regs + 1;
}

static int __devinit mal_probe(struct platform_device *ofdev,
			       const struct of_device_id *match)
{
	struct mal_instance *mal;
	int err = 0, i, bd_size;
	int index = mal_count++;
	unsigned int dcr_base;
	const u32 *prop;
	u32 cfg;
	unsigned long irqflags;
	irq_handler_t hdlr_serr, hdlr_txde, hdlr_rxde;

	mal = kzalloc(sizeof(struct mal_instance), GFP_KERNEL);
	if (!mal) {
		printk(KERN_ERR
		       "mal%d: out of memory allocating MAL structure!\n",
		       index);
		return -ENOMEM;
	}
	mal->index = index;
	mal->ofdev = ofdev;
	mal->version = of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal2") ? 2 : 1;

	MAL_DBG(mal, "probe" NL);

	prop = of_get_property(ofdev->dev.of_node, "num-tx-chans", NULL);
	if (prop == NULL) {
		printk(KERN_ERR
		       "mal%d: can't find MAL num-tx-chans property!\n",
		       index);
		err = -ENODEV;
		goto fail;
	}
	mal->num_tx_chans = prop[0];

	prop = of_get_property(ofdev->dev.of_node, "num-rx-chans", NULL);
	if (prop == NULL) {
		printk(KERN_ERR
		       "mal%d: can't find MAL num-rx-chans property!\n",
		       index);
		err = -ENODEV;
		goto fail;
	}
	mal->num_rx_chans = prop[0];

	dcr_base = dcr_resource_start(ofdev->dev.of_node, 0);
	if (dcr_base == 0) {
		printk(KERN_ERR
		       "mal%d: can't find DCR resource!\n", index);
		err = -ENODEV;
		goto fail;
	}
	mal->dcr_host = dcr_map(ofdev->dev.of_node, dcr_base, 0x100);
	if (!DCR_MAP_OK(mal->dcr_host)) {
		printk(KERN_ERR
		       "mal%d: failed to map DCRs !\n", index);
		err = -ENODEV;
		goto fail;
	}

	if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-405ez")) {
#if defined(CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT) && \
	defined(CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR)
		mal->features |= (MAL_FTR_CLEAR_ICINTSTAT |
				MAL_FTR_COMMON_ERR_INT);
#else
		printk(KERN_ERR "%s: Support for 405EZ not enabled!\n",
				ofdev->dev.of_node->full_name);
		err = -ENODEV;
		goto fail;
#endif
	}

	mal->txeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
	mal->rxeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 1);
	mal->serr_irq = irq_of_parse_and_map(ofdev->dev.of_node, 2);

	if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
		mal->txde_irq = mal->rxde_irq = mal->serr_irq;
	} else {
		mal->txde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 3);
		mal->rxde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 4);
	}

	if (mal->txeob_irq == NO_IRQ || mal->rxeob_irq == NO_IRQ ||
	    mal->serr_irq == NO_IRQ || mal->txde_irq == NO_IRQ ||
	    mal->rxde_irq == NO_IRQ) {
		printk(KERN_ERR
		       "mal%d: failed to map interrupts !\n", index);
		err = -ENODEV;
		goto fail_unmap;
	}

	INIT_LIST_HEAD(&mal->poll_list);
	INIT_LIST_HEAD(&mal->list);
	spin_lock_init(&mal->lock);

	init_dummy_netdev(&mal->dummy_dev);

	netif_napi_add(&mal->dummy_dev, &mal->napi, mal_poll,
		       CONFIG_IBM_NEW_EMAC_POLL_WEIGHT);

	/* Load power-on reset defaults */
	mal_reset(mal);

	/* Set the MAL configuration register */
	cfg = (mal->version == 2) ? MAL2_CFG_DEFAULT : MAL1_CFG_DEFAULT;
	cfg |= MAL_CFG_PLBB | MAL_CFG_OPBBL | MAL_CFG_LEA;

	/* Current Axon is not happy with a non-zero priority; it can
	 * deadlock, so fix it up here.
	 */
	if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-axon"))
		cfg &= ~(MAL2_CFG_RPP_10 | MAL2_CFG_WPP_10);

	/* Apply configuration */
	set_mal_dcrn(mal, MAL_CFG, cfg);

	/* Allocate space for BD rings */
	BUG_ON(mal->num_tx_chans <= 0 || mal->num_tx_chans > 32);
	BUG_ON(mal->num_rx_chans <= 0 || mal->num_rx_chans > 32);

	bd_size = sizeof(struct mal_descriptor) *
		(NUM_TX_BUFF * mal->num_tx_chans +
		 NUM_RX_BUFF * mal->num_rx_chans);
	mal->bd_virt =
		dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
				   GFP_KERNEL);
	if (mal->bd_virt == NULL) {
		printk(KERN_ERR
		       "mal%d: out of memory allocating RX/TX descriptors!\n",
		       index);
		err = -ENOMEM;
		goto fail_unmap;
	}
	memset(mal->bd_virt, 0, bd_size);

	for (i = 0; i < mal->num_tx_chans; ++i)
		set_mal_dcrn(mal, MAL_TXCTPR(i), mal->bd_dma +
			     sizeof(struct mal_descriptor) *
			     mal_tx_bd_offset(mal, i));

	for (i = 0; i < mal->num_rx_chans; ++i)
		set_mal_dcrn(mal, MAL_RXCTPR(i), mal->bd_dma +
			     sizeof(struct mal_descriptor) *
			     mal_rx_bd_offset(mal, i));

	if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
		irqflags = IRQF_SHARED;
		hdlr_serr = hdlr_txde = hdlr_rxde = mal_int;
	} else {
		irqflags = 0;
		hdlr_serr = mal_serr;
		hdlr_txde = mal_txde;
		hdlr_rxde = mal_rxde;
	}

	err = request_irq(mal->serr_irq, hdlr_serr, irqflags, "MAL SERR", mal);
	if (err)
		goto fail2;
	err = request_irq(mal->txde_irq, hdlr_txde, irqflags, "MAL TX DE", mal);
	if (err)
		goto fail3;
	err = request_irq(mal->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal);
	if (err)
		goto fail4;
	err = request_irq(mal->rxde_irq, hdlr_rxde, irqflags, "MAL RX DE", mal);
	if (err)
		goto fail5;
	err = request_irq(mal->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal);
	if (err)
		goto fail6;

	/* Enable all MAL SERR interrupt sources */
	if (mal->version == 2)
		set_mal_dcrn(mal, MAL_IER, MAL2_IER_EVENTS);
	else
		set_mal_dcrn(mal, MAL_IER, MAL1_IER_EVENTS);

	/* Enable EOB interrupt */
	mal_enable_eob_irq(mal);

	printk(KERN_INFO
	       "MAL v%d %s, %d TX channels, %d RX channels\n",
	       mal->version, ofdev->dev.of_node->full_name,
	       mal->num_tx_chans, mal->num_rx_chans);

	/* Advertise this instance to the rest of the world */
	wmb();
	dev_set_drvdata(&ofdev->dev, mal);

	mal_dbg_register(mal);

	return 0;

 fail6:
	free_irq(mal->rxde_irq, mal);
 fail5:
	free_irq(mal->txeob_irq, mal);
 fail4:
	free_irq(mal->txde_irq, mal);
 fail3:
	free_irq(mal->serr_irq, mal);
 fail2:
	dma_free_coherent(&ofdev->dev, bd_size, mal->bd_virt, mal->bd_dma);
 fail_unmap:
	dcr_unmap(mal->dcr_host, 0x100);
 fail:
	kfree(mal);

	return err;
}

static int __devexit mal_remove(struct platform_device *ofdev)
{
	struct mal_instance *mal = dev_get_drvdata(&ofdev->dev);

	MAL_DBG(mal, "remove" NL);

	/* Synchronize with scheduled polling */
	napi_disable(&mal->napi);

	if (!list_empty(&mal->list)) {
		/* This is *very* bad */
		printk(KERN_EMERG
		       "mal%d: commac list is not empty on remove!\n",
		       mal->index);
		WARN_ON(1);
	}

	dev_set_drvdata(&ofdev->dev, NULL);

	free_irq(mal->serr_irq, mal);
	free_irq(mal->txde_irq, mal);
	free_irq(mal->txeob_irq, mal);
	free_irq(mal->rxde_irq, mal);
	free_irq(mal->rxeob_irq, mal);

	mal_reset(mal);

	mal_dbg_unregister(mal);

	dma_free_coherent(&ofdev->dev,
			  sizeof(struct mal_descriptor) *
			  (NUM_TX_BUFF * mal->num_tx_chans +
			   NUM_RX_BUFF * mal->num_rx_chans), mal->bd_virt,
			  mal->bd_dma);
	kfree(mal);

	return 0;
}

static struct of_device_id mal_platform_match[] =
{
	{
		.compatible	= "ibm,mcmal",
	},
	{
		.compatible	= "ibm,mcmal2",
	},
	/* Backward compat */
	{
		.type		= "mcmal-dma",
		.compatible	= "ibm,mcmal",
	},
	{
		.type		= "mcmal-dma",
		.compatible	= "ibm,mcmal2",
	},
	{},
};

static struct of_platform_driver mal_of_driver = {
	.driver = {
		.name = "mcmal",
		.owner = THIS_MODULE,
		.of_match_table = mal_platform_match,
	},
	.probe = mal_probe,
	.remove = mal_remove,
};

int __init mal_init(void)
{
	return of_register_platform_driver(&mal_of_driver);
}

void mal_exit(void)
{
	of_unregister_platform_driver(&mal_of_driver);
}