/*
 * drivers/net/ibm_emac/ibm_emac_mal.c
 *
 * Memory Access Layer (MAL) support
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 *      Benjamin Herrenschmidt <benh@kernel.crashing.org>,
 *      David Gibson <hermes@gibson.dropbear.id.au>,
 *
 *      Armin Kuster <akuster@mvista.com>
 *      Copyright 2002 MontaVista Software Inc.
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>

#include <asm/ocp.h>

#include "ibm_emac_core.h"
#include "ibm_emac_mal.h"
#include "ibm_emac_debug.h"

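/*
 * Each EMAC instance attaches to the MAL as a "commac", claiming the TX
 * and RX channels given by its channel masks.  Registration fails with
 * -EBUSY if another commac already owns any of the requested channels.
 */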
int __init mal_register_commac(struct ibm_ocp_mal *mal,
			       struct mal_commac *commac)
{
	unsigned long flags;
	local_irq_save(flags);

	MAL_DBG("%d: reg(%08x, %08x)" NL, mal->def->index,
		commac->tx_chan_mask, commac->rx_chan_mask);

	/* Don't let multiple commacs claim the same channel(s) */
	if ((mal->tx_chan_mask & commac->tx_chan_mask) ||
	    (mal->rx_chan_mask & commac->rx_chan_mask)) {
		local_irq_restore(flags);
		printk(KERN_WARNING "mal%d: COMMAC channels conflict!\n",
		       mal->def->index);
		return -EBUSY;
	}

	mal->tx_chan_mask |= commac->tx_chan_mask;
	mal->rx_chan_mask |= commac->rx_chan_mask;
	list_add(&commac->list, &mal->list);

	local_irq_restore(flags);
	return 0;
}

void mal_unregister_commac(struct ibm_ocp_mal *mal, struct mal_commac *commac)
{
	unsigned long flags;
	local_irq_save(flags);

	MAL_DBG("%d: unreg(%08x, %08x)" NL, mal->def->index,
		commac->tx_chan_mask, commac->rx_chan_mask);

	mal->tx_chan_mask &= ~commac->tx_chan_mask;
	mal->rx_chan_mask &= ~commac->rx_chan_mask;
	list_del_init(&commac->list);

	local_irq_restore(flags);
}

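/*
 * Set the RX channel buffer size.  MAL_RCBS takes the size in 16-byte
 * units, hence the 'size >> 4' write and the multiple-of-16 check below.
 */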
int mal_set_rcbs(struct ibm_ocp_mal *mal, int channel, unsigned long size)
{
	struct ocp_func_mal_data *maldata = mal->def->additions;
	BUG_ON(channel < 0 || channel >= maldata->num_rx_chans ||
	       size > MAL_MAX_RX_SIZE);

	MAL_DBG("%d: set_rcbs(%d, %lu)" NL, mal->def->index, channel, size);

	if (size & 0xf) {
		printk(KERN_WARNING
		       "mal%d: incorrect RX size %lu for channel %d\n",
		       mal->def->index, size, channel);
		return -EINVAL;
	}

	set_mal_dcrn(mal, MAL_RCBS(channel), size >> 4);
	return 0;
}

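/*
 * All BD rings live in one DMA-coherent block: the TX rings for every
 * channel first, then the RX rings.  These helpers return a ring's offset
 * into that block, counted in descriptors.
 */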
int mal_tx_bd_offset(struct ibm_ocp_mal *mal, int channel)
{
	struct ocp_func_mal_data *maldata = mal->def->additions;
	BUG_ON(channel < 0 || channel >= maldata->num_tx_chans);
	return channel * NUM_TX_BUFF;
}

int mal_rx_bd_offset(struct ibm_ocp_mal *mal, int channel)
{
	struct ocp_func_mal_data *maldata = mal->def->additions;
	BUG_ON(channel < 0 || channel >= maldata->num_rx_chans);
	return maldata->num_tx_chans * NUM_TX_BUFF + channel * NUM_RX_BUFF;
}

void mal_enable_tx_channel(struct ibm_ocp_mal *mal, int channel)
{
	local_bh_disable();
	MAL_DBG("%d: enable_tx(%d)" NL, mal->def->index, channel);
	set_mal_dcrn(mal, MAL_TXCASR,
		     get_mal_dcrn(mal, MAL_TXCASR) | MAL_CHAN_MASK(channel));
	local_bh_enable();
}

void mal_disable_tx_channel(struct ibm_ocp_mal *mal, int channel)
{
	set_mal_dcrn(mal, MAL_TXCARR, MAL_CHAN_MASK(channel));
	MAL_DBG("%d: disable_tx(%d)" NL, mal->def->index, channel);
}

void mal_enable_rx_channel(struct ibm_ocp_mal *mal, int channel)
{
	local_bh_disable();
	MAL_DBG("%d: enable_rx(%d)" NL, mal->def->index, channel);
	set_mal_dcrn(mal, MAL_RXCASR,
		     get_mal_dcrn(mal, MAL_RXCASR) | MAL_CHAN_MASK(channel));
	local_bh_enable();
}

void mal_disable_rx_channel(struct ibm_ocp_mal *mal, int channel)
{
	set_mal_dcrn(mal, MAL_RXCARR, MAL_CHAN_MASK(channel));
	MAL_DBG("%d: disable_rx(%d)" NL, mal->def->index, channel);
}

void mal_poll_add(struct ibm_ocp_mal *mal, struct mal_commac *commac)
{
	local_bh_disable();
	MAL_DBG("%d: poll_add(%p)" NL, mal->def->index, commac);
	list_add_tail(&commac->poll_list, &mal->poll_list);
	local_bh_enable();
}

void mal_poll_del(struct ibm_ocp_mal *mal, struct mal_commac *commac)
{
	local_bh_disable();
	MAL_DBG("%d: poll_del(%p)" NL, mal->def->index, commac);
	list_del(&commac->poll_list);
	local_bh_enable();
}

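/*
 * EOB (end-of-buffer) interrupts are masked while a poll is scheduled and
 * re-enabled when polling completes, giving NAPI-style interrupt mitigation
 * for both TX and RX.
 */
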
/* synchronized by mal_poll() */
static inline void mal_enable_eob_irq(struct ibm_ocp_mal *mal)
{
	MAL_DBG2("%d: enable_irq" NL, mal->def->index);
	set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) | MAL_CFG_EOPIE);
}

/* synchronized by __LINK_STATE_RX_SCHED bit in ndev->state */
static inline void mal_disable_eob_irq(struct ibm_ocp_mal *mal)
{
	set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) & ~MAL_CFG_EOPIE);
	MAL_DBG2("%d: disable_irq" NL, mal->def->index);
}

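/*
 * System error interrupt: acknowledge MAL_ESR and report PLB/OPB bus
 * errors.  Descriptor errors are left to the dedicated TXDE/RXDE handlers.
 */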
static irqreturn_t mal_serr(int irq, void *dev_instance)
{
	struct ibm_ocp_mal *mal = dev_instance;
	u32 esr = get_mal_dcrn(mal, MAL_ESR);

	/* Clear the error status register */
	set_mal_dcrn(mal, MAL_ESR, esr);

	MAL_DBG("%d: SERR %08x" NL, mal->def->index, esr);

	if (esr & MAL_ESR_EVB) {
		if (esr & MAL_ESR_DE) {
			/* Ignore descriptor errors here; a TXDE or RXDE
			 * interrupt will be generated anyway.
			 */
			return IRQ_HANDLED;
		}

		if (esr & MAL_ESR_PEIN) {
			/* PLB error: probably buggy hardware or an
			 * incorrect physical address in a BD (i.e. a bug)
			 */
			if (net_ratelimit())
				printk(KERN_ERR
				       "mal%d: system error, PLB (ESR = 0x%08x)\n",
				       mal->def->index, esr);
			return IRQ_HANDLED;
		}

		/* OPB error: probably buggy hardware or an incorrect EBC setup */
		if (net_ratelimit())
			printk(KERN_ERR
			       "mal%d: system error, OPB (ESR = 0x%08x)\n",
			       mal->def->index, esr);
	}
	return IRQ_HANDLED;
}

static inline void mal_schedule_poll(struct ibm_ocp_mal *mal)
{
	if (likely(netif_rx_schedule_prep(&mal->poll_dev))) {
		MAL_DBG2("%d: schedule_poll" NL, mal->def->index);
		mal_disable_eob_irq(mal);
		__netif_rx_schedule(&mal->poll_dev);
	} else
		MAL_DBG2("%d: already in poll" NL, mal->def->index);
}

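/*
 * The TX/RX end-of-buffer handlers only schedule the poll routine and ack
 * the status register; the actual skb processing happens in mal_poll().
 */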
static irqreturn_t mal_txeob(int irq, void *dev_instance)
{
	struct ibm_ocp_mal *mal = dev_instance;
	u32 r = get_mal_dcrn(mal, MAL_TXEOBISR);
	MAL_DBG2("%d: txeob %08x" NL, mal->def->index, r);
	mal_schedule_poll(mal);
	set_mal_dcrn(mal, MAL_TXEOBISR, r);
	return IRQ_HANDLED;
}

static irqreturn_t mal_rxeob(int irq, void *dev_instance)
{
	struct ibm_ocp_mal *mal = dev_instance;
	u32 r = get_mal_dcrn(mal, MAL_RXEOBISR);
	MAL_DBG2("%d: rxeob %08x" NL, mal->def->index, r);
	mal_schedule_poll(mal);
	set_mal_dcrn(mal, MAL_RXEOBISR, r);
	return IRQ_HANDLED;
}

static irqreturn_t mal_txde(int irq, void *dev_instance)
{
	struct ibm_ocp_mal *mal = dev_instance;
	u32 deir = get_mal_dcrn(mal, MAL_TXDEIR);
	set_mal_dcrn(mal, MAL_TXDEIR, deir);

	MAL_DBG("%d: txde %08x" NL, mal->def->index, deir);

	if (net_ratelimit())
		printk(KERN_ERR
		       "mal%d: TX descriptor error (TXDEIR = 0x%08x)\n",
		       mal->def->index, deir);

	return IRQ_HANDLED;
}

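/*
 * RX descriptor error: flag every commac whose channel signalled the error
 * as stopped and invoke its rxde() callback, then schedule a poll so RX can
 * be restarted once the commac has recovered.
 */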
static irqreturn_t mal_rxde(int irq, void *dev_instance)
{
	struct ibm_ocp_mal *mal = dev_instance;
	struct list_head *l;
	u32 deir = get_mal_dcrn(mal, MAL_RXDEIR);

	MAL_DBG("%d: rxde %08x" NL, mal->def->index, deir);

	list_for_each(l, &mal->list) {
		struct mal_commac *mc = list_entry(l, struct mal_commac, list);
		if (deir & mc->rx_chan_mask) {
			mc->rx_stopped = 1;
			mc->ops->rxde(mc->dev);
		}
	}

	mal_schedule_poll(mal);
	set_mal_dcrn(mal, MAL_RXDEIR, deir);

	return IRQ_HANDLED;
}

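/*
 * Poll routine: reap completed TX skbs, receive up to the RX budget, then
 * re-enable EOB interrupts.  If any commac still has packets pending (or a
 * stopped RX channel), reschedule instead of completing so that nothing is
 * left "rotting" with interrupts disabled.
 */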
static int mal_poll(struct net_device *ndev, int *budget)
{
	struct ibm_ocp_mal *mal = ndev->priv;
	struct list_head *l;
	int rx_work_limit = min(ndev->quota, *budget), received = 0, done;

	MAL_DBG2("%d: poll(%d) %d ->" NL, mal->def->index, *budget,
		 rx_work_limit);
      again:
	/* Process TX skbs */
	list_for_each(l, &mal->poll_list) {
		struct mal_commac *mc =
		    list_entry(l, struct mal_commac, poll_list);
		mc->ops->poll_tx(mc->dev);
	}

	/* Process RX skbs.
	 * We _might_ need something smarter here to enforce polling fairness.
	 */
	list_for_each(l, &mal->poll_list) {
		struct mal_commac *mc =
		    list_entry(l, struct mal_commac, poll_list);
		int n = mc->ops->poll_rx(mc->dev, rx_work_limit);
		if (n) {
			received += n;
			rx_work_limit -= n;
			if (rx_work_limit <= 0) {
				done = 0;
				goto more_work;
			}
		}
	}

	/* We need to disable IRQs here to protect against the RXDE IRQ */
	local_irq_disable();
	__netif_rx_complete(ndev);
	mal_enable_eob_irq(mal);
	local_irq_enable();

	done = 1;

	/* Check for "rotting" packet(s) */
	list_for_each(l, &mal->poll_list) {
		struct mal_commac *mc =
		    list_entry(l, struct mal_commac, poll_list);
		if (unlikely(mc->ops->peek_rx(mc->dev) || mc->rx_stopped)) {
			MAL_DBG2("%d: rotting packet" NL, mal->def->index);
			if (netif_rx_reschedule(ndev, received))
				mal_disable_eob_irq(mal);
			else
				MAL_DBG2("%d: already in poll list" NL,
					 mal->def->index);

			if (rx_work_limit > 0)
				goto again;
			else
				goto more_work;
		}
		mc->ops->poll_tx(mc->dev);
	}

      more_work:
	ndev->quota -= received;
	*budget -= received;

	MAL_DBG2("%d: poll() %d <- %d" NL, mal->def->index, *budget,
		 done ? 0 : 1);
	return done ? 0 : 1;
}

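/*
 * Software reset via MAL_CFG_SR; the loop bound is generous, since the
 * reset is expected to complete within a single system clock.
 */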
static void mal_reset(struct ibm_ocp_mal *mal)
{
	int n = 10;
	MAL_DBG("%d: reset" NL, mal->def->index);

	set_mal_dcrn(mal, MAL_CFG, MAL_CFG_SR);

	/* Wait for reset to complete (1 system clock) */
	while ((get_mal_dcrn(mal, MAL_CFG) & MAL_CFG_SR) && n)
		--n;

	if (unlikely(!n))
		printk(KERN_ERR "mal%d: reset timeout\n", mal->def->index);
}

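/*
 * MAL register snapshot for the driver's ethtool register dump; the
 * emac_ethtool_regs_subhdr header identifies this block in the dump.
 */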
int mal_get_regs_len(struct ibm_ocp_mal *mal)
{
	return sizeof(struct emac_ethtool_regs_subhdr) +
	    sizeof(struct ibm_mal_regs);
}

void *mal_dump_regs(struct ibm_ocp_mal *mal, void *buf)
{
	struct emac_ethtool_regs_subhdr *hdr = buf;
	struct ibm_mal_regs *regs = (struct ibm_mal_regs *)(hdr + 1);
	struct ocp_func_mal_data *maldata = mal->def->additions;
	int i;

	hdr->version = MAL_VERSION;
	hdr->index = mal->def->index;

	regs->tx_count = maldata->num_tx_chans;
	regs->rx_count = maldata->num_rx_chans;

	regs->cfg = get_mal_dcrn(mal, MAL_CFG);
	regs->esr = get_mal_dcrn(mal, MAL_ESR);
	regs->ier = get_mal_dcrn(mal, MAL_IER);
	regs->tx_casr = get_mal_dcrn(mal, MAL_TXCASR);
	regs->tx_carr = get_mal_dcrn(mal, MAL_TXCARR);
	regs->tx_eobisr = get_mal_dcrn(mal, MAL_TXEOBISR);
	regs->tx_deir = get_mal_dcrn(mal, MAL_TXDEIR);
	regs->rx_casr = get_mal_dcrn(mal, MAL_RXCASR);
	regs->rx_carr = get_mal_dcrn(mal, MAL_RXCARR);
	regs->rx_eobisr = get_mal_dcrn(mal, MAL_RXEOBISR);
	regs->rx_deir = get_mal_dcrn(mal, MAL_RXDEIR);

	for (i = 0; i < regs->tx_count; ++i)
		regs->tx_ctpr[i] = get_mal_dcrn(mal, MAL_TXCTPR(i));

	for (i = 0; i < regs->rx_count; ++i) {
		regs->rx_ctpr[i] = get_mal_dcrn(mal, MAL_RXCTPR(i));
		regs->rcbs[i] = get_mal_dcrn(mal, MAL_RCBS(i));
	}
	return regs + 1;
}

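/*
 * Probe: reset the MAL, program MAL_CFG, allocate one DMA-coherent block
 * for all BD rings, point each channel's CTPR at its ring, and request the
 * five MAL interrupts (SERR, TXDE, TXEOB, RXDE, RXEOB).
 */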
static int __init mal_probe(struct ocp_device *ocpdev)
{
	struct ibm_ocp_mal *mal;
	struct ocp_func_mal_data *maldata;
	int err = 0, i, bd_size;

	MAL_DBG("%d: probe" NL, ocpdev->def->index);

	maldata = ocpdev->def->additions;
	if (maldata == NULL) {
		printk(KERN_ERR "mal%d: missing additional data!\n",
		       ocpdev->def->index);
		return -ENODEV;
	}

	mal = kzalloc(sizeof(struct ibm_ocp_mal), GFP_KERNEL);
	if (!mal) {
		printk(KERN_ERR
		       "mal%d: out of memory allocating MAL structure!\n",
		       ocpdev->def->index);
		return -ENOMEM;
	}
	mal->dcrbase = maldata->dcr_base;
	mal->def = ocpdev->def;

	INIT_LIST_HEAD(&mal->poll_list);
	set_bit(__LINK_STATE_START, &mal->poll_dev.state);
	mal->poll_dev.weight = CONFIG_IBM_EMAC_POLL_WEIGHT;
	mal->poll_dev.poll = mal_poll;
	mal->poll_dev.priv = mal;
	atomic_set(&mal->poll_dev.refcnt, 1);

	INIT_LIST_HEAD(&mal->list);

	/* Load power-on reset defaults */
	mal_reset(mal);

	/* Set the MAL configuration register */
	set_mal_dcrn(mal, MAL_CFG, MAL_CFG_DEFAULT | MAL_CFG_PLBB |
		     MAL_CFG_OPBBL | MAL_CFG_LEA);

	mal_enable_eob_irq(mal);

	/* Allocate space for BD rings */
	BUG_ON(maldata->num_tx_chans <= 0 || maldata->num_tx_chans > 32);
	BUG_ON(maldata->num_rx_chans <= 0 || maldata->num_rx_chans > 32);
	bd_size = sizeof(struct mal_descriptor) *
	    (NUM_TX_BUFF * maldata->num_tx_chans +
	     NUM_RX_BUFF * maldata->num_rx_chans);
	mal->bd_virt =
	    dma_alloc_coherent(&ocpdev->dev, bd_size, &mal->bd_dma, GFP_KERNEL);

	if (!mal->bd_virt) {
		printk(KERN_ERR
		       "mal%d: out of memory allocating RX/TX descriptors!\n",
		       mal->def->index);
		err = -ENOMEM;
		goto fail;
	}
	memset(mal->bd_virt, 0, bd_size);

	for (i = 0; i < maldata->num_tx_chans; ++i)
		set_mal_dcrn(mal, MAL_TXCTPR(i), mal->bd_dma +
			     sizeof(struct mal_descriptor) *
			     mal_tx_bd_offset(mal, i));

	for (i = 0; i < maldata->num_rx_chans; ++i)
		set_mal_dcrn(mal, MAL_RXCTPR(i), mal->bd_dma +
			     sizeof(struct mal_descriptor) *
			     mal_rx_bd_offset(mal, i));

	err = request_irq(maldata->serr_irq, mal_serr, 0, "MAL SERR", mal);
	if (err)
		goto fail2;
	err = request_irq(maldata->txde_irq, mal_txde, 0, "MAL TX DE", mal);
	if (err)
		goto fail3;
	err = request_irq(maldata->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal);
	if (err)
		goto fail4;
	err = request_irq(maldata->rxde_irq, mal_rxde, 0, "MAL RX DE", mal);
	if (err)
		goto fail5;
	err = request_irq(maldata->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal);
	if (err)
		goto fail6;

	/* Enable all MAL SERR interrupt sources */
	set_mal_dcrn(mal, MAL_IER, MAL_IER_EVENTS);

	/* Advertise this instance to the rest of the world */
	ocp_set_drvdata(ocpdev, mal);

	mal_dbg_register(mal->def->index, mal);

	printk(KERN_INFO "mal%d: initialized, %d TX channels, %d RX channels\n",
	       mal->def->index, maldata->num_tx_chans, maldata->num_rx_chans);
	return 0;

      fail6:
	free_irq(maldata->rxde_irq, mal);
      fail5:
	free_irq(maldata->txeob_irq, mal);
      fail4:
	free_irq(maldata->txde_irq, mal);
      fail3:
	free_irq(maldata->serr_irq, mal);
      fail2:
	dma_free_coherent(&ocpdev->dev, bd_size, mal->bd_virt, mal->bd_dma);
      fail:
	kfree(mal);
	return err;
}

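/*
 * Teardown mirrors probe: quiesce polling, free the interrupts, reset the
 * hardware, and release the BD block.  All commacs should already have been
 * unregistered at this point.
 */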
static void __exit mal_remove(struct ocp_device *ocpdev)
{
	struct ibm_ocp_mal *mal = ocp_get_drvdata(ocpdev);
	struct ocp_func_mal_data *maldata = mal->def->additions;

	MAL_DBG("%d: remove" NL, mal->def->index);

	/* Synchronize with scheduled polling,
	 * stolen from net/core/dev.c:dev_close()
	 */
	clear_bit(__LINK_STATE_START, &mal->poll_dev.state);
	netif_poll_disable(&mal->poll_dev);

	if (!list_empty(&mal->list)) {
		/* This is *very* bad */
		printk(KERN_EMERG
		       "mal%d: commac list is not empty on remove!\n",
		       mal->def->index);
	}

	ocp_set_drvdata(ocpdev, NULL);

	free_irq(maldata->serr_irq, mal);
	free_irq(maldata->txde_irq, mal);
	free_irq(maldata->txeob_irq, mal);
	free_irq(maldata->rxde_irq, mal);
	free_irq(maldata->rxeob_irq, mal);

	mal_reset(mal);

	mal_dbg_register(mal->def->index, NULL);

	dma_free_coherent(&ocpdev->dev,
			  sizeof(struct mal_descriptor) *
			  (NUM_TX_BUFF * maldata->num_tx_chans +
			   NUM_RX_BUFF * maldata->num_rx_chans), mal->bd_virt,
			  mal->bd_dma);

	kfree(mal);
}

/* OCP device IDs (vendor/function) handled by this driver */
static struct ocp_device_id mal_ids[] = {
	{ .vendor = OCP_VENDOR_IBM, .function = OCP_FUNC_MAL },
	{ .vendor = OCP_VENDOR_INVALID }
};

static struct ocp_driver mal_driver = {
	.name = "mal",
	.id_table = mal_ids,

	.probe = mal_probe,
	.remove = mal_remove,
};

int __init mal_init(void)
{
	MAL_DBG(": init" NL);
	return ocp_register_driver(&mal_driver);
}

void __exit mal_exit(void)
{
	MAL_DBG(": exit" NL);
	ocp_unregister_driver(&mal_driver);
}