/*
 *	PCI handling of I2O controller
 *
 *	Copyright (C) 1999-2002	Red Hat Software
 *
 *	Written by Alan Cox, Building Number Three Ltd
 *
 *	This program is free software; you can redistribute it and/or modify it
 *	under the terms of the GNU General Public License as published by the
 *	Free Software Foundation; either version 2 of the License, or (at your
 *	option) any later version.
 *
 *	A lot of the I2O message side code from this is taken from the Red
 *	Creek RCPCI45 adapter driver by Red Creek Communications
 *
 *	Fixes/additions:
 *		Philipp Rumpf
 *		Juha Sievänen <Juha.Sievanen@cs.Helsinki.FI>
 *		Auvo Häkkinen <Auvo.Hakkinen@cs.Helsinki.FI>
 *		Deepak Saxena <deepak@plexity.net>
 *		Boji T Kannanthanam <boji.t.kannanthanam@intel.com>
 *		Alan Cox <alan@lxorguk.ukuu.org.uk>:
 *			Ported to Linux 2.5.
 *		Markus Lidel <Markus.Lidel@shadowconnect.com>:
 *			Minor fixes for 2.6.
 *		Markus Lidel <Markus.Lidel@shadowconnect.com>:
 *			Support for sysfs included.
 */

#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/i2o.h>
#include "core.h"

#define OSM_DESCRIPTION	"I2O-subsystem"

/* PCI device id table for all I2O controllers */
static struct pci_device_id __devinitdata i2o_pci_ids[] = {
	{PCI_DEVICE_CLASS(PCI_CLASS_INTELLIGENT_I2O << 8, 0xffff00)},
	{PCI_DEVICE(PCI_VENDOR_ID_DPT, 0xa511)},
	{.vendor = PCI_VENDOR_ID_INTEL, .device = 0x1962,
	 .subvendor = PCI_VENDOR_ID_PROMISE, .subdevice = PCI_ANY_ID},
	{0}
};

/**
 *	i2o_pci_free - Frees the DMA memory for the I2O controller
 *	@c: I2O controller to free
 *
 *	Remove all allocated DMA memory and unmap memory IO regions. If MTRR
 *	is enabled, also remove the MTRR region.
 */
static void i2o_pci_free(struct i2o_controller *c)
{
	struct device *dev;

	dev = &c->pdev->dev;

	i2o_dma_free(dev, &c->out_queue);
	i2o_dma_free(dev, &c->status_block);
	kfree(c->lct);
	i2o_dma_free(dev, &c->dlct);
	i2o_dma_free(dev, &c->hrt);
	i2o_dma_free(dev, &c->status);

	if (c->raptor && c->in_queue.virt)
		iounmap(c->in_queue.virt);

	if (c->base.virt)
		iounmap(c->base.virt);

	pci_release_regions(c->pdev);
}

/**
 *	i2o_pci_alloc - Allocate DMA memory, map IO memory for I2O controller
 *	@c: I2O controller
 *
 *	Allocate DMA memory for a PCI (or in theory AGP) I2O controller. All
 *	IO mappings are also done here. If MTRR is enabled, the memory
 *	regions are also added here.
 *
 *	Returns 0 on success or negative error code on failure.
 */
static int __devinit i2o_pci_alloc(struct i2o_controller *c)
{
	struct pci_dev *pdev = c->pdev;
	struct device *dev = &pdev->dev;
	int i;

	if (pci_request_regions(pdev, OSM_DESCRIPTION)) {
		printk(KERN_ERR "%s: device already claimed\n", c->name);
		return -ENODEV;
	}

	for (i = 0; i < 6; i++) {
		/* Skip I/O spaces */
		if (!(pci_resource_flags(pdev, i) & IORESOURCE_IO)) {
			if (!c->base.phys) {
				c->base.phys = pci_resource_start(pdev, i);
				c->base.len = pci_resource_len(pdev, i);

				/*
				 * If we know what card it is, set the size
				 * correctly. Code is taken from dpt_i2o.c
				 */
				if (pdev->device == 0xa501) {
					if (pdev->subsystem_device >= 0xc032 &&
					    pdev->subsystem_device <= 0xc03b) {
						if (c->base.len > 0x400000)
							c->base.len = 0x400000;
					} else {
						if (c->base.len > 0x100000)
							c->base.len = 0x100000;
					}
				}
				if (!c->raptor)
					break;
			} else {
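				/*
				 * Raptor boards keep the inbound message
				 * queue on a second memory BAR; record it
				 * here so it can be mapped separately below.
				 */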
				c->in_queue.phys = pci_resource_start(pdev, i);
				c->in_queue.len = pci_resource_len(pdev, i);
				break;
			}
		}
	}

	if (i == 6) {
		printk(KERN_ERR "%s: I2O controller has no memory regions"
		       " defined.\n", c->name);
		i2o_pci_free(c);
		return -EINVAL;
	}

	/* Map the I2O controller */
	if (c->raptor) {
		printk(KERN_INFO "%s: PCI I2O controller\n", c->name);
		printk(KERN_INFO "     BAR0 at 0x%08lX size=%ld\n",
		       (unsigned long)c->base.phys, (unsigned long)c->base.len);
		printk(KERN_INFO "     BAR1 at 0x%08lX size=%ld\n",
		       (unsigned long)c->in_queue.phys,
		       (unsigned long)c->in_queue.len);
	} else
		printk(KERN_INFO "%s: PCI I2O controller at %08lX size=%ld\n",
		       c->name, (unsigned long)c->base.phys,
		       (unsigned long)c->base.len);

	c->base.virt = ioremap_nocache(c->base.phys, c->base.len);
	if (!c->base.virt) {
		printk(KERN_ERR "%s: Unable to map controller.\n", c->name);
		i2o_pci_free(c);
		return -ENOMEM;
	}

	if (c->raptor) {
		c->in_queue.virt =
		    ioremap_nocache(c->in_queue.phys, c->in_queue.len);
		if (!c->in_queue.virt) {
			printk(KERN_ERR "%s: Unable to map controller.\n",
			       c->name);
			i2o_pci_free(c);
			return -ENOMEM;
		}
	} else
		c->in_queue = c->base;

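	/*
	 * The interrupt status/mask registers and the inbound and outbound
	 * message FIFOs live at fixed offsets from the start of the mapped
	 * register window.
	 */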
	c->irq_status = c->base.virt + I2O_IRQ_STATUS;
	c->irq_mask = c->base.virt + I2O_IRQ_MASK;
	c->in_port = c->base.virt + I2O_IN_PORT;
	c->out_port = c->base.virt + I2O_OUT_PORT;

	/* Motorola/Freescale chip does not follow spec */
	if (pdev->vendor == PCI_VENDOR_ID_MOTOROLA && pdev->device == 0x18c0) {
		/* Check if CPU is enabled */
		if (be32_to_cpu(readl(c->base.virt + 0x10000)) & 0x10000000) {
			printk(KERN_INFO "%s: MPC82XX needs CPU running to "
			       "service I2O.\n", c->name);
			i2o_pci_free(c);
			return -ENODEV;
		} else {
			c->irq_status += I2O_MOTOROLA_PORT_OFFSET;
			c->irq_mask += I2O_MOTOROLA_PORT_OFFSET;
			c->in_port += I2O_MOTOROLA_PORT_OFFSET;
			c->out_port += I2O_MOTOROLA_PORT_OFFSET;
			printk(KERN_INFO "%s: MPC82XX workarounds activated.\n",
			       c->name);
		}
	}

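	/*
	 * Shared DMA buffers written by the IOP during initialization: an
	 * 8-byte status word, the hardware resource table (HRT), a scratch
	 * buffer for fetching the LCT, and the system status block.
	 */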
	if (i2o_dma_alloc(dev, &c->status, 8)) {
		i2o_pci_free(c);
		return -ENOMEM;
	}

	if (i2o_dma_alloc(dev, &c->hrt, sizeof(i2o_hrt))) {
		i2o_pci_free(c);
		return -ENOMEM;
	}

	if (i2o_dma_alloc(dev, &c->dlct, 8192)) {
		i2o_pci_free(c);
		return -ENOMEM;
	}

	if (i2o_dma_alloc(dev, &c->status_block, sizeof(i2o_status_block))) {
		i2o_pci_free(c);
		return -ENOMEM;
	}

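	/*
	 * The outbound queue holds I2O_MAX_OUTBOUND_MSG_FRAMES reply frames;
	 * the frame size is given in 32-bit words, hence the sizeof(u32)
	 * factor to get the allocation size in bytes.
	 */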
	if (i2o_dma_alloc(dev, &c->out_queue,
		I2O_MAX_OUTBOUND_MSG_FRAMES * I2O_OUTBOUND_MSG_FRAME_SIZE *
				sizeof(u32))) {
		i2o_pci_free(c);
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, c);

	return 0;
}

/**
 *	i2o_pci_interrupt - Interrupt handler for I2O controller
 *	@irq: interrupt line
 *	@dev_id: pointer to the I2O controller
 *
 *	Handle an interrupt from a PCI-based I2O controller. This turns out
 *	to be rather simple. We keep the controller pointer in the cookie.
 */
static irqreturn_t i2o_pci_interrupt(int irq, void *dev_id)
{
	struct i2o_controller *c = dev_id;
	u32 m;
	irqreturn_t rc = IRQ_NONE;

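	/*
	 * Drain the outbound FIFO: each read of out_port returns the offset
	 * of a posted reply message frame, or I2O_QUEUE_EMPTY when no more
	 * replies are pending.
	 */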
	while (readl(c->irq_status) & I2O_IRQ_OUTBOUND_POST) {
		m = readl(c->out_port);
		if (m == I2O_QUEUE_EMPTY) {
			/*
			 * Old 960 steppings had a bug in the I2O unit that
			 * caused the queue to appear empty when it wasn't.
			 */
			m = readl(c->out_port);
			if (unlikely(m == I2O_QUEUE_EMPTY))
				break;
		}

		/* dispatch it */
		if (i2o_driver_dispatch(c, m))
			/* flush it if result != 0 */
			i2o_flush_reply(c, m);

		rc = IRQ_HANDLED;
	}

	return rc;
}

/**
 *	i2o_pci_irq_enable - Allocate interrupt for I2O controller
 *	@c: i2o_controller that the request is for
 *
 *	Allocate an interrupt for the I2O controller, and activate interrupts
 *	on the I2O controller.
 *
 *	Returns 0 on success or negative error code on failure.
 */
static int i2o_pci_irq_enable(struct i2o_controller *c)
{
	struct pci_dev *pdev = c->pdev;
	int rc;

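	/* Mask all controller interrupts while the handler is installed */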
	writel(0xffffffff, c->irq_mask);

	if (pdev->irq) {
		rc = request_irq(pdev->irq, i2o_pci_interrupt, IRQF_SHARED,
				 c->name, c);
		if (rc < 0) {
			printk(KERN_ERR "%s: unable to allocate interrupt %d."
			       "\n", c->name, pdev->irq);
			return rc;
		}
	}

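	/* Unmask controller interrupts now that the handler is in place */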
	writel(0x00000000, c->irq_mask);

	printk(KERN_INFO "%s: Installed at IRQ %d\n", c->name, pdev->irq);

	return 0;
}

/**
 *	i2o_pci_irq_disable - Free interrupt for I2O controller
 *	@c: I2O controller
 *
 *	Disable interrupts in I2O controller and then free interrupt.
 */
static void i2o_pci_irq_disable(struct i2o_controller *c)
{
	writel(0xffffffff, c->irq_mask);

	if (c->pdev->irq > 0)
		free_irq(c->pdev->irq, c);
}

/**
 *	i2o_pci_probe - Probe the PCI device for an I2O controller
 *	@pdev: PCI device to test
 *	@id: id which matched with the PCI device id table
 *
 *	Probe the PCI device for any device which is a member of the
 *	Intelligent I/O (I2O) class or is an Adaptec Zero Channel Controller.
 *	We attempt to set up each such device and register it with the core.
 *
 *	Returns 0 on success or negative error code on failure.
 */
static int __devinit i2o_pci_probe(struct pci_dev *pdev,
				   const struct pci_device_id *id)
{
	struct i2o_controller *c;
	int rc;
	struct pci_dev *i960 = NULL;

	printk(KERN_INFO "i2o: Checking for PCI I2O controllers...\n");

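	/*
	 * The low byte of pdev->class is the PCI programming interface;
	 * this driver only handles interface revisions 0 and 1.
	 */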
	if ((pdev->class & 0xff) > 1) {
		printk(KERN_WARNING "i2o: %s does not support I2O 1.5 "
		       "(skipping).\n", pci_name(pdev));
		return -ENODEV;
	}

	if ((rc = pci_enable_device(pdev))) {
		printk(KERN_WARNING "i2o: couldn't enable device %s\n",
		       pci_name(pdev));
		return rc;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		printk(KERN_WARNING "i2o: no suitable DMA found for %s\n",
		       pci_name(pdev));
		rc = -ENODEV;
		goto disable;
	}

	pci_set_master(pdev);

	c = i2o_iop_alloc();
	if (IS_ERR(c)) {
		printk(KERN_ERR "i2o: couldn't allocate memory for %s\n",
		       pci_name(pdev));
		rc = PTR_ERR(c);
		goto disable;
	} else
		printk(KERN_INFO "%s: controller found (%s)\n", c->name,
		       pci_name(pdev));

	c->pdev = pdev;
	c->device.parent = &pdev->dev;

	/* Cards that fall apart if you hit them with large I/O loads... */
	if (pdev->vendor == PCI_VENDOR_ID_NCR && pdev->device == 0x0630) {
		c->short_req = 1;
		printk(KERN_INFO "%s: Symbios FC920 workarounds activated.\n",
		       c->name);
	}

	if (pdev->subsystem_vendor == PCI_VENDOR_ID_PROMISE) {
		/*
		 * Expose the chip behind the i960 for initialization, or it
		 * will fail.
		 */
		i960 = pci_get_slot(c->pdev->bus,
				  PCI_DEVFN(PCI_SLOT(c->pdev->devfn), 0));

		if (i960) {
			pci_write_config_word(i960, 0x42, 0);
			pci_dev_put(i960);
		}

		c->promise = 1;
		c->limit_sectors = 1;
	}

	if (pdev->subsystem_vendor == PCI_VENDOR_ID_DPT)
		c->adaptec = 1;

	/* Cards that go bananas if you quiesce them before you reset them. */
	if (pdev->vendor == PCI_VENDOR_ID_DPT) {
		c->no_quiesce = 1;
		if (pdev->device == 0xa511)
			c->raptor = 1;

		if (pdev->subsystem_device == 0xc05a) {
			c->limit_sectors = 1;
			printk(KERN_INFO
			       "%s: limit sectors per request to %d\n", c->name,
			       I2O_MAX_SECTORS_LIMITED);
		}
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
		if (sizeof(dma_addr_t) > 4) {
			if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
				printk(KERN_INFO "%s: 64-bit DMA unavailable\n",
				       c->name);
			else {
				c->pae_support = 1;
				printk(KERN_INFO "%s: using 64-bit DMA\n",
				       c->name);
			}
		}
#endif
	}

	if ((rc = i2o_pci_alloc(c))) {
		printk(KERN_ERR "%s: DMA / IO allocation for I2O controller "
		       "failed\n", c->name);
		goto free_controller;
	}

	if ((rc = i2o_pci_irq_enable(c))) {
		printk(KERN_ERR "%s: unable to enable interrupts for I2O "
		       "controller\n", c->name);
		goto free_pci;
	}

	if ((rc = i2o_iop_add(c)))
		goto uninstall;

	if (i960)
		pci_write_config_word(i960, 0x42, 0x03ff);

	return 0;

      uninstall:
	i2o_pci_irq_disable(c);

      free_pci:
	i2o_pci_free(c);

      free_controller:
	i2o_iop_free(c);

      disable:
	pci_disable_device(pdev);

	return rc;
}

/**
 *	i2o_pci_remove - Removes an I2O controller from the system
 *	@pdev: I2O controller which should be removed
 *
 *	Reset the I2O controller, disable interrupts and remove all allocated
 *	resources.
 */
static void __devexit i2o_pci_remove(struct pci_dev *pdev)
{
	struct i2o_controller *c;
	c = pci_get_drvdata(pdev);

	i2o_iop_remove(c);
	i2o_pci_irq_disable(c);
	i2o_pci_free(c);

	pci_disable_device(pdev);

	printk(KERN_INFO "%s: Controller removed.\n", c->name);

	put_device(&c->device);
}

/* PCI driver for I2O controller */
static struct pci_driver i2o_pci_driver = {
	.name = "PCI_I2O",
	.id_table = i2o_pci_ids,
	.probe = i2o_pci_probe,
	.remove = __devexit_p(i2o_pci_remove),
};

/**
 *	i2o_pci_init - registers I2O PCI driver in PCI subsystem
 *
 *	Returns 0 on success or negative error code on failure.
 */
int __init i2o_pci_init(void)
{
	return pci_register_driver(&i2o_pci_driver);
}

/**
 *	i2o_pci_exit - unregisters I2O PCI driver from PCI subsystem
 */
void __exit i2o_pci_exit(void)
{
	pci_unregister_driver(&i2o_pci_driver);
}

MODULE_DEVICE_TABLE(pci, i2o_pci_ids);