• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/staging/vme/bridges/
1/*
2 * Support for the Tundra Universe I/II VME-PCI Bridge Chips
3 *
4 * Author: Martyn Welch <martyn.welch@ge.com>
5 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
6 *
7 * Based on work by Tom Armistead and Ajit Prem
8 * Copyright 2004 Motorola Inc.
9 *
10 * Derived from ca91c042.c by Michael Wyrick
11 *
12 * This program is free software; you can redistribute  it and/or modify it
13 * under  the terms of  the GNU General  Public License as published by the
14 * Free Software Foundation;  either version 2 of the  License, or (at your
15 * option) any later version.
16 */
17
18#include <linux/module.h>
19#include <linux/mm.h>
20#include <linux/types.h>
21#include <linux/errno.h>
22#include <linux/pci.h>
23#include <linux/dma-mapping.h>
24#include <linux/poll.h>
25#include <linux/interrupt.h>
26#include <linux/spinlock.h>
27#include <linux/sched.h>
28#include <linux/slab.h>
29#include <linux/time.h>
30#include <linux/io.h>
31#include <linux/uaccess.h>
32
33#include "../vme.h"
34#include "../vme_bridge.h"
35#include "vme_ca91cx42.h"
36
37static int __init ca91cx42_init(void);
38static int ca91cx42_probe(struct pci_dev *, const struct pci_device_id *);
39static void ca91cx42_remove(struct pci_dev *);
40static void __exit ca91cx42_exit(void);
41
/* Module parameters */
/* NOTE(review): geoid is not referenced in this chunk -- presumably a
 * geographical-address override registered with module_param() elsewhere
 * in the file; confirm before documenting further. */
static int geoid;

/* Name used both for the PCI driver and as the request_irq() label */
static char driver_name[] = "vme_ca91cx42";

/* PCI IDs this driver binds to: the Tundra CA91C142 (Universe II) bridge */
static const struct pci_device_id ca91cx42_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_CA91C142) },
	{ },
};

/* PCI driver glue: probe/remove entry points for matching devices */
static struct pci_driver ca91cx42_driver = {
	.name = driver_name,
	.id_table = ca91cx42_ids,
	.probe = ca91cx42_probe,
	.remove = ca91cx42_remove,
};
58
59static u32 ca91cx42_DMA_irqhandler(struct ca91cx42_driver *bridge)
60{
61	wake_up(&(bridge->dma_queue));
62
63	return CA91CX42_LINT_DMA;
64}
65
66static u32 ca91cx42_LM_irqhandler(struct ca91cx42_driver *bridge, u32 stat)
67{
68	int i;
69	u32 serviced = 0;
70
71	for (i = 0; i < 4; i++) {
72		if (stat & CA91CX42_LINT_LM[i]) {
73			/* We only enable interrupts if the callback is set */
74			bridge->lm_callback[i](i);
75			serviced |= CA91CX42_LINT_LM[i];
76		}
77	}
78
79	return serviced;
80}
81
82static u32 ca91cx42_MB_irqhandler(struct ca91cx42_driver *bridge, int mbox_mask)
83{
84	wake_up(&(bridge->mbox_queue));
85
86	return CA91CX42_LINT_MBOX;
87}
88
89static u32 ca91cx42_IACK_irqhandler(struct ca91cx42_driver *bridge)
90{
91	wake_up(&(bridge->iack_queue));
92
93	return CA91CX42_LINT_SW_IACK;
94}
95
96static u32 ca91cx42_VERR_irqhandler(struct vme_bridge *ca91cx42_bridge)
97{
98	int val;
99	struct ca91cx42_driver *bridge;
100
101	bridge = ca91cx42_bridge->driver_priv;
102
103	val = ioread32(bridge->base + DGCS);
104
105	if (!(val & 0x00000800)) {
106		dev_err(ca91cx42_bridge->parent, "ca91cx42_VERR_irqhandler DMA "
107			"Read Error DGCS=%08X\n", val);
108	}
109
110	return CA91CX42_LINT_VERR;
111}
112
113static u32 ca91cx42_LERR_irqhandler(struct vme_bridge *ca91cx42_bridge)
114{
115	int val;
116	struct ca91cx42_driver *bridge;
117
118	bridge = ca91cx42_bridge->driver_priv;
119
120	val = ioread32(bridge->base + DGCS);
121
122	if (!(val & 0x00000800))
123		dev_err(ca91cx42_bridge->parent, "ca91cx42_LERR_irqhandler DMA "
124			"Read Error DGCS=%08X\n", val);
125
126	return CA91CX42_LINT_LERR;
127}
128
129
130static u32 ca91cx42_VIRQ_irqhandler(struct vme_bridge *ca91cx42_bridge,
131	int stat)
132{
133	int vec, i, serviced = 0;
134	struct ca91cx42_driver *bridge;
135
136	bridge = ca91cx42_bridge->driver_priv;
137
138
139	for (i = 7; i > 0; i--) {
140		if (stat & (1 << i)) {
141			vec = ioread32(bridge->base +
142				CA91CX42_V_STATID[i]) & 0xff;
143
144			vme_irq_handler(ca91cx42_bridge, i, vec);
145
146			serviced |= (1 << i);
147		}
148	}
149
150	return serviced;
151}
152
/*
 * Top-level PCI interrupt handler for the bridge (registered IRQF_SHARED
 * in ca91cx42_irq_init()).  Reads the latched interrupt status, masks it
 * against the currently enabled sources, dispatches each pending source
 * to its sub-handler, then acknowledges by writing LINT_STAT back.
 */
static irqreturn_t ca91cx42_irqhandler(int irq, void *ptr)
{
	u32 stat, enable, serviced = 0;
	struct vme_bridge *ca91cx42_bridge;
	struct ca91cx42_driver *bridge;

	/* ptr is the vme_bridge cookie passed to request_irq() */
	ca91cx42_bridge = ptr;

	bridge = ca91cx42_bridge->driver_priv;

	enable = ioread32(bridge->base + LINT_EN);
	stat = ioread32(bridge->base + LINT_STAT);

	/* Only look at unmasked interrupts */
	stat &= enable;

	/* Shared line: not ours if nothing we enabled is pending */
	if (unlikely(!stat))
		return IRQ_NONE;

	if (stat & CA91CX42_LINT_DMA)
		serviced |= ca91cx42_DMA_irqhandler(bridge);
	if (stat & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
			CA91CX42_LINT_LM3))
		serviced |= ca91cx42_LM_irqhandler(bridge, stat);
	if (stat & CA91CX42_LINT_MBOX)
		serviced |= ca91cx42_MB_irqhandler(bridge, stat);
	if (stat & CA91CX42_LINT_SW_IACK)
		serviced |= ca91cx42_IACK_irqhandler(bridge);
	if (stat & CA91CX42_LINT_VERR)
		serviced |= ca91cx42_VERR_irqhandler(ca91cx42_bridge);
	if (stat & CA91CX42_LINT_LERR)
		serviced |= ca91cx42_LERR_irqhandler(ca91cx42_bridge);
	if (stat & (CA91CX42_LINT_VIRQ1 | CA91CX42_LINT_VIRQ2 |
			CA91CX42_LINT_VIRQ3 | CA91CX42_LINT_VIRQ4 |
			CA91CX42_LINT_VIRQ5 | CA91CX42_LINT_VIRQ6 |
			CA91CX42_LINT_VIRQ7))
		serviced |= ca91cx42_VIRQ_irqhandler(ca91cx42_bridge, stat);

	/* Clear serviced interrupts */
	/* NOTE(review): the full pending mask (stat) is acknowledged here,
	 * not the accumulated 'serviced' mask -- confirm this is intended */
	iowrite32(stat, bridge->base + LINT_STAT);

	return IRQ_HANDLED;
}
196
/*
 * Set up interrupt handling for the bridge: quiesce every interrupt
 * source, install the shared PCI interrupt handler, map all sources to
 * PCI interrupt line 0 and enable the sources serviced by this driver
 * (DMA, mailboxes, software IACK and bus-error reporting).
 * Returns 0 on success or the error from request_irq().
 */
static int ca91cx42_irq_init(struct vme_bridge *ca91cx42_bridge)
{
	int result, tmp;
	struct pci_dev *pdev;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	/* Need pdev */
	pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev);

	/* Initialise list for VME bus errors */
	INIT_LIST_HEAD(&(ca91cx42_bridge->vme_errors));

	mutex_init(&(ca91cx42_bridge->irq_mtx));

	/* Disable interrupts from PCI to VME */
	iowrite32(0, bridge->base + VINT_EN);

	/* Disable PCI interrupts */
	iowrite32(0, bridge->base + LINT_EN);
	/* Clear Any Pending PCI Interrupts */
	iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);

	/* ca91cx42_bridge is registered as the dev_id cookie; the same
	 * cookie must be used to free this shared interrupt on teardown */
	result = request_irq(pdev->irq, ca91cx42_irqhandler, IRQF_SHARED,
			driver_name, ca91cx42_bridge);
	if (result) {
		dev_err(&pdev->dev, "Can't get assigned pci irq vector %02X\n",
		       pdev->irq);
		return result;
	}

	/* Ensure all interrupts are mapped to PCI Interrupt 0 */
	iowrite32(0, bridge->base + LINT_MAP0);
	iowrite32(0, bridge->base + LINT_MAP1);
	iowrite32(0, bridge->base + LINT_MAP2);

	/* Enable DMA, mailbox & LM Interrupts */
	tmp = CA91CX42_LINT_MBOX3 | CA91CX42_LINT_MBOX2 | CA91CX42_LINT_MBOX1 |
		CA91CX42_LINT_MBOX0 | CA91CX42_LINT_SW_IACK |
		CA91CX42_LINT_VERR | CA91CX42_LINT_LERR | CA91CX42_LINT_DMA;

	iowrite32(tmp, bridge->base + LINT_EN);

	return 0;
}
243
/*
 * Tear down interrupt handling: disable all interrupt generation in both
 * directions, clear anything pending and release the PCI interrupt line.
 */
static void ca91cx42_irq_exit(struct ca91cx42_driver *bridge,
	struct pci_dev *pdev)
{
	/* Disable interrupts from PCI to VME */
	iowrite32(0, bridge->base + VINT_EN);

	/* Disable PCI interrupts */
	iowrite32(0, bridge->base + LINT_EN);
	/* Clear Any Pending PCI Interrupts */
	iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);

	/* FIXME(review): request_irq() in ca91cx42_irq_init() registered the
	 * vme_bridge pointer as the dev_id cookie, but pdev is passed here.
	 * For a shared interrupt free_irq() matches on dev_id, so this will
	 * fail to release the handler.  Fixing it requires the vme_bridge
	 * pointer, which this function's signature does not provide. */
	free_irq(pdev->irq, pdev);
}
257
258/*
259 * Set up an VME interrupt
260 */
261void ca91cx42_irq_set(struct vme_bridge *ca91cx42_bridge, int level, int state,
262	int sync)
263
264{
265	struct pci_dev *pdev;
266	u32 tmp;
267	struct ca91cx42_driver *bridge;
268
269	bridge = ca91cx42_bridge->driver_priv;
270
271	/* Enable IRQ level */
272	tmp = ioread32(bridge->base + LINT_EN);
273
274	if (state == 0)
275		tmp &= ~CA91CX42_LINT_VIRQ[level];
276	else
277		tmp |= CA91CX42_LINT_VIRQ[level];
278
279	iowrite32(tmp, bridge->base + LINT_EN);
280
281	if ((state == 0) && (sync != 0)) {
282		pdev = container_of(ca91cx42_bridge->parent, struct pci_dev,
283			dev);
284
285		synchronize_irq(pdev->irq);
286	}
287}
288
/*
 * Generate a software VME interrupt at the given level carrying the given
 * status/ID vector, then block until the interrupt is acknowledged.
 * Returns 0 on success, -EINVAL if statid is odd (the Universe chip can
 * only supply even vectors).
 */
int ca91cx42_irq_generate(struct vme_bridge *ca91cx42_bridge, int level,
	int statid)
{
	u32 tmp;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	/* Universe can only generate even vectors */
	if (statid & 1)
		return -EINVAL;

	/* Serialise software interrupt generation */
	mutex_lock(&(bridge->vme_int));

	tmp = ioread32(bridge->base + VINT_EN);

	/* Set Status/ID */
	iowrite32(statid << 24, bridge->base + STATID);

	/* Assert VMEbus IRQ: SW IRQ bits live at VINT_EN[31:25] */
	tmp = tmp | (1 << (level + 24));
	iowrite32(tmp, bridge->base + VINT_EN);

	/* Wait for IACK */
	/* NOTE(review): with a constant-false condition this wait only
	 * terminates on a signal; the wake_up() from
	 * ca91cx42_IACK_irqhandler() alone re-checks the condition and goes
	 * back to sleep.  Confirm this behaves as intended. */
	wait_event_interruptible(bridge->iack_queue, 0);

	/* Return interrupt to low state */
	tmp = ioread32(bridge->base + VINT_EN);
	tmp = tmp & ~(1 << (level + 24));
	iowrite32(tmp, bridge->base + VINT_EN);

	mutex_unlock(&(bridge->vme_int));

	return 0;
}
324
/*
 * Configure a VME slave window: make the region [vme_base, vme_base+size)
 * of address space 'aspace' respond on the VME bus and translate accesses
 * to PCI at pci_base, honouring the requested cycle attributes.
 * Returns 0 on success, -EINVAL for an unsupported address space or for
 * base/bound/offset values not aligned to the window granularity.
 */
int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
	unsigned long long vme_base, unsigned long long size,
	dma_addr_t pci_base, vme_address_t aspace, vme_cycle_t cycle)
{
	unsigned int i, addr = 0, granularity;
	unsigned int temp_ctl = 0;
	unsigned int vme_bound, pci_offset;
	struct vme_bridge *ca91cx42_bridge;
	struct ca91cx42_driver *bridge;

	ca91cx42_bridge = image->parent;

	bridge = ca91cx42_bridge->driver_priv;

	i = image->number;

	/* Translate the requested address space to VSI_CTL VAS bits */
	switch (aspace) {
	case VME_A16:
		addr |= CA91CX42_VSI_CTL_VAS_A16;
		break;
	case VME_A24:
		addr |= CA91CX42_VSI_CTL_VAS_A24;
		break;
	case VME_A32:
		addr |= CA91CX42_VSI_CTL_VAS_A32;
		break;
	case VME_USER1:
		addr |= CA91CX42_VSI_CTL_VAS_USER1;
		break;
	case VME_USER2:
		addr |= CA91CX42_VSI_CTL_VAS_USER2;
		break;
	case VME_A64:
	case VME_CRCSR:
	case VME_USER3:
	case VME_USER4:
	default:
		/* Not supported by the Universe slave images */
		dev_err(ca91cx42_bridge->parent, "Invalid address space\n");
		return -EINVAL;
		break;
	}

	/*
	 * Bound address is a valid address for the window, adjust
	 * accordingly
	 */
	vme_bound = vme_base + size;
	pci_offset = pci_base - vme_base;

	/* Images 0 and 4 have 4KB granularity, all others 64KB */
	if ((i == 0) || (i == 4))
		granularity = 0x1000;
	else
		granularity = 0x10000;

	if (vme_base & (granularity - 1)) {
		dev_err(ca91cx42_bridge->parent, "Invalid VME base "
			"alignment\n");
		return -EINVAL;
	}
	if (vme_bound & (granularity - 1)) {
		dev_err(ca91cx42_bridge->parent, "Invalid VME bound "
			"alignment\n");
		return -EINVAL;
	}
	if (pci_offset & (granularity - 1)) {
		dev_err(ca91cx42_bridge->parent, "Invalid PCI Offset "
			"alignment\n");
		return -EINVAL;
	}

	/* Disable while we are mucking around */
	temp_ctl = ioread32(bridge->base + CA91CX42_VSI_CTL[i]);
	temp_ctl &= ~CA91CX42_VSI_CTL_EN;
	iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);

	/* Setup mapping */
	iowrite32(vme_base, bridge->base + CA91CX42_VSI_BS[i]);
	iowrite32(vme_bound, bridge->base + CA91CX42_VSI_BD[i]);
	iowrite32(pci_offset, bridge->base + CA91CX42_VSI_TO[i]);

	/* Setup address space */
	temp_ctl &= ~CA91CX42_VSI_CTL_VAS_M;
	temp_ctl |= addr;

	/* Setup cycle types */
	temp_ctl &= ~(CA91CX42_VSI_CTL_PGM_M | CA91CX42_VSI_CTL_SUPER_M);
	if (cycle & VME_SUPER)
		temp_ctl |= CA91CX42_VSI_CTL_SUPER_SUPR;
	if (cycle & VME_USER)
		temp_ctl |= CA91CX42_VSI_CTL_SUPER_NPRIV;
	if (cycle & VME_PROG)
		temp_ctl |= CA91CX42_VSI_CTL_PGM_PGM;
	if (cycle & VME_DATA)
		temp_ctl |= CA91CX42_VSI_CTL_PGM_DATA;

	/* Write ctl reg without enable */
	iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);

	if (enabled)
		temp_ctl |= CA91CX42_VSI_CTL_EN;

	iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);

	return 0;
}
430
431int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
432	unsigned long long *vme_base, unsigned long long *size,
433	dma_addr_t *pci_base, vme_address_t *aspace, vme_cycle_t *cycle)
434{
435	unsigned int i, granularity = 0, ctl = 0;
436	unsigned long long vme_bound, pci_offset;
437	struct ca91cx42_driver *bridge;
438
439	bridge = image->parent->driver_priv;
440
441	i = image->number;
442
443	if ((i == 0) || (i == 4))
444		granularity = 0x1000;
445	else
446		granularity = 0x10000;
447
448	/* Read Registers */
449	ctl = ioread32(bridge->base + CA91CX42_VSI_CTL[i]);
450
451	*vme_base = ioread32(bridge->base + CA91CX42_VSI_BS[i]);
452	vme_bound = ioread32(bridge->base + CA91CX42_VSI_BD[i]);
453	pci_offset = ioread32(bridge->base + CA91CX42_VSI_TO[i]);
454
455	*pci_base = (dma_addr_t)vme_base + pci_offset;
456	*size = (unsigned long long)((vme_bound - *vme_base) + granularity);
457
458	*enabled = 0;
459	*aspace = 0;
460	*cycle = 0;
461
462	if (ctl & CA91CX42_VSI_CTL_EN)
463		*enabled = 1;
464
465	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A16)
466		*aspace = VME_A16;
467	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A24)
468		*aspace = VME_A24;
469	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A32)
470		*aspace = VME_A32;
471	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER1)
472		*aspace = VME_USER1;
473	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER2)
474		*aspace = VME_USER2;
475
476	if (ctl & CA91CX42_VSI_CTL_SUPER_SUPR)
477		*cycle |= VME_SUPER;
478	if (ctl & CA91CX42_VSI_CTL_SUPER_NPRIV)
479		*cycle |= VME_USER;
480	if (ctl & CA91CX42_VSI_CTL_PGM_PGM)
481		*cycle |= VME_PROG;
482	if (ctl & CA91CX42_VSI_CTL_PGM_DATA)
483		*cycle |= VME_DATA;
484
485	return 0;
486}
487
488/*
489 * Allocate and map PCI Resource
490 */
491static int ca91cx42_alloc_resource(struct vme_master_resource *image,
492	unsigned long long size)
493{
494	unsigned long long existing_size;
495	int retval = 0;
496	struct pci_dev *pdev;
497	struct vme_bridge *ca91cx42_bridge;
498
499	ca91cx42_bridge = image->parent;
500
501	/* Find pci_dev container of dev */
502	if (ca91cx42_bridge->parent == NULL) {
503		dev_err(ca91cx42_bridge->parent, "Dev entry NULL\n");
504		return -EINVAL;
505	}
506	pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev);
507
508	existing_size = (unsigned long long)(image->bus_resource.end -
509		image->bus_resource.start);
510
511	/* If the existing size is OK, return */
512	if (existing_size == (size - 1))
513		return 0;
514
515	if (existing_size != 0) {
516		iounmap(image->kern_base);
517		image->kern_base = NULL;
518		if (image->bus_resource.name != NULL)
519			kfree(image->bus_resource.name);
520		release_resource(&(image->bus_resource));
521		memset(&(image->bus_resource), 0, sizeof(struct resource));
522	}
523
524	if (image->bus_resource.name == NULL) {
525		image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
526		if (image->bus_resource.name == NULL) {
527			dev_err(ca91cx42_bridge->parent, "Unable to allocate "
528				"memory for resource name\n");
529			retval = -ENOMEM;
530			goto err_name;
531		}
532	}
533
534	sprintf((char *)image->bus_resource.name, "%s.%d",
535		ca91cx42_bridge->name, image->number);
536
537	image->bus_resource.start = 0;
538	image->bus_resource.end = (unsigned long)size;
539	image->bus_resource.flags = IORESOURCE_MEM;
540
541	retval = pci_bus_alloc_resource(pdev->bus,
542		&(image->bus_resource), size, size, PCIBIOS_MIN_MEM,
543		0, NULL, NULL);
544	if (retval) {
545		dev_err(ca91cx42_bridge->parent, "Failed to allocate mem "
546			"resource for window %d size 0x%lx start 0x%lx\n",
547			image->number, (unsigned long)size,
548			(unsigned long)image->bus_resource.start);
549		goto err_resource;
550	}
551
552	image->kern_base = ioremap_nocache(
553		image->bus_resource.start, size);
554	if (image->kern_base == NULL) {
555		dev_err(ca91cx42_bridge->parent, "Failed to remap resource\n");
556		retval = -ENOMEM;
557		goto err_remap;
558	}
559
560	return 0;
561
562	iounmap(image->kern_base);
563	image->kern_base = NULL;
564err_remap:
565	release_resource(&(image->bus_resource));
566err_resource:
567	kfree(image->bus_resource.name);
568	memset(&(image->bus_resource), 0, sizeof(struct resource));
569err_name:
570	return retval;
571}
572
573/*
574 * Free and unmap PCI Resource
575 */
576static void ca91cx42_free_resource(struct vme_master_resource *image)
577{
578	iounmap(image->kern_base);
579	image->kern_base = NULL;
580	release_resource(&(image->bus_resource));
581	kfree(image->bus_resource.name);
582	memset(&(image->bus_resource), 0, sizeof(struct resource));
583}
584
585
/*
 * Configure a VME master window: allocate a PCI region (via
 * ca91cx42_alloc_resource()), then program the LSI registers so that PCI
 * accesses to that region are translated to VME bus cycles at vme_base
 * with the requested address space, cycle types and data width.
 * Returns 0 on success or a negative errno (alignment, allocation
 * failure, unsupported data width or address space).
 */
int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
	unsigned long long vme_base, unsigned long long size,
	vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
{
	int retval = 0;
	unsigned int i, granularity = 0;
	unsigned int temp_ctl = 0;
	unsigned long long pci_bound, vme_offset, pci_base;
	struct vme_bridge *ca91cx42_bridge;
	struct ca91cx42_driver *bridge;

	ca91cx42_bridge = image->parent;

	bridge = ca91cx42_bridge->driver_priv;

	i = image->number;

	/* Images 0 and 4 have 4KB granularity, all others 64KB */
	if ((i == 0) || (i == 4))
		granularity = 0x1000;
	else
		granularity = 0x10000;

	/* Verify input data */
	if (vme_base & (granularity - 1)) {
		dev_err(ca91cx42_bridge->parent, "Invalid VME Window "
			"alignment\n");
		retval = -EINVAL;
		goto err_window;
	}
	if (size & (granularity - 1)) {
		dev_err(ca91cx42_bridge->parent, "Invalid VME Window "
			"alignment\n");
		retval = -EINVAL;
		goto err_window;
	}

	spin_lock(&(image->lock));

	/*
	 * Let's allocate the resource here rather than further up the stack as
	 * it avoids pushing loads of bus dependant stuff up the stack
	 */
	retval = ca91cx42_alloc_resource(image, size);
	if (retval) {
		spin_unlock(&(image->lock));
		dev_err(ca91cx42_bridge->parent, "Unable to allocate memory "
			"for resource name\n");
		retval = -ENOMEM;
		goto err_res;
	}

	pci_base = (unsigned long long)image->bus_resource.start;

	/*
	 * Bound address is a valid address for the window, adjust
	 * according to window granularity.
	 */
	pci_bound = pci_base + size;
	vme_offset = vme_base - pci_base;

	/* Disable while we are mucking around */
	temp_ctl = ioread32(bridge->base + CA91CX42_LSI_CTL[i]);
	temp_ctl &= ~CA91CX42_LSI_CTL_EN;
	iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);

	/* Setup cycle types */
	temp_ctl &= ~CA91CX42_LSI_CTL_VCT_M;
	if (cycle & VME_BLT)
		temp_ctl |= CA91CX42_LSI_CTL_VCT_BLT;
	if (cycle & VME_MBLT)
		temp_ctl |= CA91CX42_LSI_CTL_VCT_MBLT;

	/* Setup data width */
	temp_ctl &= ~CA91CX42_LSI_CTL_VDW_M;
	switch (dwidth) {
	case VME_D8:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D8;
		break;
	case VME_D16:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D16;
		break;
	case VME_D32:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D32;
		break;
	case VME_D64:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D64;
		break;
	default:
		spin_unlock(&(image->lock));
		dev_err(ca91cx42_bridge->parent, "Invalid data width\n");
		retval = -EINVAL;
		goto err_dwidth;
		break;
	}

	/* Setup address space */
	temp_ctl &= ~CA91CX42_LSI_CTL_VAS_M;
	switch (aspace) {
	case VME_A16:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A16;
		break;
	case VME_A24:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A24;
		break;
	case VME_A32:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A32;
		break;
	case VME_CRCSR:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_CRCSR;
		break;
	case VME_USER1:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_USER1;
		break;
	case VME_USER2:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_USER2;
		break;
	case VME_A64:
	case VME_USER3:
	case VME_USER4:
	default:
		/* Not supported by the Universe master images */
		spin_unlock(&(image->lock));
		dev_err(ca91cx42_bridge->parent, "Invalid address space\n");
		retval = -EINVAL;
		goto err_aspace;
		break;
	}

	/* Supervisor/program attributes (cleared bits mean
	 * non-privileged / data access) */
	temp_ctl &= ~(CA91CX42_LSI_CTL_PGM_M | CA91CX42_LSI_CTL_SUPER_M);
	if (cycle & VME_SUPER)
		temp_ctl |= CA91CX42_LSI_CTL_SUPER_SUPR;
	if (cycle & VME_PROG)
		temp_ctl |= CA91CX42_LSI_CTL_PGM_PGM;

	/* Setup mapping */
	iowrite32(pci_base, bridge->base + CA91CX42_LSI_BS[i]);
	iowrite32(pci_bound, bridge->base + CA91CX42_LSI_BD[i]);
	iowrite32(vme_offset, bridge->base + CA91CX42_LSI_TO[i]);

	/* Write ctl reg without enable */
	iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);

	if (enabled)
		temp_ctl |= CA91CX42_LSI_CTL_EN;

	iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);

	spin_unlock(&(image->lock));
	return 0;

err_aspace:
err_dwidth:
	/* The PCI window was allocated above - release it */
	ca91cx42_free_resource(image);
err_res:
err_window:
	return retval;
}
742
743int __ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
744	unsigned long long *vme_base, unsigned long long *size,
745	vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
746{
747	unsigned int i, ctl;
748	unsigned long long pci_base, pci_bound, vme_offset;
749	struct ca91cx42_driver *bridge;
750
751	bridge = image->parent->driver_priv;
752
753	i = image->number;
754
755	ctl = ioread32(bridge->base + CA91CX42_LSI_CTL[i]);
756
757	pci_base = ioread32(bridge->base + CA91CX42_LSI_BS[i]);
758	vme_offset = ioread32(bridge->base + CA91CX42_LSI_TO[i]);
759	pci_bound = ioread32(bridge->base + CA91CX42_LSI_BD[i]);
760
761	*vme_base = pci_base + vme_offset;
762	*size = (unsigned long long)(pci_bound - pci_base);
763
764	*enabled = 0;
765	*aspace = 0;
766	*cycle = 0;
767	*dwidth = 0;
768
769	if (ctl & CA91CX42_LSI_CTL_EN)
770		*enabled = 1;
771
772	/* Setup address space */
773	switch (ctl & CA91CX42_LSI_CTL_VAS_M) {
774	case CA91CX42_LSI_CTL_VAS_A16:
775		*aspace = VME_A16;
776		break;
777	case CA91CX42_LSI_CTL_VAS_A24:
778		*aspace = VME_A24;
779		break;
780	case CA91CX42_LSI_CTL_VAS_A32:
781		*aspace = VME_A32;
782		break;
783	case CA91CX42_LSI_CTL_VAS_CRCSR:
784		*aspace = VME_CRCSR;
785		break;
786	case CA91CX42_LSI_CTL_VAS_USER1:
787		*aspace = VME_USER1;
788		break;
789	case CA91CX42_LSI_CTL_VAS_USER2:
790		*aspace = VME_USER2;
791		break;
792	}
793
794	/* Setup cycle types */
795	if (ctl & CA91CX42_LSI_CTL_VCT_BLT)
796		*cycle |= VME_BLT;
797	else
798		*cycle |= VME_SCT;
799
800	if (ctl & CA91CX42_LSI_CTL_SUPER_SUPR)
801		*cycle |= VME_SUPER;
802	else
803		*cycle |= VME_USER;
804
805	if (ctl & CA91CX42_LSI_CTL_PGM_PGM)
806		*cycle = VME_PROG;
807	else
808		*cycle = VME_DATA;
809
810	/* Setup data width */
811	switch (ctl & CA91CX42_LSI_CTL_VDW_M) {
812	case CA91CX42_LSI_CTL_VDW_D8:
813		*dwidth = VME_D8;
814		break;
815	case CA91CX42_LSI_CTL_VDW_D16:
816		*dwidth = VME_D16;
817		break;
818	case CA91CX42_LSI_CTL_VDW_D32:
819		*dwidth = VME_D32;
820		break;
821	case CA91CX42_LSI_CTL_VDW_D64:
822		*dwidth = VME_D64;
823		break;
824	}
825
826	return 0;
827}
828
829int ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
830	unsigned long long *vme_base, unsigned long long *size,
831	vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
832{
833	int retval;
834
835	spin_lock(&(image->lock));
836
837	retval = __ca91cx42_master_get(image, enabled, vme_base, size, aspace,
838		cycle, dwidth);
839
840	spin_unlock(&(image->lock));
841
842	return retval;
843}
844
845ssize_t ca91cx42_master_read(struct vme_master_resource *image, void *buf,
846	size_t count, loff_t offset)
847{
848	ssize_t retval;
849
850	spin_lock(&(image->lock));
851
852	memcpy_fromio(buf, image->kern_base + offset, (unsigned int)count);
853	retval = count;
854
855	spin_unlock(&(image->lock));
856
857	return retval;
858}
859
860ssize_t ca91cx42_master_write(struct vme_master_resource *image, void *buf,
861	size_t count, loff_t offset)
862{
863	int retval = 0;
864
865	spin_lock(&(image->lock));
866
867	memcpy_toio(image->kern_base + offset, buf, (unsigned int)count);
868	retval = count;
869
870	spin_unlock(&(image->lock));
871
872	return retval;
873}
874
/*
 * Perform an atomic read-modify-write cycle on the VME bus using the
 * Universe special cycle generator (SCYC).  'offset' is relative to the
 * master window 'image'; mask/compare/swap program the SCYC registers
 * and the cycle is triggered by reading the target address.
 * Returns the value read by the cycle.
 *
 * NOTE(review): the return type is unsigned int yet -EINVAL is returned
 * on misalignment, so callers cannot distinguish that from a valid read
 * value -- confirm against the users of this operation.
 */
unsigned int ca91cx42_master_rmw(struct vme_master_resource *image,
	unsigned int mask, unsigned int compare, unsigned int swap,
	loff_t offset)
{
	u32 pci_addr, result;
	int i;
	struct ca91cx42_driver *bridge;
	struct device *dev;

	bridge = image->parent->driver_priv;
	dev = image->parent->parent;

	/* Find the PCI address that maps to the desired VME address */
	i = image->number;

	/* Locking as we can only do one of these at a time */
	mutex_lock(&(bridge->vme_rmw));

	/* Lock image */
	spin_lock(&(image->lock));

	pci_addr = (u32)image->kern_base + offset;

	/* Address must be 4-byte aligned */
	if (pci_addr & 0x3) {
		dev_err(dev, "RMW Address not 4-byte aligned\n");
		result = -EINVAL;
		goto out;
	}

	/* Ensure RMW Disabled whilst configuring */
	iowrite32(0, bridge->base + SCYC_CTL);

	/* Configure registers */
	iowrite32(mask, bridge->base + SCYC_EN);
	iowrite32(compare, bridge->base + SCYC_CMP);
	iowrite32(swap, bridge->base + SCYC_SWP);
	iowrite32(pci_addr, bridge->base + SCYC_ADDR);

	/* Enable RMW */
	iowrite32(CA91CX42_SCYC_CTL_CYC_RMW, bridge->base + SCYC_CTL);

	/* Kick process off with a read to the required address. */
	result = ioread32(image->kern_base + offset);

	/* Disable RMW */
	iowrite32(0, bridge->base + SCYC_CTL);

out:
	spin_unlock(&(image->lock));

	mutex_unlock(&(bridge->vme_rmw));

	return result;
}
930
931int ca91cx42_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
932	struct vme_dma_attr *dest, size_t count)
933{
934	struct ca91cx42_dma_entry *entry, *prev;
935	struct vme_dma_pci *pci_attr;
936	struct vme_dma_vme *vme_attr;
937	dma_addr_t desc_ptr;
938	int retval = 0;
939	struct device *dev;
940
941	dev = list->parent->parent->parent;
942
943	entry = kmalloc(sizeof(struct ca91cx42_dma_entry), GFP_KERNEL);
944	if (entry == NULL) {
945		dev_err(dev, "Failed to allocate memory for dma resource "
946			"structure\n");
947		retval = -ENOMEM;
948		goto err_mem;
949	}
950
951	/* Test descriptor alignment */
952	if ((unsigned long)&(entry->descriptor) & CA91CX42_DCPP_M) {
953		dev_err(dev, "Descriptor not aligned to 16 byte boundary as "
954			"required: %p\n", &(entry->descriptor));
955		retval = -EINVAL;
956		goto err_align;
957	}
958
959	memset(&(entry->descriptor), 0, sizeof(struct ca91cx42_dma_descriptor));
960
961	if (dest->type == VME_DMA_VME) {
962		entry->descriptor.dctl |= CA91CX42_DCTL_L2V;
963		vme_attr = dest->private;
964		pci_attr = src->private;
965	} else {
966		vme_attr = src->private;
967		pci_attr = dest->private;
968	}
969
970	/* Check we can do fullfill required attributes */
971	if ((vme_attr->aspace & ~(VME_A16 | VME_A24 | VME_A32 | VME_USER1 |
972		VME_USER2)) != 0) {
973
974		dev_err(dev, "Unsupported cycle type\n");
975		retval = -EINVAL;
976		goto err_aspace;
977	}
978
979	if ((vme_attr->cycle & ~(VME_SCT | VME_BLT | VME_SUPER | VME_USER |
980		VME_PROG | VME_DATA)) != 0) {
981
982		dev_err(dev, "Unsupported cycle type\n");
983		retval = -EINVAL;
984		goto err_cycle;
985	}
986
987	/* Check to see if we can fullfill source and destination */
988	if (!(((src->type == VME_DMA_PCI) && (dest->type == VME_DMA_VME)) ||
989		((src->type == VME_DMA_VME) && (dest->type == VME_DMA_PCI)))) {
990
991		dev_err(dev, "Cannot perform transfer with this "
992			"source-destination combination\n");
993		retval = -EINVAL;
994		goto err_direct;
995	}
996
997	/* Setup cycle types */
998	if (vme_attr->cycle & VME_BLT)
999		entry->descriptor.dctl |= CA91CX42_DCTL_VCT_BLT;
1000
1001	/* Setup data width */
1002	switch (vme_attr->dwidth) {
1003	case VME_D8:
1004		entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D8;
1005		break;
1006	case VME_D16:
1007		entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D16;
1008		break;
1009	case VME_D32:
1010		entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D32;
1011		break;
1012	case VME_D64:
1013		entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D64;
1014		break;
1015	default:
1016		dev_err(dev, "Invalid data width\n");
1017		return -EINVAL;
1018	}
1019
1020	/* Setup address space */
1021	switch (vme_attr->aspace) {
1022	case VME_A16:
1023		entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A16;
1024		break;
1025	case VME_A24:
1026		entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A24;
1027		break;
1028	case VME_A32:
1029		entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A32;
1030		break;
1031	case VME_USER1:
1032		entry->descriptor.dctl |= CA91CX42_DCTL_VAS_USER1;
1033		break;
1034	case VME_USER2:
1035		entry->descriptor.dctl |= CA91CX42_DCTL_VAS_USER2;
1036		break;
1037	default:
1038		dev_err(dev, "Invalid address space\n");
1039		return -EINVAL;
1040		break;
1041	}
1042
1043	if (vme_attr->cycle & VME_SUPER)
1044		entry->descriptor.dctl |= CA91CX42_DCTL_SUPER_SUPR;
1045	if (vme_attr->cycle & VME_PROG)
1046		entry->descriptor.dctl |= CA91CX42_DCTL_PGM_PGM;
1047
1048	entry->descriptor.dtbc = count;
1049	entry->descriptor.dla = pci_attr->address;
1050	entry->descriptor.dva = vme_attr->address;
1051	entry->descriptor.dcpp = CA91CX42_DCPP_NULL;
1052
1053	/* Add to list */
1054	list_add_tail(&(entry->list), &(list->entries));
1055
1056	/* Fill out previous descriptors "Next Address" */
1057	if (entry->list.prev != &(list->entries)) {
1058		prev = list_entry(entry->list.prev, struct ca91cx42_dma_entry,
1059			list);
1060		/* We need the bus address for the pointer */
1061		desc_ptr = virt_to_bus(&(entry->descriptor));
1062		prev->descriptor.dcpp = desc_ptr & ~CA91CX42_DCPP_M;
1063	}
1064
1065	return 0;
1066
1067err_cycle:
1068err_aspace:
1069err_direct:
1070err_align:
1071	kfree(entry);
1072err_mem:
1073	return retval;
1074}
1075
1076static int ca91cx42_dma_busy(struct vme_bridge *ca91cx42_bridge)
1077{
1078	u32 tmp;
1079	struct ca91cx42_driver *bridge;
1080
1081	bridge = ca91cx42_bridge->driver_priv;
1082
1083	tmp = ioread32(bridge->base + DGCS);
1084
1085	if (tmp & CA91CX42_DGCS_ACT)
1086		return 0;
1087	else
1088		return 1;
1089}
1090
/*
 * Submit a previously-built DMA descriptor list to the hardware and wait
 * for it to complete.  Only one list may run per controller at a time:
 * if another list is already on the 'running' queue, -EBUSY is returned
 * (a pending queue is not implemented yet).
 */
int ca91cx42_dma_list_exec(struct vme_dma_list *list)
{
	struct vme_dma_resource *ctrlr;
	struct ca91cx42_dma_entry *entry;
	int retval = 0;
	dma_addr_t bus_addr;
	u32 val;
	struct device *dev;
	struct ca91cx42_driver *bridge;

	ctrlr = list->parent;

	bridge = ctrlr->parent->driver_priv;
	dev = ctrlr->parent->parent;

	mutex_lock(&(ctrlr->mtx));

	if (!(list_empty(&(ctrlr->running)))) {
		/* Need to add to pending here */
		mutex_unlock(&(ctrlr->mtx));
		return -EBUSY;
	} else {
		list_add(&(list->list), &(ctrlr->running));
	}

	/* Get first bus address and write into registers */
	entry = list_first_entry(&(list->entries), struct ca91cx42_dma_entry,
		list);

	/* Hardware needs the bus address of the first descriptor; the rest
	 * are chained via each descriptor's dcpp field */
	bus_addr = virt_to_bus(&(entry->descriptor));

	mutex_unlock(&(ctrlr->mtx));

	iowrite32(0, bridge->base + DTBC);
	iowrite32(bus_addr & ~CA91CX42_DCPP_M, bridge->base + DCPP);

	/* Start the operation */
	val = ioread32(bridge->base + DGCS);

	/* Preserve the VON/VOFF throttling fields, clear everything else */
	val &= (CA91CX42_DGCS_VON_M | CA91CX42_DGCS_VOFF_M);

	/* Chained mode plus write-1-to-clear of all status bits */
	val |= (CA91CX42_DGCS_CHAIN | CA91CX42_DGCS_STOP | CA91CX42_DGCS_HALT |
		CA91CX42_DGCS_DONE | CA91CX42_DGCS_LERR | CA91CX42_DGCS_VERR |
		CA91CX42_DGCS_PERR);

	iowrite32(val, bridge->base + DGCS);

	val |= CA91CX42_DGCS_GO;

	iowrite32(val, bridge->base + DGCS);

	/* Sleep until the engine goes inactive (woken by the DMA irq) */
	wait_event_interruptible(bridge->dma_queue,
		ca91cx42_dma_busy(ctrlr->parent));

	/*
	 * Read status register, this register is valid until we kick off a
	 * new transfer.
	 */
	val = ioread32(bridge->base + DGCS);

	if (val & (CA91CX42_DGCS_LERR | CA91CX42_DGCS_VERR |
		CA91CX42_DGCS_PERR)) {

		dev_err(dev, "ca91c042: DMA Error. DGCS=%08X\n", val);
		val = ioread32(bridge->base + DCTL);
	}

	/* NOTE(review): retval is never set after submission, so hardware
	 * errors are logged but 0 is still returned -- confirm intended. */

	/* Remove list from running list */
	mutex_lock(&(ctrlr->mtx));
	list_del(&(list->list));
	mutex_unlock(&(ctrlr->mtx));

	return retval;

}
1166
1167int ca91cx42_dma_list_empty(struct vme_dma_list *list)
1168{
1169	struct list_head *pos, *temp;
1170	struct ca91cx42_dma_entry *entry;
1171
1172	/* detach and free each entry */
1173	list_for_each_safe(pos, temp, &(list->entries)) {
1174		list_del(pos);
1175		entry = list_entry(pos, struct ca91cx42_dma_entry, list);
1176		kfree(entry);
1177	}
1178
1179	return 0;
1180}
1181
1182/*
1183 * All 4 location monitors reside at the same base - this is therefore a
1184 * system wide configuration.
1185 *
1186 * This does not enable the LM monitor - that should be done when the first
1187 * callback is attached and disabled when the last callback is removed.
1188 */
1189int ca91cx42_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
1190	vme_address_t aspace, vme_cycle_t cycle)
1191{
1192	u32 temp_base, lm_ctl = 0;
1193	int i;
1194	struct ca91cx42_driver *bridge;
1195	struct device *dev;
1196
1197	bridge = lm->parent->driver_priv;
1198	dev = lm->parent->parent;
1199
1200	/* Check the alignment of the location monitor */
1201	temp_base = (u32)lm_base;
1202	if (temp_base & 0xffff) {
1203		dev_err(dev, "Location monitor must be aligned to 64KB "
1204			"boundary");
1205		return -EINVAL;
1206	}
1207
1208	mutex_lock(&(lm->mtx));
1209
1210	/* If we already have a callback attached, we can't move it! */
1211	for (i = 0; i < lm->monitors; i++) {
1212		if (bridge->lm_callback[i] != NULL) {
1213			mutex_unlock(&(lm->mtx));
1214			dev_err(dev, "Location monitor callback attached, "
1215				"can't reset\n");
1216			return -EBUSY;
1217		}
1218	}
1219
1220	switch (aspace) {
1221	case VME_A16:
1222		lm_ctl |= CA91CX42_LM_CTL_AS_A16;
1223		break;
1224	case VME_A24:
1225		lm_ctl |= CA91CX42_LM_CTL_AS_A24;
1226		break;
1227	case VME_A32:
1228		lm_ctl |= CA91CX42_LM_CTL_AS_A32;
1229		break;
1230	default:
1231		mutex_unlock(&(lm->mtx));
1232		dev_err(dev, "Invalid address space\n");
1233		return -EINVAL;
1234		break;
1235	}
1236
1237	if (cycle & VME_SUPER)
1238		lm_ctl |= CA91CX42_LM_CTL_SUPR;
1239	if (cycle & VME_USER)
1240		lm_ctl |= CA91CX42_LM_CTL_NPRIV;
1241	if (cycle & VME_PROG)
1242		lm_ctl |= CA91CX42_LM_CTL_PGM;
1243	if (cycle & VME_DATA)
1244		lm_ctl |= CA91CX42_LM_CTL_DATA;
1245
1246	iowrite32(lm_base, bridge->base + LM_BS);
1247	iowrite32(lm_ctl, bridge->base + LM_CTL);
1248
1249	mutex_unlock(&(lm->mtx));
1250
1251	return 0;
1252}
1253
1254/* Get configuration of the callback monitor and return whether it is enabled
1255 * or disabled.
1256 */
1257int ca91cx42_lm_get(struct vme_lm_resource *lm, unsigned long long *lm_base,
1258	vme_address_t *aspace, vme_cycle_t *cycle)
1259{
1260	u32 lm_ctl, enabled = 0;
1261	struct ca91cx42_driver *bridge;
1262
1263	bridge = lm->parent->driver_priv;
1264
1265	mutex_lock(&(lm->mtx));
1266
1267	*lm_base = (unsigned long long)ioread32(bridge->base + LM_BS);
1268	lm_ctl = ioread32(bridge->base + LM_CTL);
1269
1270	if (lm_ctl & CA91CX42_LM_CTL_EN)
1271		enabled = 1;
1272
1273	if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A16)
1274		*aspace = VME_A16;
1275	if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A24)
1276		*aspace = VME_A24;
1277	if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A32)
1278		*aspace = VME_A32;
1279
1280	*cycle = 0;
1281	if (lm_ctl & CA91CX42_LM_CTL_SUPR)
1282		*cycle |= VME_SUPER;
1283	if (lm_ctl & CA91CX42_LM_CTL_NPRIV)
1284		*cycle |= VME_USER;
1285	if (lm_ctl & CA91CX42_LM_CTL_PGM)
1286		*cycle |= VME_PROG;
1287	if (lm_ctl & CA91CX42_LM_CTL_DATA)
1288		*cycle |= VME_DATA;
1289
1290	mutex_unlock(&(lm->mtx));
1291
1292	return enabled;
1293}
1294
1295/*
1296 * Attach a callback to a specific location monitor.
1297 *
1298 * Callback will be passed the monitor triggered.
1299 */
int ca91cx42_lm_attach(struct vme_lm_resource *lm, int monitor,
	void (*callback)(int))
{
	u32 lm_ctl, tmp;
	struct ca91cx42_driver *bridge;
	struct device *dev;

	bridge = lm->parent->driver_priv;
	dev = lm->parent->parent;

	mutex_lock(&(lm->mtx));

	/* Ensure that the location monitor is configured - need PGM or DATA */
	lm_ctl = ioread32(bridge->base + LM_CTL);
	if ((lm_ctl & (CA91CX42_LM_CTL_PGM | CA91CX42_LM_CTL_DATA)) == 0) {
		mutex_unlock(&(lm->mtx));
		dev_err(dev, "Location monitor not properly configured\n");
		return -EINVAL;
	}

	/* Check that a callback isn't already attached */
	/* NOTE(review): 'monitor' is not range-checked against lm->monitors
	 * before indexing lm_callback[] - callers must pass 0..3. */
	if (bridge->lm_callback[monitor] != NULL) {
		mutex_unlock(&(lm->mtx));
		dev_err(dev, "Existing callback attached\n");
		return -EBUSY;
	}

	/* Attach callback */
	/* Install the callback before unmasking the interrupt so the IRQ
	 * handler never sees an enabled monitor with a NULL callback. */
	bridge->lm_callback[monitor] = callback;

	/* Enable Location Monitor interrupt */
	tmp = ioread32(bridge->base + LINT_EN);
	tmp |= CA91CX42_LINT_LM[monitor];
	iowrite32(tmp, bridge->base + LINT_EN);

	/* Ensure that global Location Monitor Enable set */
	if ((lm_ctl & CA91CX42_LM_CTL_EN) == 0) {
		lm_ctl |= CA91CX42_LM_CTL_EN;
		iowrite32(lm_ctl, bridge->base + LM_CTL);
	}

	mutex_unlock(&(lm->mtx));

	return 0;
}
1345
1346/*
 * Detach a callback function from a specific location monitor.
1348 */
1349int ca91cx42_lm_detach(struct vme_lm_resource *lm, int monitor)
1350{
1351	u32 tmp;
1352	struct ca91cx42_driver *bridge;
1353
1354	bridge = lm->parent->driver_priv;
1355
1356	mutex_lock(&(lm->mtx));
1357
1358	/* Disable Location Monitor and ensure previous interrupts are clear */
1359	tmp = ioread32(bridge->base + LINT_EN);
1360	tmp &= ~CA91CX42_LINT_LM[monitor];
1361	iowrite32(tmp, bridge->base + LINT_EN);
1362
1363	iowrite32(CA91CX42_LINT_LM[monitor],
1364		 bridge->base + LINT_STAT);
1365
1366	/* Detach callback */
1367	bridge->lm_callback[monitor] = NULL;
1368
1369	/* If all location monitors disabled, disable global Location Monitor */
1370	if ((tmp & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
1371			CA91CX42_LINT_LM3)) == 0) {
1372		tmp = ioread32(bridge->base + LM_CTL);
1373		tmp &= ~CA91CX42_LM_CTL_EN;
1374		iowrite32(tmp, bridge->base + LM_CTL);
1375	}
1376
1377	mutex_unlock(&(lm->mtx));
1378
1379	return 0;
1380}
1381
1382int ca91cx42_slot_get(struct vme_bridge *ca91cx42_bridge)
1383{
1384	u32 slot = 0;
1385	struct ca91cx42_driver *bridge;
1386
1387	bridge = ca91cx42_bridge->driver_priv;
1388
1389	if (!geoid) {
1390		slot = ioread32(bridge->base + VCSR_BS);
1391		slot = ((slot & CA91CX42_VCSR_BS_SLOT_M) >> 27);
1392	} else
1393		slot = geoid;
1394
1395	return (int)slot;
1396
1397}
1398
/* Module entry point: register the PCI driver with the PCI core. */
static int __init ca91cx42_init(void)
{
	return pci_register_driver(&ca91cx42_driver);
}
1403
1404/*
1405 * Configure CR/CSR space
1406 *
1407 * Access to the CR/CSR can be configured at power-up. The location of the
1408 * CR/CSR registers in the CR/CSR address space is determined by the boards
1409 * Auto-ID or Geographic address. This function ensures that the window is
 * enabled at an offset consistent with the board's geographic address.
1411 */
static int ca91cx42_crcsr_init(struct vme_bridge *ca91cx42_bridge,
	struct pci_dev *pdev)
{
	unsigned int crcsr_addr;
	int tmp, slot;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	slot = ca91cx42_slot_get(ca91cx42_bridge);

	/* Write CSR Base Address if slot ID is supplied as a module param */
	/* Slot number occupies bits 31:27 of VCSR_BS (see slot_get decode) */
	if (geoid)
		iowrite32(geoid << 27, bridge->base + VCSR_BS);

	dev_info(&pdev->dev, "CR/CSR Offset: %d\n", slot);
	if (slot == 0) {
		/* Slot 0 means auto-ID never completed / no geoid override */
		dev_err(&pdev->dev, "Slot number is unset, not configuring "
			"CR/CSR space\n");
		return -EINVAL;
	}

	/* Allocate mem for CR/CSR image */
	/* DMA-coherent buffer the VME bus will access via the VCSR window */
	bridge->crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
		&(bridge->crcsr_bus));
	if (bridge->crcsr_kernel == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
			"image\n");
		return -ENOMEM;
	}

	memset(bridge->crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);

	/* Each slot owns a 512KB region of CR/CSR space; VCSR_TO holds the
	 * PCI-to-VME translation offset so accesses to this slot's region
	 * land in our buffer. */
	crcsr_addr = slot * (512 * 1024);
	iowrite32(bridge->crcsr_bus - crcsr_addr, bridge->base + VCSR_TO);

	/* Enable the CR/CSR window */
	tmp = ioread32(bridge->base + VCSR_CTL);
	tmp |= CA91CX42_VCSR_CTL_EN;
	iowrite32(tmp, bridge->base + VCSR_CTL);

	return 0;
}
1454
1455static void ca91cx42_crcsr_exit(struct vme_bridge *ca91cx42_bridge,
1456	struct pci_dev *pdev)
1457{
1458	u32 tmp;
1459	struct ca91cx42_driver *bridge;
1460
1461	bridge = ca91cx42_bridge->driver_priv;
1462
1463	/* Turn off CR/CSR space */
1464	tmp = ioread32(bridge->base + VCSR_CTL);
1465	tmp &= ~CA91CX42_VCSR_CTL_EN;
1466	iowrite32(tmp, bridge->base + VCSR_CTL);
1467
1468	/* Free image */
1469	iowrite32(0, bridge->base + VCSR_TO);
1470
1471	pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel,
1472		bridge->crcsr_bus);
1473}
1474
1475static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1476{
1477	int retval, i;
1478	u32 data;
1479	struct list_head *pos = NULL;
1480	struct vme_bridge *ca91cx42_bridge;
1481	struct ca91cx42_driver *ca91cx42_device;
1482	struct vme_master_resource *master_image;
1483	struct vme_slave_resource *slave_image;
1484	struct vme_dma_resource *dma_ctrlr;
1485	struct vme_lm_resource *lm;
1486
1487	/* We want to support more than one of each bridge so we need to
1488	 * dynamically allocate the bridge structure
1489	 */
1490	ca91cx42_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);
1491
1492	if (ca91cx42_bridge == NULL) {
1493		dev_err(&pdev->dev, "Failed to allocate memory for device "
1494			"structure\n");
1495		retval = -ENOMEM;
1496		goto err_struct;
1497	}
1498
1499	ca91cx42_device = kzalloc(sizeof(struct ca91cx42_driver), GFP_KERNEL);
1500
1501	if (ca91cx42_device == NULL) {
1502		dev_err(&pdev->dev, "Failed to allocate memory for device "
1503			"structure\n");
1504		retval = -ENOMEM;
1505		goto err_driver;
1506	}
1507
1508	ca91cx42_bridge->driver_priv = ca91cx42_device;
1509
1510	/* Enable the device */
1511	retval = pci_enable_device(pdev);
1512	if (retval) {
1513		dev_err(&pdev->dev, "Unable to enable device\n");
1514		goto err_enable;
1515	}
1516
1517	/* Map Registers */
1518	retval = pci_request_regions(pdev, driver_name);
1519	if (retval) {
1520		dev_err(&pdev->dev, "Unable to reserve resources\n");
1521		goto err_resource;
1522	}
1523
1524	/* map registers in BAR 0 */
1525	ca91cx42_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
1526		4096);
1527	if (!ca91cx42_device->base) {
1528		dev_err(&pdev->dev, "Unable to remap CRG region\n");
1529		retval = -EIO;
1530		goto err_remap;
1531	}
1532
1533	/* Check to see if the mapping worked out */
1534	data = ioread32(ca91cx42_device->base + CA91CX42_PCI_ID) & 0x0000FFFF;
1535	if (data != PCI_VENDOR_ID_TUNDRA) {
1536		dev_err(&pdev->dev, "PCI_ID check failed\n");
1537		retval = -EIO;
1538		goto err_test;
1539	}
1540
1541	/* Initialize wait queues & mutual exclusion flags */
1542	init_waitqueue_head(&(ca91cx42_device->dma_queue));
1543	init_waitqueue_head(&(ca91cx42_device->iack_queue));
1544	mutex_init(&(ca91cx42_device->vme_int));
1545	mutex_init(&(ca91cx42_device->vme_rmw));
1546
1547	ca91cx42_bridge->parent = &(pdev->dev);
1548	strcpy(ca91cx42_bridge->name, driver_name);
1549
1550	/* Setup IRQ */
1551	retval = ca91cx42_irq_init(ca91cx42_bridge);
1552	if (retval != 0) {
1553		dev_err(&pdev->dev, "Chip Initialization failed.\n");
1554		goto err_irq;
1555	}
1556
1557	/* Add master windows to list */
1558	INIT_LIST_HEAD(&(ca91cx42_bridge->master_resources));
1559	for (i = 0; i < CA91C142_MAX_MASTER; i++) {
1560		master_image = kmalloc(sizeof(struct vme_master_resource),
1561			GFP_KERNEL);
1562		if (master_image == NULL) {
1563			dev_err(&pdev->dev, "Failed to allocate memory for "
1564			"master resource structure\n");
1565			retval = -ENOMEM;
1566			goto err_master;
1567		}
1568		master_image->parent = ca91cx42_bridge;
1569		spin_lock_init(&(master_image->lock));
1570		master_image->locked = 0;
1571		master_image->number = i;
1572		master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
1573			VME_CRCSR | VME_USER1 | VME_USER2;
1574		master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
1575			VME_SUPER | VME_USER | VME_PROG | VME_DATA;
1576		master_image->width_attr = VME_D8 | VME_D16 | VME_D32 | VME_D64;
1577		memset(&(master_image->bus_resource), 0,
1578			sizeof(struct resource));
1579		master_image->kern_base  = NULL;
1580		list_add_tail(&(master_image->list),
1581			&(ca91cx42_bridge->master_resources));
1582	}
1583
1584	/* Add slave windows to list */
1585	INIT_LIST_HEAD(&(ca91cx42_bridge->slave_resources));
1586	for (i = 0; i < CA91C142_MAX_SLAVE; i++) {
1587		slave_image = kmalloc(sizeof(struct vme_slave_resource),
1588			GFP_KERNEL);
1589		if (slave_image == NULL) {
1590			dev_err(&pdev->dev, "Failed to allocate memory for "
1591			"slave resource structure\n");
1592			retval = -ENOMEM;
1593			goto err_slave;
1594		}
1595		slave_image->parent = ca91cx42_bridge;
1596		mutex_init(&(slave_image->mtx));
1597		slave_image->locked = 0;
1598		slave_image->number = i;
1599		slave_image->address_attr = VME_A24 | VME_A32 | VME_USER1 |
1600			VME_USER2;
1601
1602		/* Only windows 0 and 4 support A16 */
1603		if (i == 0 || i == 4)
1604			slave_image->address_attr |= VME_A16;
1605
1606		slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
1607			VME_SUPER | VME_USER | VME_PROG | VME_DATA;
1608		list_add_tail(&(slave_image->list),
1609			&(ca91cx42_bridge->slave_resources));
1610	}
1611
1612	/* Add dma engines to list */
1613	INIT_LIST_HEAD(&(ca91cx42_bridge->dma_resources));
1614	for (i = 0; i < CA91C142_MAX_DMA; i++) {
1615		dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
1616			GFP_KERNEL);
1617		if (dma_ctrlr == NULL) {
1618			dev_err(&pdev->dev, "Failed to allocate memory for "
1619			"dma resource structure\n");
1620			retval = -ENOMEM;
1621			goto err_dma;
1622		}
1623		dma_ctrlr->parent = ca91cx42_bridge;
1624		mutex_init(&(dma_ctrlr->mtx));
1625		dma_ctrlr->locked = 0;
1626		dma_ctrlr->number = i;
1627		dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
1628			VME_DMA_MEM_TO_VME;
1629		INIT_LIST_HEAD(&(dma_ctrlr->pending));
1630		INIT_LIST_HEAD(&(dma_ctrlr->running));
1631		list_add_tail(&(dma_ctrlr->list),
1632			&(ca91cx42_bridge->dma_resources));
1633	}
1634
1635	/* Add location monitor to list */
1636	INIT_LIST_HEAD(&(ca91cx42_bridge->lm_resources));
1637	lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
1638	if (lm == NULL) {
1639		dev_err(&pdev->dev, "Failed to allocate memory for "
1640		"location monitor resource structure\n");
1641		retval = -ENOMEM;
1642		goto err_lm;
1643	}
1644	lm->parent = ca91cx42_bridge;
1645	mutex_init(&(lm->mtx));
1646	lm->locked = 0;
1647	lm->number = 1;
1648	lm->monitors = 4;
1649	list_add_tail(&(lm->list), &(ca91cx42_bridge->lm_resources));
1650
1651	ca91cx42_bridge->slave_get = ca91cx42_slave_get;
1652	ca91cx42_bridge->slave_set = ca91cx42_slave_set;
1653	ca91cx42_bridge->master_get = ca91cx42_master_get;
1654	ca91cx42_bridge->master_set = ca91cx42_master_set;
1655	ca91cx42_bridge->master_read = ca91cx42_master_read;
1656	ca91cx42_bridge->master_write = ca91cx42_master_write;
1657	ca91cx42_bridge->master_rmw = ca91cx42_master_rmw;
1658	ca91cx42_bridge->dma_list_add = ca91cx42_dma_list_add;
1659	ca91cx42_bridge->dma_list_exec = ca91cx42_dma_list_exec;
1660	ca91cx42_bridge->dma_list_empty = ca91cx42_dma_list_empty;
1661	ca91cx42_bridge->irq_set = ca91cx42_irq_set;
1662	ca91cx42_bridge->irq_generate = ca91cx42_irq_generate;
1663	ca91cx42_bridge->lm_set = ca91cx42_lm_set;
1664	ca91cx42_bridge->lm_get = ca91cx42_lm_get;
1665	ca91cx42_bridge->lm_attach = ca91cx42_lm_attach;
1666	ca91cx42_bridge->lm_detach = ca91cx42_lm_detach;
1667	ca91cx42_bridge->slot_get = ca91cx42_slot_get;
1668
1669	data = ioread32(ca91cx42_device->base + MISC_CTL);
1670	dev_info(&pdev->dev, "Board is%s the VME system controller\n",
1671		(data & CA91CX42_MISC_CTL_SYSCON) ? "" : " not");
1672	dev_info(&pdev->dev, "Slot ID is %d\n",
1673		ca91cx42_slot_get(ca91cx42_bridge));
1674
1675	if (ca91cx42_crcsr_init(ca91cx42_bridge, pdev))
1676		dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
1677
1678	/* Need to save ca91cx42_bridge pointer locally in link list for use in
1679	 * ca91cx42_remove()
1680	 */
1681	retval = vme_register_bridge(ca91cx42_bridge);
1682	if (retval != 0) {
1683		dev_err(&pdev->dev, "Chip Registration failed.\n");
1684		goto err_reg;
1685	}
1686
1687	pci_set_drvdata(pdev, ca91cx42_bridge);
1688
1689	return 0;
1690
1691	vme_unregister_bridge(ca91cx42_bridge);
1692err_reg:
1693	ca91cx42_crcsr_exit(ca91cx42_bridge, pdev);
1694err_lm:
1695	/* resources are stored in link list */
1696	list_for_each(pos, &(ca91cx42_bridge->lm_resources)) {
1697		lm = list_entry(pos, struct vme_lm_resource, list);
1698		list_del(pos);
1699		kfree(lm);
1700	}
1701err_dma:
1702	/* resources are stored in link list */
1703	list_for_each(pos, &(ca91cx42_bridge->dma_resources)) {
1704		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
1705		list_del(pos);
1706		kfree(dma_ctrlr);
1707	}
1708err_slave:
1709	/* resources are stored in link list */
1710	list_for_each(pos, &(ca91cx42_bridge->slave_resources)) {
1711		slave_image = list_entry(pos, struct vme_slave_resource, list);
1712		list_del(pos);
1713		kfree(slave_image);
1714	}
1715err_master:
1716	/* resources are stored in link list */
1717	list_for_each(pos, &(ca91cx42_bridge->master_resources)) {
1718		master_image = list_entry(pos, struct vme_master_resource,
1719			list);
1720		list_del(pos);
1721		kfree(master_image);
1722	}
1723
1724	ca91cx42_irq_exit(ca91cx42_device, pdev);
1725err_irq:
1726err_test:
1727	iounmap(ca91cx42_device->base);
1728err_remap:
1729	pci_release_regions(pdev);
1730err_resource:
1731	pci_disable_device(pdev);
1732err_enable:
1733	kfree(ca91cx42_device);
1734err_driver:
1735	kfree(ca91cx42_bridge);
1736err_struct:
1737	return retval;
1738
1739}
1740
1741void ca91cx42_remove(struct pci_dev *pdev)
1742{
1743	struct list_head *pos = NULL;
1744	struct vme_master_resource *master_image;
1745	struct vme_slave_resource *slave_image;
1746	struct vme_dma_resource *dma_ctrlr;
1747	struct vme_lm_resource *lm;
1748	struct ca91cx42_driver *bridge;
1749	struct vme_bridge *ca91cx42_bridge = pci_get_drvdata(pdev);
1750
1751	bridge = ca91cx42_bridge->driver_priv;
1752
1753
1754	/* Turn off Ints */
1755	iowrite32(0, bridge->base + LINT_EN);
1756
1757	/* Turn off the windows */
1758	iowrite32(0x00800000, bridge->base + LSI0_CTL);
1759	iowrite32(0x00800000, bridge->base + LSI1_CTL);
1760	iowrite32(0x00800000, bridge->base + LSI2_CTL);
1761	iowrite32(0x00800000, bridge->base + LSI3_CTL);
1762	iowrite32(0x00800000, bridge->base + LSI4_CTL);
1763	iowrite32(0x00800000, bridge->base + LSI5_CTL);
1764	iowrite32(0x00800000, bridge->base + LSI6_CTL);
1765	iowrite32(0x00800000, bridge->base + LSI7_CTL);
1766	iowrite32(0x00F00000, bridge->base + VSI0_CTL);
1767	iowrite32(0x00F00000, bridge->base + VSI1_CTL);
1768	iowrite32(0x00F00000, bridge->base + VSI2_CTL);
1769	iowrite32(0x00F00000, bridge->base + VSI3_CTL);
1770	iowrite32(0x00F00000, bridge->base + VSI4_CTL);
1771	iowrite32(0x00F00000, bridge->base + VSI5_CTL);
1772	iowrite32(0x00F00000, bridge->base + VSI6_CTL);
1773	iowrite32(0x00F00000, bridge->base + VSI7_CTL);
1774
1775	vme_unregister_bridge(ca91cx42_bridge);
1776
1777	ca91cx42_crcsr_exit(ca91cx42_bridge, pdev);
1778
1779	/* resources are stored in link list */
1780	list_for_each(pos, &(ca91cx42_bridge->lm_resources)) {
1781		lm = list_entry(pos, struct vme_lm_resource, list);
1782		list_del(pos);
1783		kfree(lm);
1784	}
1785
1786	/* resources are stored in link list */
1787	list_for_each(pos, &(ca91cx42_bridge->dma_resources)) {
1788		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
1789		list_del(pos);
1790		kfree(dma_ctrlr);
1791	}
1792
1793	/* resources are stored in link list */
1794	list_for_each(pos, &(ca91cx42_bridge->slave_resources)) {
1795		slave_image = list_entry(pos, struct vme_slave_resource, list);
1796		list_del(pos);
1797		kfree(slave_image);
1798	}
1799
1800	/* resources are stored in link list */
1801	list_for_each(pos, &(ca91cx42_bridge->master_resources)) {
1802		master_image = list_entry(pos, struct vme_master_resource,
1803			list);
1804		list_del(pos);
1805		kfree(master_image);
1806	}
1807
1808	ca91cx42_irq_exit(bridge, pdev);
1809
1810	iounmap(bridge->base);
1811
1812	pci_release_regions(pdev);
1813
1814	pci_disable_device(pdev);
1815
1816	kfree(ca91cx42_bridge);
1817}
1818
/* Module exit point: unregister the PCI driver (triggers remove()). */
static void __exit ca91cx42_exit(void)
{
	pci_unregister_driver(&ca91cx42_driver);
}
1823
/* geoid: optional override for the slot number otherwise read from VCSR_BS */
MODULE_PARM_DESC(geoid, "Override geographical addressing");
module_param(geoid, int, 0);

MODULE_DESCRIPTION("VME driver for the Tundra Universe II VME bridge");
MODULE_LICENSE("GPL");

module_init(ca91cx42_init);
module_exit(ca91cx42_exit);
1832