• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/staging/vme/
1/*
2 * VME Bridge Framework
3 *
4 * Author: Martyn Welch <martyn.welch@ge.com>
5 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
6 *
7 * Based on work by Tom Armistead and Ajit Prem
8 * Copyright 2004 Motorola Inc.
9 *
10 * This program is free software; you can redistribute  it and/or modify it
11 * under  the terms of  the GNU General  Public License as published by the
12 * Free Software Foundation;  either version 2 of the  License, or (at your
13 * option) any later version.
14 */
15
16#include <linux/module.h>
17#include <linux/moduleparam.h>
18#include <linux/mm.h>
19#include <linux/types.h>
20#include <linux/kernel.h>
21#include <linux/errno.h>
22#include <linux/pci.h>
23#include <linux/poll.h>
24#include <linux/highmem.h>
25#include <linux/interrupt.h>
26#include <linux/pagemap.h>
27#include <linux/device.h>
28#include <linux/dma-mapping.h>
29#include <linux/syscalls.h>
30#include <linux/mutex.h>
31#include <linux/spinlock.h>
32#include <linux/slab.h>
33
34#include "vme.h"
35#include "vme_bridge.h"
36
/* Bitmask and mutex to keep track of bridge numbers */
static unsigned int vme_bus_numbers;
static DEFINE_MUTEX(vme_bus_num_mtx);

/* Forward declarations so registration code can reference init/exit */
static void __exit vme_exit(void);
static int __init vme_init(void);
43
44
45/*
46 * Find the bridge resource associated with a specific device resource
47 */
48static struct vme_bridge *dev_to_bridge(struct device *dev)
49{
50	return dev->platform_data;
51}
52
53/*
54 * Find the bridge that the resource is associated with.
55 */
56static struct vme_bridge *find_bridge(struct vme_resource *resource)
57{
58	/* Get list to search */
59	switch (resource->type) {
60	case VME_MASTER:
61		return list_entry(resource->entry, struct vme_master_resource,
62			list)->parent;
63		break;
64	case VME_SLAVE:
65		return list_entry(resource->entry, struct vme_slave_resource,
66			list)->parent;
67		break;
68	case VME_DMA:
69		return list_entry(resource->entry, struct vme_dma_resource,
70			list)->parent;
71		break;
72	case VME_LM:
73		return list_entry(resource->entry, struct vme_lm_resource,
74			list)->parent;
75		break;
76	default:
77		printk(KERN_ERR "Unknown resource type\n");
78		return NULL;
79		break;
80	}
81}
82
/*
 * Allocate a block of PCI-consistent (coherent) memory on behalf of
 * the bridge that owns @resource.
 *
 * @resource: resource identifying the owning bridge (any type).
 * @size: number of bytes to allocate.
 * @dma: out parameter receiving the bus address of the buffer.
 *
 * Returns the kernel virtual address of the buffer, or NULL on error.
 * NOTE(review): assumes the bridge's parent device is always embedded
 * in a struct pci_dev — container_of() is unchecked. Confirm this
 * holds for any non-PCI bridge drivers.
 */
void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
	dma_addr_t *dma)
{
	struct vme_bridge *bridge;
	struct pci_dev *pdev;

	if (resource == NULL) {
		printk(KERN_ERR "No resource\n");
		return NULL;
	}

	bridge = find_bridge(resource);
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find bridge\n");
		return NULL;
	}

	/* Find pci_dev container of dev */
	if (bridge->parent == NULL) {
		printk(KERN_ERR "Dev entry NULL\n");
		return NULL;
	}
	pdev = container_of(bridge->parent, struct pci_dev, dev);

	return pci_alloc_consistent(pdev, size, dma);
}
109EXPORT_SYMBOL(vme_alloc_consistent);
110
111void vme_free_consistent(struct vme_resource *resource, size_t size,
112	void *vaddr, dma_addr_t dma)
113{
114	struct vme_bridge *bridge;
115	struct pci_dev *pdev;
116
117	if (resource == NULL) {
118		printk(KERN_ERR "No resource\n");
119		return;
120	}
121
122	bridge = find_bridge(resource);
123	if (bridge == NULL) {
124		printk(KERN_ERR "Can't find bridge\n");
125		return;
126	}
127
128	/* Find pci_dev container of dev */
129	pdev = container_of(bridge->parent, struct pci_dev, dev);
130
131	pci_free_consistent(pdev, size, vaddr, dma);
132}
133EXPORT_SYMBOL(vme_free_consistent);
134
135size_t vme_get_size(struct vme_resource *resource)
136{
137	int enabled, retval;
138	unsigned long long base, size;
139	dma_addr_t buf_base;
140	vme_address_t aspace;
141	vme_cycle_t cycle;
142	vme_width_t dwidth;
143
144	switch (resource->type) {
145	case VME_MASTER:
146		retval = vme_master_get(resource, &enabled, &base, &size,
147			&aspace, &cycle, &dwidth);
148
149		return size;
150		break;
151	case VME_SLAVE:
152		retval = vme_slave_get(resource, &enabled, &base, &size,
153			&buf_base, &aspace, &cycle);
154
155		return size;
156		break;
157	case VME_DMA:
158		return 0;
159		break;
160	default:
161		printk(KERN_ERR "Unknown resource type\n");
162		return 0;
163		break;
164	}
165}
166EXPORT_SYMBOL(vme_get_size);
167
168static int vme_check_window(vme_address_t aspace, unsigned long long vme_base,
169	unsigned long long size)
170{
171	int retval = 0;
172
173	switch (aspace) {
174	case VME_A16:
175		if (((vme_base + size) > VME_A16_MAX) ||
176				(vme_base > VME_A16_MAX))
177			retval = -EFAULT;
178		break;
179	case VME_A24:
180		if (((vme_base + size) > VME_A24_MAX) ||
181				(vme_base > VME_A24_MAX))
182			retval = -EFAULT;
183		break;
184	case VME_A32:
185		if (((vme_base + size) > VME_A32_MAX) ||
186				(vme_base > VME_A32_MAX))
187			retval = -EFAULT;
188		break;
189	case VME_A64:
190		/*
191		 * Any value held in an unsigned long long can be used as the
192		 * base
193		 */
194		break;
195	case VME_CRCSR:
196		if (((vme_base + size) > VME_CRCSR_MAX) ||
197				(vme_base > VME_CRCSR_MAX))
198			retval = -EFAULT;
199		break;
200	case VME_USER1:
201	case VME_USER2:
202	case VME_USER3:
203	case VME_USER4:
204		/* User Defined */
205		break;
206	default:
207		printk(KERN_ERR "Invalid address space\n");
208		retval = -EINVAL;
209		break;
210	}
211
212	return retval;
213}
214
215/*
216 * Request a slave image with specific attributes, return some unique
217 * identifier.
218 */
struct vme_resource *vme_slave_request(struct device *dev,
	vme_address_t address, vme_cycle_t cycle)
{
	struct vme_bridge *bridge;
	struct list_head *slave_pos = NULL;
	struct vme_slave_resource *allocated_image = NULL;
	struct vme_slave_resource *slave_image = NULL;
	struct vme_resource *resource = NULL;

	bridge = dev_to_bridge(dev);
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through slave resources */
	list_for_each(slave_pos, &(bridge->slave_resources)) {
		slave_image = list_entry(slave_pos,
			struct vme_slave_resource, list);

		if (slave_image == NULL) {
			printk(KERN_ERR "Registered NULL Slave resource\n");
			continue;
		}

		/* Find an unlocked and compatible image */
		mutex_lock(&(slave_image->mtx));
		if (((slave_image->address_attr & address) == address) &&
			((slave_image->cycle_attr & cycle) == cycle) &&
			(slave_image->locked == 0)) {

			/* Claim the image while still holding its mutex */
			slave_image->locked = 1;
			mutex_unlock(&(slave_image->mtx));
			allocated_image = slave_image;
			break;
		}
		mutex_unlock(&(slave_image->mtx));
	}

	/* No free image */
	if (allocated_image == NULL)
		goto err_image;

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_WARNING "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_SLAVE;
	resource->entry = &(allocated_image->list);

	return resource;

err_alloc:
	/* Unlock image - release the claim taken in the search loop */
	mutex_lock(&(slave_image->mtx));
	slave_image->locked = 0;
	mutex_unlock(&(slave_image->mtx));
err_image:
err_bus:
	return NULL;
}
281EXPORT_SYMBOL(vme_slave_request);
282
/*
 * Configure a previously requested slave window.
 *
 * @resource: slave resource returned by vme_slave_request().
 * @enabled: non-zero to enable the window.
 * @vme_base/@size: VME base address and extent of the window.
 * @buf_base: bus address of the buffer backing the window.
 * @aspace/@cycle: address space and cycle attributes to respond to.
 *
 * Returns 0 on success, -EINVAL for a bad resource or unsupported
 * attributes, -ENOSYS if the bridge lacks slave support, or the
 * result of vme_check_window()/the bridge driver's slave_set.
 */
int vme_slave_set(struct vme_resource *resource, int enabled,
	unsigned long long vme_base, unsigned long long size,
	dma_addr_t buf_base, vme_address_t aspace, vme_cycle_t cycle)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_slave_resource *image;
	int retval;

	if (resource->type != VME_SLAVE) {
		printk(KERN_ERR "Not a slave resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_slave_resource, list);

	if (bridge->slave_set == NULL) {
		printk(KERN_ERR "Function not supported\n");
		return -ENOSYS;
	}

	/* Requested attributes must be a subset of the image's abilities */
	if (!(((image->address_attr & aspace) == aspace) &&
		((image->cycle_attr & cycle) == cycle))) {
		printk(KERN_ERR "Invalid attributes\n");
		return -EINVAL;
	}

	retval = vme_check_window(aspace, vme_base, size);
	if (retval)
		return retval;

	return bridge->slave_set(image, enabled, vme_base, size, buf_base,
		aspace, cycle);
}
316EXPORT_SYMBOL(vme_slave_set);
317
318int vme_slave_get(struct vme_resource *resource, int *enabled,
319	unsigned long long *vme_base, unsigned long long *size,
320	dma_addr_t *buf_base, vme_address_t *aspace, vme_cycle_t *cycle)
321{
322	struct vme_bridge *bridge = find_bridge(resource);
323	struct vme_slave_resource *image;
324
325	if (resource->type != VME_SLAVE) {
326		printk(KERN_ERR "Not a slave resource\n");
327		return -EINVAL;
328	}
329
330	image = list_entry(resource->entry, struct vme_slave_resource, list);
331
332	if (bridge->slave_get == NULL) {
333		printk(KERN_ERR "vme_slave_get not supported\n");
334		return -EINVAL;
335	}
336
337	return bridge->slave_get(image, enabled, vme_base, size, buf_base,
338		aspace, cycle);
339}
340EXPORT_SYMBOL(vme_slave_get);
341
/*
 * Release a slave window obtained with vme_slave_request() and free
 * the resource wrapper. The underlying image becomes available for
 * other users.
 */
void vme_slave_free(struct vme_resource *resource)
{
	struct vme_slave_resource *slave_image;

	if (resource->type != VME_SLAVE) {
		printk(KERN_ERR "Not a slave resource\n");
		return;
	}

	slave_image = list_entry(resource->entry, struct vme_slave_resource,
		list);
	if (slave_image == NULL) {
		printk(KERN_ERR "Can't find slave resource\n");
		return;
	}

	/* Unlock image */
	mutex_lock(&(slave_image->mtx));
	if (slave_image->locked == 0)
		printk(KERN_ERR "Image is already free\n");

	slave_image->locked = 0;
	mutex_unlock(&(slave_image->mtx));

	/* Free up resource memory */
	kfree(resource);
}
369EXPORT_SYMBOL(vme_slave_free);
370
371/*
372 * Request a master image with specific attributes, return some unique
373 * identifier.
374 */
375struct vme_resource *vme_master_request(struct device *dev,
376	vme_address_t address, vme_cycle_t cycle, vme_width_t dwidth)
377{
378	struct vme_bridge *bridge;
379	struct list_head *master_pos = NULL;
380	struct vme_master_resource *allocated_image = NULL;
381	struct vme_master_resource *master_image = NULL;
382	struct vme_resource *resource = NULL;
383
384	bridge = dev_to_bridge(dev);
385	if (bridge == NULL) {
386		printk(KERN_ERR "Can't find VME bus\n");
387		goto err_bus;
388	}
389
390	/* Loop through master resources */
391	list_for_each(master_pos, &(bridge->master_resources)) {
392		master_image = list_entry(master_pos,
393			struct vme_master_resource, list);
394
395		if (master_image == NULL) {
396			printk(KERN_WARNING "Registered NULL master resource\n");
397			continue;
398		}
399
400		/* Find an unlocked and compatible image */
401		spin_lock(&(master_image->lock));
402		if (((master_image->address_attr & address) == address) &&
403			((master_image->cycle_attr & cycle) == cycle) &&
404			((master_image->width_attr & dwidth) == dwidth) &&
405			(master_image->locked == 0)) {
406
407			master_image->locked = 1;
408			spin_unlock(&(master_image->lock));
409			allocated_image = master_image;
410			break;
411		}
412		spin_unlock(&(master_image->lock));
413	}
414
415	/* Check to see if we found a resource */
416	if (allocated_image == NULL) {
417		printk(KERN_ERR "Can't find a suitable resource\n");
418		goto err_image;
419	}
420
421	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
422	if (resource == NULL) {
423		printk(KERN_ERR "Unable to allocate resource structure\n");
424		goto err_alloc;
425	}
426	resource->type = VME_MASTER;
427	resource->entry = &(allocated_image->list);
428
429	return resource;
430
431	kfree(resource);
432err_alloc:
433	/* Unlock image */
434	spin_lock(&(master_image->lock));
435	master_image->locked = 0;
436	spin_unlock(&(master_image->lock));
437err_image:
438err_bus:
439	return NULL;
440}
441EXPORT_SYMBOL(vme_master_request);
442
/*
 * Configure a previously requested master window.
 *
 * @resource: master resource returned by vme_master_request().
 * @enabled: non-zero to enable the window.
 * @vme_base/@size: VME base address and extent of the window.
 * @aspace/@cycle/@dwidth: address space, cycle and data width
 * attributes for the window; must be supported by the image.
 *
 * Returns 0 on success, -EINVAL for a bad resource or attributes, or
 * the result of vme_check_window()/the bridge driver's master_set.
 */
int vme_master_set(struct vme_resource *resource, int enabled,
	unsigned long long vme_base, unsigned long long size,
	vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;
	int retval;

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);

	if (bridge->master_set == NULL) {
		printk(KERN_WARNING "vme_master_set not supported\n");
		return -EINVAL;
	}

	/* Requested attributes must be a subset of the image's abilities */
	if (!(((image->address_attr & aspace) == aspace) &&
		((image->cycle_attr & cycle) == cycle) &&
		((image->width_attr & dwidth) == dwidth))) {
		printk(KERN_WARNING "Invalid attributes\n");
		return -EINVAL;
	}

	retval = vme_check_window(aspace, vme_base, size);
	if (retval)
		return retval;

	return bridge->master_set(image, enabled, vme_base, size, aspace,
		cycle, dwidth);
}
477EXPORT_SYMBOL(vme_master_set);
478
479int vme_master_get(struct vme_resource *resource, int *enabled,
480	unsigned long long *vme_base, unsigned long long *size,
481	vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
482{
483	struct vme_bridge *bridge = find_bridge(resource);
484	struct vme_master_resource *image;
485
486	if (resource->type != VME_MASTER) {
487		printk(KERN_ERR "Not a master resource\n");
488		return -EINVAL;
489	}
490
491	image = list_entry(resource->entry, struct vme_master_resource, list);
492
493	if (bridge->master_get == NULL) {
494		printk(KERN_WARNING "vme_master_set not supported\n");
495		return -EINVAL;
496	}
497
498	return bridge->master_get(image, enabled, vme_base, size, aspace,
499		cycle, dwidth);
500}
501EXPORT_SYMBOL(vme_master_get);
502
/*
 * Read data out of VME space into a buffer.
 *
 * @resource: master resource describing the window to read through.
 * @buf: destination kernel buffer.
 * @count: number of bytes requested; clamped to the window size.
 * @offset: byte offset into the window at which to start.
 *
 * Returns the number of bytes read (as reported by the bridge driver),
 * -EINVAL for a bad resource, or -EFAULT for an out-of-window offset.
 */
ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
	loff_t offset)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;
	size_t length;

	if (bridge->master_read == NULL) {
		printk(KERN_WARNING "Reading from resource not supported\n");
		return -EINVAL;
	}

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);

	/* Window size bounds the transfer */
	length = vme_get_size(resource);

	if (offset > length) {
		printk(KERN_WARNING "Invalid Offset\n");
		return -EFAULT;
	}

	/* Clamp the request so it does not run past the end of the window */
	if ((offset + count) > length)
		count = length - offset;

	return bridge->master_read(image, buf, count, offset);

}
538EXPORT_SYMBOL(vme_master_read);
539
/*
 * Write data out to VME space from a buffer.
 *
 * @resource: master resource describing the window to write through.
 * @buf: source kernel buffer.
 * @count: number of bytes requested; clamped to the window size.
 * @offset: byte offset into the window at which to start.
 *
 * Returns the number of bytes written (as reported by the bridge
 * driver), -EINVAL for a bad resource, or -EFAULT for an
 * out-of-window offset.
 */
ssize_t vme_master_write(struct vme_resource *resource, void *buf,
	size_t count, loff_t offset)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;
	size_t length;

	if (bridge->master_write == NULL) {
		printk(KERN_WARNING "Writing to resource not supported\n");
		return -EINVAL;
	}

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);

	/* Window size bounds the transfer */
	length = vme_get_size(resource);

	if (offset > length) {
		printk(KERN_WARNING "Invalid Offset\n");
		return -EFAULT;
	}

	/* Clamp the request so it does not run past the end of the window */
	if ((offset + count) > length)
		count = length - offset;

	return bridge->master_write(image, buf, count, offset);
}
574EXPORT_SYMBOL(vme_master_write);
575
/*
 * Perform RMW cycle to provided location.
 *
 * @resource: master resource describing the window to use.
 * @mask/@compare/@swap: read-modify-write operands passed to the
 * bridge driver.
 * @offset: byte offset into the window.
 *
 * NOTE(review): the return type is unsigned int, yet -EINVAL is
 * returned on error — callers cannot reliably distinguish an error
 * from a large read value. Changing the signature would break
 * existing callers, so this is only flagged here.
 */
unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
	unsigned int compare, unsigned int swap, loff_t offset)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;

	if (bridge->master_rmw == NULL) {
		printk(KERN_WARNING "Writing to resource not supported\n");
		return -EINVAL;
	}

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);

	return bridge->master_rmw(image, mask, compare, swap, offset);
}
599EXPORT_SYMBOL(vme_master_rmw);
600
/*
 * Release a master window obtained with vme_master_request() and free
 * the resource wrapper. The underlying image becomes available for
 * other users.
 */
void vme_master_free(struct vme_resource *resource)
{
	struct vme_master_resource *master_image;

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return;
	}

	master_image = list_entry(resource->entry, struct vme_master_resource,
		list);
	if (master_image == NULL) {
		printk(KERN_ERR "Can't find master resource\n");
		return;
	}

	/* Unlock image */
	spin_lock(&(master_image->lock));
	if (master_image->locked == 0)
		printk(KERN_ERR "Image is already free\n");

	master_image->locked = 0;
	spin_unlock(&(master_image->lock));

	/* Free up resource memory */
	kfree(resource);
}
628EXPORT_SYMBOL(vme_master_free);
629
630/*
631 * Request a DMA controller with specific attributes, return some unique
632 * identifier.
633 */
struct vme_resource *vme_dma_request(struct device *dev, vme_dma_route_t route)
{
	struct vme_bridge *bridge;
	struct list_head *dma_pos = NULL;
	struct vme_dma_resource *allocated_ctrlr = NULL;
	struct vme_dma_resource *dma_ctrlr = NULL;
	struct vme_resource *resource = NULL;

	/* Development notice: always printed, attribute checks incomplete */
	printk(KERN_ERR "No VME resource Attribute tests done\n");

	bridge = dev_to_bridge(dev);
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through DMA resources */
	list_for_each(dma_pos, &(bridge->dma_resources)) {
		dma_ctrlr = list_entry(dma_pos,
			struct vme_dma_resource, list);

		if (dma_ctrlr == NULL) {
			printk(KERN_ERR "Registered NULL DMA resource\n");
			continue;
		}

		/* Find an unlocked and compatible controller */
		mutex_lock(&(dma_ctrlr->mtx));
		if (((dma_ctrlr->route_attr & route) == route) &&
			(dma_ctrlr->locked == 0)) {

			/* Claim the controller while holding its mutex */
			dma_ctrlr->locked = 1;
			mutex_unlock(&(dma_ctrlr->mtx));
			allocated_ctrlr = dma_ctrlr;
			break;
		}
		mutex_unlock(&(dma_ctrlr->mtx));
	}

	/* Check to see if we found a resource */
	if (allocated_ctrlr == NULL)
		goto err_ctrlr;

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_WARNING "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_DMA;
	resource->entry = &(allocated_ctrlr->list);

	return resource;

err_alloc:
	/* Unlock image - release the claim taken in the search loop */
	mutex_lock(&(dma_ctrlr->mtx));
	dma_ctrlr->locked = 0;
	mutex_unlock(&(dma_ctrlr->mtx));
err_ctrlr:
err_bus:
	return NULL;
}
696EXPORT_SYMBOL(vme_dma_request);
697
698/*
699 * Start new list
700 */
701struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
702{
703	struct vme_dma_resource *ctrlr;
704	struct vme_dma_list *dma_list;
705
706	if (resource->type != VME_DMA) {
707		printk(KERN_ERR "Not a DMA resource\n");
708		return NULL;
709	}
710
711	ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
712
713	dma_list = kmalloc(sizeof(struct vme_dma_list), GFP_KERNEL);
714	if (dma_list == NULL) {
715		printk(KERN_ERR "Unable to allocate memory for new dma list\n");
716		return NULL;
717	}
718	INIT_LIST_HEAD(&(dma_list->entries));
719	dma_list->parent = ctrlr;
720	mutex_init(&(dma_list->mtx));
721
722	return dma_list;
723}
724EXPORT_SYMBOL(vme_new_dma_list);
725
726/*
727 * Create "Pattern" type attributes
728 */
729struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern,
730	vme_pattern_t type)
731{
732	struct vme_dma_attr *attributes;
733	struct vme_dma_pattern *pattern_attr;
734
735	attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
736	if (attributes == NULL) {
737		printk(KERN_ERR "Unable to allocate memory for attributes "
738			"structure\n");
739		goto err_attr;
740	}
741
742	pattern_attr = kmalloc(sizeof(struct vme_dma_pattern), GFP_KERNEL);
743	if (pattern_attr == NULL) {
744		printk(KERN_ERR "Unable to allocate memory for pattern "
745			"attributes\n");
746		goto err_pat;
747	}
748
749	attributes->type = VME_DMA_PATTERN;
750	attributes->private = (void *)pattern_attr;
751
752	pattern_attr->pattern = pattern;
753	pattern_attr->type = type;
754
755	return attributes;
756
757	kfree(pattern_attr);
758err_pat:
759	kfree(attributes);
760err_attr:
761	return NULL;
762}
763EXPORT_SYMBOL(vme_dma_pattern_attribute);
764
765/*
766 * Create "PCI" type attributes
767 */
768struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address)
769{
770	struct vme_dma_attr *attributes;
771	struct vme_dma_pci *pci_attr;
772
773
774	attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
775	if (attributes == NULL) {
776		printk(KERN_ERR "Unable to allocate memory for attributes "
777			"structure\n");
778		goto err_attr;
779	}
780
781	pci_attr = kmalloc(sizeof(struct vme_dma_pci), GFP_KERNEL);
782	if (pci_attr == NULL) {
783		printk(KERN_ERR "Unable to allocate memory for pci "
784			"attributes\n");
785		goto err_pci;
786	}
787
788
789
790	attributes->type = VME_DMA_PCI;
791	attributes->private = (void *)pci_attr;
792
793	pci_attr->address = address;
794
795	return attributes;
796
797	kfree(pci_attr);
798err_pci:
799	kfree(attributes);
800err_attr:
801	return NULL;
802}
803EXPORT_SYMBOL(vme_dma_pci_attribute);
804
805/*
806 * Create "VME" type attributes
807 */
808struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
809	vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
810{
811	struct vme_dma_attr *attributes;
812	struct vme_dma_vme *vme_attr;
813
814	attributes = kmalloc(
815		sizeof(struct vme_dma_attr), GFP_KERNEL);
816	if (attributes == NULL) {
817		printk(KERN_ERR "Unable to allocate memory for attributes "
818			"structure\n");
819		goto err_attr;
820	}
821
822	vme_attr = kmalloc(sizeof(struct vme_dma_vme), GFP_KERNEL);
823	if (vme_attr == NULL) {
824		printk(KERN_ERR "Unable to allocate memory for vme "
825			"attributes\n");
826		goto err_vme;
827	}
828
829	attributes->type = VME_DMA_VME;
830	attributes->private = (void *)vme_attr;
831
832	vme_attr->address = address;
833	vme_attr->aspace = aspace;
834	vme_attr->cycle = cycle;
835	vme_attr->dwidth = dwidth;
836
837	return attributes;
838
839	kfree(vme_attr);
840err_vme:
841	kfree(attributes);
842err_attr:
843	return NULL;
844}
845EXPORT_SYMBOL(vme_dma_vme_attribute);
846
847/*
848 * Free attribute
849 */
/*
 * Free an attribute created by one of the vme_dma_*_attribute()
 * helpers: release the type-specific payload, then the wrapper.
 */
void vme_dma_free_attribute(struct vme_dma_attr *attributes)
{
	kfree(attributes->private);
	kfree(attributes);
}
855EXPORT_SYMBOL(vme_dma_free_attribute);
856
/*
 * Append a source/destination entry to a DMA link list.
 *
 * @list: list created with vme_new_dma_list().
 * @src/@dest: transfer endpoints from the vme_dma_*_attribute() helpers.
 * @count: number of bytes to transfer.
 *
 * Returns 0 on success, -EINVAL if unsupported or the list is busy,
 * or the bridge driver's result. Uses mutex_trylock so that a list
 * currently submitted for execution is rejected rather than waited on.
 */
int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
	struct vme_dma_attr *dest, size_t count)
{
	struct vme_bridge *bridge = list->parent->parent;
	int retval;

	if (bridge->dma_list_add == NULL) {
		printk(KERN_WARNING "Link List DMA generation not supported\n");
		return -EINVAL;
	}

	if (!mutex_trylock(&(list->mtx))) {
		printk(KERN_ERR "Link List already submitted\n");
		return -EINVAL;
	}

	retval = bridge->dma_list_add(list, src, dest, count);

	mutex_unlock(&(list->mtx));

	return retval;
}
879EXPORT_SYMBOL(vme_dma_list_add);
880
/*
 * Submit a DMA link list for execution via the bridge driver.
 *
 * The list mutex is held for the duration of the bridge call, so
 * concurrent vme_dma_list_add()/vme_dma_list_free() calls are
 * excluded while the list is executing.
 *
 * Returns -EINVAL if unsupported, otherwise the bridge's result.
 */
int vme_dma_list_exec(struct vme_dma_list *list)
{
	struct vme_bridge *bridge = list->parent->parent;
	int retval;

	if (bridge->dma_list_exec == NULL) {
		printk(KERN_ERR "Link List DMA execution not supported\n");
		return -EINVAL;
	}

	mutex_lock(&(list->mtx));

	retval = bridge->dma_list_exec(list);

	mutex_unlock(&(list->mtx));

	return retval;
}
899EXPORT_SYMBOL(vme_dma_list_exec);
900
/*
 * Empty and free a DMA link list.
 *
 * Returns 0 on success, -EINVAL if the list is in use or emptying is
 * unsupported, or the bridge driver's error if emptying fails (in
 * which case the list is NOT freed).
 */
int vme_dma_list_free(struct vme_dma_list *list)
{
	struct vme_bridge *bridge = list->parent->parent;
	int retval;

	if (bridge->dma_list_empty == NULL) {
		printk(KERN_WARNING "Emptying of Link Lists not supported\n");
		return -EINVAL;
	}

	/* Refuse to free a list that is currently being used */
	if (!mutex_trylock(&(list->mtx))) {
		printk(KERN_ERR "Link List in use\n");
		return -EINVAL;
	}

	/*
	 * Empty out all of the entries from the dma list. We need to go to the
	 * low level driver as dma entries are driver specific.
	 */
	retval = bridge->dma_list_empty(list);
	if (retval) {
		printk(KERN_ERR "Unable to empty link-list entries\n");
		mutex_unlock(&(list->mtx));
		return retval;
	}
	mutex_unlock(&(list->mtx));
	kfree(list);

	return retval;
}
931EXPORT_SYMBOL(vme_dma_list_free);
932
/*
 * Release a DMA controller obtained with vme_dma_request().
 *
 * Returns 0 on success, -EINVAL for a non-DMA resource, or -EBUSY if
 * the controller is locked elsewhere or still has pending/running
 * transfers. NOTE(review): the struct vme_resource wrapper is not
 * kfree()d here, unlike vme_slave_free()/vme_master_free() — confirm
 * whether callers are expected to free it.
 */
int vme_dma_free(struct vme_resource *resource)
{
	struct vme_dma_resource *ctrlr;

	if (resource->type != VME_DMA) {
		printk(KERN_ERR "Not a DMA resource\n");
		return -EINVAL;
	}

	ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);

	if (!mutex_trylock(&(ctrlr->mtx))) {
		printk(KERN_ERR "Resource busy, can't free\n");
		return -EBUSY;
	}

	/* Both queues must be empty before the controller can be released */
	if (!(list_empty(&(ctrlr->pending)) && list_empty(&(ctrlr->running)))) {
		printk(KERN_WARNING "Resource still processing transfers\n");
		mutex_unlock(&(ctrlr->mtx));
		return -EBUSY;
	}

	ctrlr->locked = 0;

	mutex_unlock(&(ctrlr->mtx));

	return 0;
}
961EXPORT_SYMBOL(vme_dma_free);
962
963void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
964{
965	void (*call)(int, int, void *);
966	void *priv_data;
967
968	call = bridge->irq[level - 1].callback[statid].func;
969	priv_data = bridge->irq[level - 1].callback[statid].priv_data;
970
971	if (call != NULL)
972		call(level, statid, priv_data);
973	else
974		printk(KERN_WARNING "Spurilous VME interrupt, level:%x, "
975			"vector:%x\n", level, statid);
976}
977EXPORT_SYMBOL(vme_irq_handler);
978
/*
 * Register a callback for a VME interrupt level/status-ID pair and
 * enable the interrupt level on the bridge.
 *
 * @dev: device on the VME bus making the request.
 * @level: VME interrupt level, must be 1-7.
 * @statid: status/ID vector to match.
 * @callback: handler invoked as callback(level, statid, priv_data).
 * @priv_data: opaque pointer passed back to the handler.
 *
 * Returns 0 on success, -EINVAL for bad arguments or missing bridge
 * support, -EBUSY if the slot is already taken.
 */
int vme_irq_request(struct device *dev, int level, int statid,
	void (*callback)(int, int, void *),
	void *priv_data)
{
	struct vme_bridge *bridge;

	bridge = dev_to_bridge(dev);
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		return -EINVAL;
	}

	if ((level < 1) || (level > 7)) {
		printk(KERN_ERR "Invalid interrupt level\n");
		return -EINVAL;
	}

	if (bridge->irq_set == NULL) {
		printk(KERN_ERR "Configuring interrupts not supported\n");
		return -EINVAL;
	}

	/* irq_mtx serialises registration against vme_irq_free() */
	mutex_lock(&(bridge->irq_mtx));

	if (bridge->irq[level - 1].callback[statid].func) {
		mutex_unlock(&(bridge->irq_mtx));
		printk(KERN_WARNING "VME Interrupt already taken\n");
		return -EBUSY;
	}

	bridge->irq[level - 1].count++;
	bridge->irq[level - 1].callback[statid].priv_data = priv_data;
	bridge->irq[level - 1].callback[statid].func = callback;

	/* Enable IRQ level */
	bridge->irq_set(bridge, level, 1, 1);

	mutex_unlock(&(bridge->irq_mtx));

	return 0;
}
1020EXPORT_SYMBOL(vme_irq_request);
1021
/*
 * Unregister the callback for a VME interrupt level/status-ID pair,
 * disabling the interrupt level when no handlers remain on it.
 *
 * @dev: device on the VME bus.
 * @level: VME interrupt level, must be 1-7.
 * @statid: status/ID vector the callback was registered for.
 */
void vme_irq_free(struct device *dev, int level, int statid)
{
	struct vme_bridge *bridge;

	bridge = dev_to_bridge(dev);
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		return;
	}

	if ((level < 1) || (level > 7)) {
		printk(KERN_ERR "Invalid interrupt level\n");
		return;
	}

	if (bridge->irq_set == NULL) {
		printk(KERN_ERR "Configuring interrupts not supported\n");
		return;
	}

	/* irq_mtx serialises teardown against vme_irq_request() */
	mutex_lock(&(bridge->irq_mtx));

	bridge->irq[level - 1].count--;

	/* Disable IRQ level if no more interrupts attached at this level*/
	if (bridge->irq[level - 1].count == 0)
		bridge->irq_set(bridge, level, 0, 1);

	bridge->irq[level - 1].callback[statid].func = NULL;
	bridge->irq[level - 1].callback[statid].priv_data = NULL;

	mutex_unlock(&(bridge->irq_mtx));
}
1055EXPORT_SYMBOL(vme_irq_free);
1056
1057int vme_irq_generate(struct device *dev, int level, int statid)
1058{
1059	struct vme_bridge *bridge;
1060
1061	bridge = dev_to_bridge(dev);
1062	if (bridge == NULL) {
1063		printk(KERN_ERR "Can't find VME bus\n");
1064		return -EINVAL;
1065	}
1066
1067	if ((level < 1) || (level > 7)) {
1068		printk(KERN_WARNING "Invalid interrupt level\n");
1069		return -EINVAL;
1070	}
1071
1072	if (bridge->irq_generate == NULL) {
1073		printk(KERN_WARNING "Interrupt generation not supported\n");
1074		return -EINVAL;
1075	}
1076
1077	return bridge->irq_generate(bridge, level, statid);
1078}
1079EXPORT_SYMBOL(vme_irq_generate);
1080
1081/*
1082 * Request the location monitor, return resource or NULL
1083 */
struct vme_resource *vme_lm_request(struct device *dev)
{
	struct vme_bridge *bridge;
	struct list_head *lm_pos = NULL;
	struct vme_lm_resource *allocated_lm = NULL;
	struct vme_lm_resource *lm = NULL;
	struct vme_resource *resource = NULL;

	bridge = dev_to_bridge(dev);
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through DMA resources */
	list_for_each(lm_pos, &(bridge->lm_resources)) {
		lm = list_entry(lm_pos,
			struct vme_lm_resource, list);

		if (lm == NULL) {
			printk(KERN_ERR "Registered NULL Location Monitor "
				"resource\n");
			continue;
		}

		/* Find an unlocked controller */
		mutex_lock(&(lm->mtx));
		if (lm->locked == 0) {
			/* Claim the monitor while holding its mutex */
			lm->locked = 1;
			mutex_unlock(&(lm->mtx));
			allocated_lm = lm;
			break;
		}
		mutex_unlock(&(lm->mtx));
	}

	/* Check to see if we found a resource */
	if (allocated_lm == NULL)
		goto err_lm;

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_ERR "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_LM;
	resource->entry = &(allocated_lm->list);

	return resource;

err_alloc:
	/* Unlock image - release the claim taken in the search loop */
	mutex_lock(&(lm->mtx));
	lm->locked = 0;
	mutex_unlock(&(lm->mtx));
err_lm:
err_bus:
	return NULL;
}
1143EXPORT_SYMBOL(vme_lm_request);
1144
1145int vme_lm_count(struct vme_resource *resource)
1146{
1147	struct vme_lm_resource *lm;
1148
1149	if (resource->type != VME_LM) {
1150		printk(KERN_ERR "Not a Location Monitor resource\n");
1151		return -EINVAL;
1152	}
1153
1154	lm = list_entry(resource->entry, struct vme_lm_resource, list);
1155
1156	return lm->monitors;
1157}
1158EXPORT_SYMBOL(vme_lm_count);
1159
1160int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
1161	vme_address_t aspace, vme_cycle_t cycle)
1162{
1163	struct vme_bridge *bridge = find_bridge(resource);
1164	struct vme_lm_resource *lm;
1165
1166	if (resource->type != VME_LM) {
1167		printk(KERN_ERR "Not a Location Monitor resource\n");
1168		return -EINVAL;
1169	}
1170
1171	lm = list_entry(resource->entry, struct vme_lm_resource, list);
1172
1173	if (bridge->lm_set == NULL) {
1174		printk(KERN_ERR "vme_lm_set not supported\n");
1175		return -EINVAL;
1176	}
1177
1178	return bridge->lm_set(lm, lm_base, aspace, cycle);
1179}
1180EXPORT_SYMBOL(vme_lm_set);
1181
1182int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
1183	vme_address_t *aspace, vme_cycle_t *cycle)
1184{
1185	struct vme_bridge *bridge = find_bridge(resource);
1186	struct vme_lm_resource *lm;
1187
1188	if (resource->type != VME_LM) {
1189		printk(KERN_ERR "Not a Location Monitor resource\n");
1190		return -EINVAL;
1191	}
1192
1193	lm = list_entry(resource->entry, struct vme_lm_resource, list);
1194
1195	if (bridge->lm_get == NULL) {
1196		printk(KERN_ERR "vme_lm_get not supported\n");
1197		return -EINVAL;
1198	}
1199
1200	return bridge->lm_get(lm, lm_base, aspace, cycle);
1201}
1202EXPORT_SYMBOL(vme_lm_get);
1203
1204int vme_lm_attach(struct vme_resource *resource, int monitor,
1205	void (*callback)(int))
1206{
1207	struct vme_bridge *bridge = find_bridge(resource);
1208	struct vme_lm_resource *lm;
1209
1210	if (resource->type != VME_LM) {
1211		printk(KERN_ERR "Not a Location Monitor resource\n");
1212		return -EINVAL;
1213	}
1214
1215	lm = list_entry(resource->entry, struct vme_lm_resource, list);
1216
1217	if (bridge->lm_attach == NULL) {
1218		printk(KERN_ERR "vme_lm_attach not supported\n");
1219		return -EINVAL;
1220	}
1221
1222	return bridge->lm_attach(lm, monitor, callback);
1223}
1224EXPORT_SYMBOL(vme_lm_attach);
1225
1226int vme_lm_detach(struct vme_resource *resource, int monitor)
1227{
1228	struct vme_bridge *bridge = find_bridge(resource);
1229	struct vme_lm_resource *lm;
1230
1231	if (resource->type != VME_LM) {
1232		printk(KERN_ERR "Not a Location Monitor resource\n");
1233		return -EINVAL;
1234	}
1235
1236	lm = list_entry(resource->entry, struct vme_lm_resource, list);
1237
1238	if (bridge->lm_detach == NULL) {
1239		printk(KERN_ERR "vme_lm_detach not supported\n");
1240		return -EINVAL;
1241	}
1242
1243	return bridge->lm_detach(lm, monitor);
1244}
1245EXPORT_SYMBOL(vme_lm_detach);
1246
1247void vme_lm_free(struct vme_resource *resource)
1248{
1249	struct vme_lm_resource *lm;
1250
1251	if (resource->type != VME_LM) {
1252		printk(KERN_ERR "Not a Location Monitor resource\n");
1253		return;
1254	}
1255
1256	lm = list_entry(resource->entry, struct vme_lm_resource, list);
1257
1258	mutex_lock(&(lm->mtx));
1259
1260
1261	lm->locked = 0;
1262
1263	mutex_unlock(&(lm->mtx));
1264
1265	kfree(resource);
1266}
1267EXPORT_SYMBOL(vme_lm_free);
1268
1269int vme_slot_get(struct device *bus)
1270{
1271	struct vme_bridge *bridge;
1272
1273	bridge = dev_to_bridge(bus);
1274	if (bridge == NULL) {
1275		printk(KERN_ERR "Can't find VME bus\n");
1276		return -EINVAL;
1277	}
1278
1279	if (bridge->slot_get == NULL) {
1280		printk(KERN_WARNING "vme_slot_get not supported\n");
1281		return -EINVAL;
1282	}
1283
1284	return bridge->slot_get(bridge);
1285}
1286EXPORT_SYMBOL(vme_slot_get);
1287
1288
1289/* - Bridge Registration --------------------------------------------------- */
1290
1291static int vme_alloc_bus_num(void)
1292{
1293	int i;
1294
1295	mutex_lock(&vme_bus_num_mtx);
1296	for (i = 0; i < sizeof(vme_bus_numbers) * 8; i++) {
1297		if (((vme_bus_numbers >> i) & 0x1) == 0) {
1298			vme_bus_numbers |= (0x1 << i);
1299			break;
1300		}
1301	}
1302	mutex_unlock(&vme_bus_num_mtx);
1303
1304	return i;
1305}
1306
1307static void vme_free_bus_num(int bus)
1308{
1309	mutex_lock(&vme_bus_num_mtx);
1310	vme_bus_numbers |= ~(0x1 << bus);
1311	mutex_unlock(&vme_bus_num_mtx);
1312}
1313
1314int vme_register_bridge(struct vme_bridge *bridge)
1315{
1316	struct device *dev;
1317	int retval;
1318	int i;
1319
1320	bridge->num = vme_alloc_bus_num();
1321
1322	/* This creates 32 vme "slot" devices. This equates to a slot for each
1323	 * ID available in a system conforming to the ANSI/VITA 1-1994
1324	 * specification.
1325	 */
1326	for (i = 0; i < VME_SLOTS_MAX; i++) {
1327		dev = &(bridge->dev[i]);
1328		memset(dev, 0, sizeof(struct device));
1329
1330		dev->parent = bridge->parent;
1331		dev->bus = &(vme_bus_type);
1332		/*
1333		 * We save a pointer to the bridge in platform_data so that we
1334		 * can get to it later. We keep driver_data for use by the
1335		 * driver that binds against the slot
1336		 */
1337		dev->platform_data = bridge;
1338		dev_set_name(dev, "vme-%x.%x", bridge->num, i + 1);
1339
1340		retval = device_register(dev);
1341		if (retval)
1342			goto err_reg;
1343	}
1344
1345	return retval;
1346
1347	i = VME_SLOTS_MAX;
1348err_reg:
1349	while (i > -1) {
1350		dev = &(bridge->dev[i]);
1351		device_unregister(dev);
1352	}
1353	vme_free_bus_num(bridge->num);
1354	return retval;
1355}
1356EXPORT_SYMBOL(vme_register_bridge);
1357
1358void vme_unregister_bridge(struct vme_bridge *bridge)
1359{
1360	int i;
1361	struct device *dev;
1362
1363
1364	for (i = 0; i < VME_SLOTS_MAX; i++) {
1365		dev = &(bridge->dev[i]);
1366		device_unregister(dev);
1367	}
1368	vme_free_bus_num(bridge->num);
1369}
1370EXPORT_SYMBOL(vme_unregister_bridge);
1371
1372
1373/* - Driver Registration --------------------------------------------------- */
1374
1375int vme_register_driver(struct vme_driver *drv)
1376{
1377	drv->driver.name = drv->name;
1378	drv->driver.bus = &vme_bus_type;
1379
1380	return driver_register(&drv->driver);
1381}
1382EXPORT_SYMBOL(vme_register_driver);
1383
1384void vme_unregister_driver(struct vme_driver *drv)
1385{
1386	driver_unregister(&drv->driver);
1387}
1388EXPORT_SYMBOL(vme_unregister_driver);
1389
1390/* - Bus Registration ------------------------------------------------------ */
1391
1392static int vme_calc_slot(struct device *dev)
1393{
1394	struct vme_bridge *bridge;
1395	int num;
1396
1397	bridge = dev_to_bridge(dev);
1398
1399	/* Determine slot number */
1400	num = 0;
1401	while (num < VME_SLOTS_MAX) {
1402		if (&(bridge->dev[num]) == dev)
1403			break;
1404
1405		num++;
1406	}
1407	if (num == VME_SLOTS_MAX) {
1408		dev_err(dev, "Failed to identify slot\n");
1409		num = 0;
1410		goto err_dev;
1411	}
1412	num++;
1413
1414err_dev:
1415	return num;
1416}
1417
1418static struct vme_driver *dev_to_vme_driver(struct device *dev)
1419{
1420	if (dev->driver == NULL)
1421		printk(KERN_ERR "Bugger dev->driver is NULL\n");
1422
1423	return container_of(dev->driver, struct vme_driver, driver);
1424}
1425
1426static int vme_bus_match(struct device *dev, struct device_driver *drv)
1427{
1428	struct vme_bridge *bridge;
1429	struct vme_driver *driver;
1430	int i, num;
1431
1432	bridge = dev_to_bridge(dev);
1433	driver = container_of(drv, struct vme_driver, driver);
1434
1435	num = vme_calc_slot(dev);
1436	if (!num)
1437		goto err_dev;
1438
1439	if (driver->bind_table == NULL) {
1440		dev_err(dev, "Bind table NULL\n");
1441		goto err_table;
1442	}
1443
1444	i = 0;
1445	while ((driver->bind_table[i].bus != 0) ||
1446		(driver->bind_table[i].slot != 0)) {
1447
1448		if (bridge->num == driver->bind_table[i].bus) {
1449			if (num == driver->bind_table[i].slot)
1450				return 1;
1451
1452			if (driver->bind_table[i].slot == VME_SLOT_ALL)
1453				return 1;
1454
1455			if ((driver->bind_table[i].slot == VME_SLOT_CURRENT) &&
1456				(num == vme_slot_get(dev)))
1457				return 1;
1458		}
1459		i++;
1460	}
1461
1462err_dev:
1463err_table:
1464	return 0;
1465}
1466
1467static int vme_bus_probe(struct device *dev)
1468{
1469	struct vme_bridge *bridge;
1470	struct vme_driver *driver;
1471	int retval = -ENODEV;
1472
1473	driver = dev_to_vme_driver(dev);
1474	bridge = dev_to_bridge(dev);
1475
1476	if (driver->probe != NULL)
1477		retval = driver->probe(dev, bridge->num, vme_calc_slot(dev));
1478
1479	return retval;
1480}
1481
1482static int vme_bus_remove(struct device *dev)
1483{
1484	struct vme_bridge *bridge;
1485	struct vme_driver *driver;
1486	int retval = -ENODEV;
1487
1488	driver = dev_to_vme_driver(dev);
1489	bridge = dev_to_bridge(dev);
1490
1491	if (driver->remove != NULL)
1492		retval = driver->remove(dev, bridge->num, vme_calc_slot(dev));
1493
1494	return retval;
1495}
1496
/* The "vme" bus type: routes slot devices to VME drivers via the
 * match/probe/remove callbacks above. */
struct bus_type vme_bus_type = {
	.name = "vme",
	.match = vme_bus_match,
	.probe = vme_bus_probe,
	.remove = vme_bus_remove,
};
EXPORT_SYMBOL(vme_bus_type);
1504
/* Module init: register the VME bus type with the driver core. */
static int __init vme_init(void)
{
	return bus_register(&vme_bus_type);
}
1509
/* Module exit: unregister the VME bus type. */
static void __exit vme_exit(void)
{
	bus_unregister(&vme_bus_type);
}
1514
1515MODULE_DESCRIPTION("VME bridge driver framework");
1516MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com");
1517MODULE_LICENSE("GPL");
1518
1519module_init(vme_init);
1520module_exit(vme_exit);
1521