• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/arch/ia64/sn/pci/
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License.  See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2003-2005 Silicon Graphics, Inc.  All Rights Reserved.
7 */
8
9#include <linux/types.h>
10#include <linux/interrupt.h>
11#include <linux/pci.h>
12#include <linux/bitmap.h>
13#include <linux/slab.h>
14#include <asm/sn/sn_sal.h>
15#include <asm/sn/addrs.h>
16#include <asm/sn/io.h>
17#include <asm/sn/pcidev.h>
18#include <asm/sn/pcibus_provider_defs.h>
19#include <asm/sn/tioca_provider.h>
20
/* Count of CAs found with a successfully initialized GART */
u32 tioca_gart_found;
EXPORT_SYMBOL(tioca_gart_found);	/* used by agp-sgi */

/* List of all tioca_kernel structs set up by tioca_bus_fixup() */
LIST_HEAD(tioca_list);
EXPORT_SYMBOL(tioca_list);	/* used by agp-sgi */

static int tioca_gart_init(struct tioca_kernel *);
28
/**
 * tioca_gart_init - Initialize SGI TIOCA GART
 * @tioca_kern: ptr to the kernel-private struct for the CA to set up
 *
 * If the indicated tioca has devices present, initialize its associated
 * GART MMR's and kernel memory.
 *
 * Returns 0 on success (or when the CA has no devices attached), and a
 * negative value (-1 or -ENOMEM) on failure.
 */
static int
tioca_gart_init(struct tioca_kernel *tioca_kern)
{
	u64 ap_reg;
	u64 offset;
	struct page *tmp;
	struct tioca_common *tioca_common;
	struct tioca __iomem *ca_base;

	tioca_common = tioca_kern->ca_common;
	ca_base = (struct tioca __iomem *)tioca_common->ca_common.bs_base;

	/* Nothing to set up if the CA has no devices behind it */
	if (list_empty(tioca_kern->ca_devices))
		return 0;

	ap_reg = 0;

	/*
	 * Validate aperature size
	 */

	switch (CA_APERATURE_SIZE >> 20) {
	case 4:
		ap_reg |= (0x3ff << CA_GART_AP_SIZE_SHFT);	/* 4MB */
		break;
	case 8:
		ap_reg |= (0x3fe << CA_GART_AP_SIZE_SHFT);	/* 8MB */
		break;
	case 16:
		ap_reg |= (0x3fc << CA_GART_AP_SIZE_SHFT);	/* 16MB */
		break;
	case 32:
		ap_reg |= (0x3f8 << CA_GART_AP_SIZE_SHFT);	/* 32 MB */
		break;
	case 64:
		ap_reg |= (0x3f0 << CA_GART_AP_SIZE_SHFT);	/* 64 MB */
		break;
	case 128:
		ap_reg |= (0x3e0 << CA_GART_AP_SIZE_SHFT);	/* 128 MB */
		break;
	case 256:
		ap_reg |= (0x3c0 << CA_GART_AP_SIZE_SHFT);	/* 256 MB */
		break;
	case 512:
		ap_reg |= (0x380 << CA_GART_AP_SIZE_SHFT);	/* 512 MB */
		break;
	case 1024:
		ap_reg |= (0x300 << CA_GART_AP_SIZE_SHFT);	/* 1GB */
		break;
	case 2048:
		ap_reg |= (0x200 << CA_GART_AP_SIZE_SHFT);	/* 2GB */
		break;
	case 4096:
		ap_reg |= (0x000 << CA_GART_AP_SIZE_SHFT);	/* 4 GB */
		break;
	default:
		printk(KERN_ERR "%s:  Invalid CA_APERATURE_SIZE "
		       "0x%lx\n", __func__, (ulong) CA_APERATURE_SIZE);
		return -1;
	}

	/*
	 * Set up other aperature parameters
	 */

	/* GART page size is either 16KB or 4KB, whichever the kernel can use */
	if (PAGE_SIZE >= 16384) {
		tioca_kern->ca_ap_pagesize = 16384;
		ap_reg |= CA_GART_PAGE_SIZE;
	} else {
		tioca_kern->ca_ap_pagesize = 4096;
	}

	tioca_kern->ca_ap_size = CA_APERATURE_SIZE;
	tioca_kern->ca_ap_bus_base = CA_APERATURE_BASE;
	tioca_kern->ca_gart_entries =
	    tioca_kern->ca_ap_size / tioca_kern->ca_ap_pagesize;

	ap_reg |= (CA_GART_AP_ENB_AGP | CA_GART_AP_ENB_PCI);
	ap_reg |= tioca_kern->ca_ap_bus_base;

	/*
	 * Allocate and set up the GART.  One 64-bit entry per aperture page,
	 * allocated zero-filled on the node closest to the CA.
	 */

	tioca_kern->ca_gart_size = tioca_kern->ca_gart_entries * sizeof(u64);
	tmp =
	    alloc_pages_node(tioca_kern->ca_closest_node,
			     GFP_KERNEL | __GFP_ZERO,
			     get_order(tioca_kern->ca_gart_size));

	if (!tmp) {
		printk(KERN_ERR "%s:  Could not allocate "
		       "%llu bytes (order %d) for GART\n",
		       __func__,
		       tioca_kern->ca_gart_size,
		       get_order(tioca_kern->ca_gart_size));
		return -ENOMEM;
	}

	tioca_kern->ca_gart = page_address(tmp);
	tioca_kern->ca_gart_coretalk_addr =
	    PHYS_TO_TIODMA(virt_to_phys(tioca_kern->ca_gart));

	/*
	 * Compute PCI/AGP convenience fields
	 */

	/* PCI32 mapped region: offset into the aperture and matching
	 * sub-range of GART entries used for dynamic PCI maps */
	offset = CA_PCI32_MAPPED_BASE - CA_APERATURE_BASE;
	tioca_kern->ca_pciap_base = CA_PCI32_MAPPED_BASE;
	tioca_kern->ca_pciap_size = CA_PCI32_MAPPED_SIZE;
	tioca_kern->ca_pcigart_start = offset / tioca_kern->ca_ap_pagesize;
	tioca_kern->ca_pcigart_base =
	    tioca_kern->ca_gart_coretalk_addr + offset;
	tioca_kern->ca_pcigart =
	    &tioca_kern->ca_gart[tioca_kern->ca_pcigart_start];
	tioca_kern->ca_pcigart_entries =
	    tioca_kern->ca_pciap_size / tioca_kern->ca_ap_pagesize;
	/* allocation bitmap for the PCI GART entries, one bit per entry */
	tioca_kern->ca_pcigart_pagemap =
	    kzalloc(tioca_kern->ca_pcigart_entries / 8, GFP_KERNEL);
	if (!tioca_kern->ca_pcigart_pagemap) {
		free_pages((unsigned long)tioca_kern->ca_gart,
			   get_order(tioca_kern->ca_gart_size));
		return -1;
	}

	/* AGP (gfx) mapped region: same layout computed for the AGP window */
	offset = CA_AGP_MAPPED_BASE - CA_APERATURE_BASE;
	tioca_kern->ca_gfxap_base = CA_AGP_MAPPED_BASE;
	tioca_kern->ca_gfxap_size = CA_AGP_MAPPED_SIZE;
	tioca_kern->ca_gfxgart_start = offset / tioca_kern->ca_ap_pagesize;
	tioca_kern->ca_gfxgart_base =
	    tioca_kern->ca_gart_coretalk_addr + offset;
	tioca_kern->ca_gfxgart =
	    &tioca_kern->ca_gart[tioca_kern->ca_gfxgart_start];
	tioca_kern->ca_gfxgart_entries =
	    tioca_kern->ca_gfxap_size / tioca_kern->ca_ap_pagesize;

	/*
	 * various control settings:
	 *      use agp op-combining
	 *      use GET semantics to fetch memory
	 *      participate in coherency domain
	 * 	DISABLE GART PREFETCHING due to hw bug tracked in SGI PV930029
	 */

	__sn_setq_relaxed(&ca_base->ca_control1,
			CA_AGPDMA_OP_ENB_COMBDELAY);	/* PV895469 ? */
	__sn_clrq_relaxed(&ca_base->ca_control2, CA_GART_MEM_PARAM);
	__sn_setq_relaxed(&ca_base->ca_control2,
			(0x2ull << CA_GART_MEM_PARAM_SHFT));
	tioca_kern->ca_gart_iscoherent = 1;
	__sn_clrq_relaxed(&ca_base->ca_control2,
	    		(CA_GART_WR_PREFETCH_ENB | CA_GART_RD_PREFETCH_ENB));

	/*
	 * Unmask GART fetch error interrupts.  Clear residual errors first.
	 */

	writeq(CA_GART_FETCH_ERR, &ca_base->ca_int_status_alias);
	writeq(CA_GART_FETCH_ERR, &ca_base->ca_mult_error_alias);
	__sn_clrq_relaxed(&ca_base->ca_int_mask, CA_GART_FETCH_ERR);

	/*
	 * Program the aperature and gart registers in TIOCA
	 */

	writeq(ap_reg, &ca_base->ca_gart_aperature);
	/* low bit presumably marks the pointer-table entry valid -- per hw spec */
	writeq(tioca_kern->ca_gart_coretalk_addr|1, &ca_base->ca_gart_ptr_table);

	return 0;
}
206
207/**
208 * tioca_fastwrite_enable - enable AGP FW for a tioca and its functions
209 * @tioca_kernel: structure representing the CA
210 *
211 * Given a CA, scan all attached functions making sure they all support
212 * FastWrite.  If so, enable FastWrite for all functions and the CA itself.
213 */
214
215void
216tioca_fastwrite_enable(struct tioca_kernel *tioca_kern)
217{
218	int cap_ptr;
219	u32 reg;
220	struct tioca __iomem *tioca_base;
221	struct pci_dev *pdev;
222	struct tioca_common *common;
223
224	common = tioca_kern->ca_common;
225
226	/*
227	 * Scan all vga controllers on this bus making sure they all
228	 * support FW.  If not, return.
229	 */
230
231	list_for_each_entry(pdev, tioca_kern->ca_devices, bus_list) {
232		if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8))
233			continue;
234
235		cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
236		if (!cap_ptr)
237			return;	/* no AGP CAP means no FW */
238
239		pci_read_config_dword(pdev, cap_ptr + PCI_AGP_STATUS, &reg);
240		if (!(reg & PCI_AGP_STATUS_FW))
241			return;	/* function doesn't support FW */
242	}
243
244	/*
245	 * Set fw for all vga fn's
246	 */
247
248	list_for_each_entry(pdev, tioca_kern->ca_devices, bus_list) {
249		if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8))
250			continue;
251
252		cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
253		pci_read_config_dword(pdev, cap_ptr + PCI_AGP_COMMAND, &reg);
254		reg |= PCI_AGP_COMMAND_FW;
255		pci_write_config_dword(pdev, cap_ptr + PCI_AGP_COMMAND, reg);
256	}
257
258	/*
259	 * Set ca's fw to match
260	 */
261
262	tioca_base = (struct tioca __iomem*)common->ca_common.bs_base;
263	__sn_setq_relaxed(&tioca_base->ca_control1, CA_AGP_FW_ENABLE);
264}
265
266EXPORT_SYMBOL(tioca_fastwrite_enable);	/* used by agp-sgi */
267
268/**
269 * tioca_dma_d64 - create a DMA mapping using 64-bit direct mode
270 * @paddr: system physical address
271 *
272 * Map @paddr into 64-bit CA bus space.  No device context is necessary.
273 * Bits 53:0 come from the coretalk address.  We just need to mask in the
274 * following optional bits of the 64-bit pci address:
275 *
276 * 63:60 - Coretalk Packet Type -  0x1 for Mem Get/Put (coherent)
277 *                                 0x2 for PIO (non-coherent)
278 *                                 We will always use 0x1
279 * 55:55 - Swap bytes		   Currently unused
280 */
281static u64
282tioca_dma_d64(unsigned long paddr)
283{
284	dma_addr_t bus_addr;
285
286	bus_addr = PHYS_TO_TIODMA(paddr);
287
288	BUG_ON(!bus_addr);
289	BUG_ON(bus_addr >> 54);
290
291	/* Set upper nibble to Cache Coherent Memory op */
292	bus_addr |= (1UL << 60);
293
294	return bus_addr;
295}
296
297static u64
298tioca_dma_d48(struct pci_dev *pdev, u64 paddr)
299{
300	struct tioca_common *tioca_common;
301	struct tioca __iomem *ca_base;
302	u64 ct_addr;
303	dma_addr_t bus_addr;
304	u32 node_upper;
305	u64 agp_dma_extn;
306	struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);
307
308	tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info;
309	ca_base = (struct tioca __iomem *)tioca_common->ca_common.bs_base;
310
311	ct_addr = PHYS_TO_TIODMA(paddr);
312	if (!ct_addr)
313		return 0;
314
315	bus_addr = (dma_addr_t) (ct_addr & 0xffffffffffffUL);
316	node_upper = ct_addr >> 48;
317
318	if (node_upper > 64) {
319		printk(KERN_ERR "%s:  coretalk addr 0x%p node id out "
320		       "of range\n", __func__, (void *)ct_addr);
321		return 0;
322	}
323
324	agp_dma_extn = __sn_readq_relaxed(&ca_base->ca_agp_dma_addr_extn);
325	if (node_upper != (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT)) {
326		printk(KERN_ERR "%s:  coretalk upper node (%u) "
327		       "mismatch with ca_agp_dma_addr_extn (%llu)\n",
328		       __func__,
329		       node_upper, (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT));
330		return 0;
331	}
332
333	return bus_addr;
334}
335
/**
 * tioca_dma_mapped - create a DMA mapping using a CA GART
 * @pdev: linux pci_dev representing the function
 * @paddr: host physical address to map
 * @req_size: len (bytes) to map
 *
 * Map @paddr into CA address space using the GART mechanism.  The mapped
 * dma_addr_t is guaranteed to be contiguous in CA bus space.
 *
 * Returns the mapped bus address, or 0 on failure (no coretalk
 * translation, map struct allocation failure, or no free run of GART
 * entries large enough).
 */
static dma_addr_t
tioca_dma_mapped(struct pci_dev *pdev, unsigned long paddr, size_t req_size)
{
	int ps, ps_shift, entry, entries, mapsize;
	u64 xio_addr, end_xio_addr;
	struct tioca_common *tioca_common;
	struct tioca_kernel *tioca_kern;
	dma_addr_t bus_addr = 0;
	struct tioca_dmamap *ca_dmamap;
	void *map;
	unsigned long flags;
	struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);

	tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info;
	tioca_kern = (struct tioca_kernel *)tioca_common->ca_kernel_private;

	xio_addr = PHYS_TO_TIODMA(paddr);
	if (!xio_addr)
		return 0;

	/* ca_lock guards the GART bitmap, entries, and dmamap list */
	spin_lock_irqsave(&tioca_kern->ca_lock, flags);

	/*
	 * allocate a map struct
	 */

	/* GFP_ATOMIC: we're under the spinlock with interrupts off */
	ca_dmamap = kzalloc(sizeof(struct tioca_dmamap), GFP_ATOMIC);
	if (!ca_dmamap)
		goto map_return;

	/*
	 * Locate free entries that can hold req_size.  Account for
	 * unaligned start/length when allocating.
	 */

	ps = tioca_kern->ca_ap_pagesize;	/* will be power of 2 */
	ps_shift = ffs(ps) - 1;
	end_xio_addr = xio_addr + req_size - 1;

	/* number of GART pages the [xio_addr, end_xio_addr] range touches */
	entries = (end_xio_addr >> ps_shift) - (xio_addr >> ps_shift) + 1;

	map = tioca_kern->ca_pcigart_pagemap;
	mapsize = tioca_kern->ca_pcigart_entries;

	/* find a contiguous run of free entries in the PCI GART bitmap */
	entry = bitmap_find_next_zero_area(map, mapsize, 0, entries, 0);
	if (entry >= mapsize) {
		kfree(ca_dmamap);
		goto map_return;	/* returns bus_addr == 0 */
	}

	bitmap_set(map, entry, entries);

	bus_addr = tioca_kern->ca_pciap_base + (entry * ps);

	/* record the mapping so tioca_dma_unmap() can find and release it */
	ca_dmamap->cad_dma_addr = bus_addr;
	ca_dmamap->cad_gart_size = entries;
	ca_dmamap->cad_gart_entry = entry;
	list_add(&ca_dmamap->cad_list, &tioca_kern->ca_dmamaps);

	/* handle an unaligned start: map the partial first page and fold
	 * the sub-page offset into the returned bus address */
	if (xio_addr % ps) {
		tioca_kern->ca_pcigart[entry] = tioca_paddr_to_gart(xio_addr);
		bus_addr += xio_addr & (ps - 1);
		xio_addr &= ~(ps - 1);
		xio_addr += ps;
		entry++;
	}

	/* fill in the remaining GART entries, one per aperture page */
	while (xio_addr < end_xio_addr) {
		tioca_kern->ca_pcigart[entry] = tioca_paddr_to_gart(xio_addr);
		xio_addr += ps;
		entry++;
	}

	/* make the new GART entries visible to the hardware */
	tioca_tlbflush(tioca_kern);

map_return:
	spin_unlock_irqrestore(&tioca_kern->ca_lock, flags);
	return bus_addr;
}
424
425/**
426 * tioca_dma_unmap - release CA mapping resources
427 * @pdev: linux pci_dev representing the function
428 * @bus_addr: bus address returned by an earlier tioca_dma_map
429 * @dir: mapping direction (unused)
430 *
431 * Locate mapping resources associated with @bus_addr and release them.
432 * For mappings created using the direct modes (64 or 48) there are no
433 * resources to release.
434 */
435static void
436tioca_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
437{
438	int i, entry;
439	struct tioca_common *tioca_common;
440	struct tioca_kernel *tioca_kern;
441	struct tioca_dmamap *map;
442	struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);
443	unsigned long flags;
444
445	tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info;
446	tioca_kern = (struct tioca_kernel *)tioca_common->ca_kernel_private;
447
448	/* return straight away if this isn't be a mapped address */
449
450	if (bus_addr < tioca_kern->ca_pciap_base ||
451	    bus_addr >= (tioca_kern->ca_pciap_base + tioca_kern->ca_pciap_size))
452		return;
453
454	spin_lock_irqsave(&tioca_kern->ca_lock, flags);
455
456	list_for_each_entry(map, &tioca_kern->ca_dmamaps, cad_list)
457	    if (map->cad_dma_addr == bus_addr)
458		break;
459
460	BUG_ON(map == NULL);
461
462	entry = map->cad_gart_entry;
463
464	for (i = 0; i < map->cad_gart_size; i++, entry++) {
465		clear_bit(entry, tioca_kern->ca_pcigart_pagemap);
466		tioca_kern->ca_pcigart[entry] = 0;
467	}
468	tioca_tlbflush(tioca_kern);
469
470	list_del(&map->cad_list);
471	spin_unlock_irqrestore(&tioca_kern->ca_lock, flags);
472	kfree(map);
473}
474
475/**
476 * tioca_dma_map - map pages for PCI DMA
477 * @pdev: linux pci_dev representing the function
478 * @paddr: host physical address to map
479 * @byte_count: bytes to map
480 *
481 * This is the main wrapper for mapping host physical pages to CA PCI space.
482 * The mapping mode used is based on the devices dma_mask.  As a last resort
483 * use the GART mapped mode.
484 */
485static u64
486tioca_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
487{
488	u64 mapaddr;
489
490	/*
491	 * Not supported for now ...
492	 */
493	if (dma_flags & SN_DMA_MSI)
494		return 0;
495
496	/*
497	 * If card is 64 or 48 bit addressable, use a direct mapping.  32
498	 * bit direct is so restrictive w.r.t. where the memory resides that
499	 * we don't use it even though CA has some support.
500	 */
501
502	if (pdev->dma_mask == ~0UL)
503		mapaddr = tioca_dma_d64(paddr);
504	else if (pdev->dma_mask == 0xffffffffffffUL)
505		mapaddr = tioca_dma_d48(pdev, paddr);
506	else
507		mapaddr = 0;
508
509	/* Last resort ... use PCI portion of CA GART */
510
511	if (mapaddr == 0)
512		mapaddr = tioca_dma_mapped(pdev, paddr, byte_count);
513
514	return mapaddr;
515}
516
517/**
518 * tioca_error_intr_handler - SGI TIO CA error interrupt handler
519 * @irq: unused
520 * @arg: pointer to tioca_common struct for the given CA
521 *
522 * Handle a CA error interrupt.  Simply a wrapper around a SAL call which
523 * defers processing to the SGI prom.
524 */
525static irqreturn_t
526tioca_error_intr_handler(int irq, void *arg)
527{
528	struct tioca_common *soft = arg;
529	struct ia64_sal_retval ret_stuff;
530	u64 segment;
531	u64 busnum;
532	ret_stuff.status = 0;
533	ret_stuff.v0 = 0;
534
535	segment = soft->ca_common.bs_persist_segment;
536	busnum = soft->ca_common.bs_persist_busnum;
537
538	SAL_CALL_NOLOCK(ret_stuff,
539			(u64) SN_SAL_IOIF_ERROR_INTERRUPT,
540			segment, busnum, 0, 0, 0, 0, 0);
541
542	return IRQ_HANDLED;
543}
544
545/**
546 * tioca_bus_fixup - perform final PCI fixup for a TIO CA bus
547 * @prom_bussoft: Common prom/kernel struct representing the bus
548 *
549 * Replicates the tioca_common pointed to by @prom_bussoft in kernel
550 * space.  Allocates and initializes a kernel-only area for a given CA,
551 * and sets up an irq for handling CA error interrupts.
552 *
553 * On successful setup, returns the kernel version of tioca_common back to
554 * the caller.
555 */
556static void *
557tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller)
558{
559	struct tioca_common *tioca_common;
560	struct tioca_kernel *tioca_kern;
561	struct pci_bus *bus;
562
563	/* sanity check prom rev */
564
565	if (is_shub1() && sn_sal_rev() < 0x0406) {
566		printk
567		    (KERN_ERR "%s:  SGI prom rev 4.06 or greater required "
568		     "for tioca support\n", __func__);
569		return NULL;
570	}
571
572	/*
573	 * Allocate kernel bus soft and copy from prom.
574	 */
575
576	tioca_common = kzalloc(sizeof(struct tioca_common), GFP_KERNEL);
577	if (!tioca_common)
578		return NULL;
579
580	memcpy(tioca_common, prom_bussoft, sizeof(struct tioca_common));
581	tioca_common->ca_common.bs_base = (unsigned long)
582		ioremap(REGION_OFFSET(tioca_common->ca_common.bs_base),
583			sizeof(struct tioca_common));
584
585	/* init kernel-private area */
586
587	tioca_kern = kzalloc(sizeof(struct tioca_kernel), GFP_KERNEL);
588	if (!tioca_kern) {
589		kfree(tioca_common);
590		return NULL;
591	}
592
593	tioca_kern->ca_common = tioca_common;
594	spin_lock_init(&tioca_kern->ca_lock);
595	INIT_LIST_HEAD(&tioca_kern->ca_dmamaps);
596	tioca_kern->ca_closest_node =
597	    nasid_to_cnodeid(tioca_common->ca_closest_nasid);
598	tioca_common->ca_kernel_private = (u64) tioca_kern;
599
600	bus = pci_find_bus(tioca_common->ca_common.bs_persist_segment,
601		tioca_common->ca_common.bs_persist_busnum);
602	BUG_ON(!bus);
603	tioca_kern->ca_devices = &bus->devices;
604
605	/* init GART */
606
607	if (tioca_gart_init(tioca_kern) < 0) {
608		kfree(tioca_kern);
609		kfree(tioca_common);
610		return NULL;
611	}
612
613	tioca_gart_found++;
614	list_add(&tioca_kern->ca_list, &tioca_list);
615
616	if (request_irq(SGI_TIOCA_ERROR,
617			tioca_error_intr_handler,
618			IRQF_SHARED, "TIOCA error", (void *)tioca_common))
619		printk(KERN_WARNING
620		       "%s:  Unable to get irq %d.  "
621		       "Error interrupts won't be routed for TIOCA bus %d\n",
622		       __func__, SGI_TIOCA_ERROR,
623		       (int)tioca_common->ca_common.bs_persist_busnum);
624
625	sn_set_err_irq_affinity(SGI_TIOCA_ERROR);
626
627	/* Setup locality information */
628	controller->node = tioca_kern->ca_closest_node;
629	return tioca_common;
630}
631
/*
 * SN PCI provider ops for TIO CA.  Consistent maps share the regular
 * dma_map implementation; force/target interrupt ops are not supported.
 */
static struct sn_pcibus_provider tioca_pci_interfaces = {
	.dma_map = tioca_dma_map,
	.dma_map_consistent = tioca_dma_map,
	.dma_unmap = tioca_dma_unmap,
	.bus_fixup = tioca_bus_fixup,
	.force_interrupt = NULL,
	.target_interrupt = NULL
};
640
/**
 * tioca_init_provider - init SN PCI provider ops for TIO CA
 *
 * Registers the TIO CA ops vector in the global sn_pci_provider table so
 * buses of ASIC type PCIIO_ASIC_TYPE_TIOCA get routed here.  Always
 * returns 0.
 */
int
tioca_init_provider(void)
{
	sn_pci_provider[PCIIO_ASIC_TYPE_TIOCA] = &tioca_pci_interfaces;
	return 0;
}
650