/*
 * ioport.c:  Simple io mapping allocator.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
 *
 * 1996: sparc_free_io, 1999: ioremap()/iounmap() by Pete Zaitcev.
 *
 * 2000/01/29
 * <rth> zait: as long as pci_alloc_consistent produces something addressable,
 *	things are ok.
 * <zaitcev> rth: no, it is relevant, because get_free_pages returns you a
 *	pointer into the big page mapping
 * <rth> zait: so what?
 * <rth> zait: remap_it_my_way(virt_to_phys(get_free_page()))
 * <zaitcev> Hmm
 * <zaitcev> Suppose I did this remap_it_my_way(virt_to_phys(get_free_page())).
 *	So far so good.
 * <zaitcev> Now, driver calls pci_free_consistent(with result of
 *	remap_it_my_way()).
 * <zaitcev> How do you find the address to pass to free_pages()?
 * <rth> zait: walk the page tables? It's only two or three level after all.
 * <rth> zait: you have to walk them anyway to remove the mapping.
 * <zaitcev> Hmm
 * <zaitcev> Sounds reasonable
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pci.h>		/* struct pci_dev */
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/scatterlist.h>
#include <linux/of_device.h>

#include <asm/io.h>
#include <asm/vaddrs.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/io-unit.h>
#include <asm/leon.h>

#ifdef CONFIG_SPARC_LEON
#define mmu_inval_dma_area(p, l)	leon_flush_dcache_all()
#else
#define mmu_inval_dma_area(p, l)	/* Anton pulled it out for 2.4.0-xx */
#endif

static struct resource *_sparc_find_resource(struct resource *r,
					     unsigned long);

static void __iomem *_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz);
static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
    unsigned long size, char *name);
static void _sparc_free_io(struct resource *res);

static void register_proc_sparc_ioport(void);

/* This points at the next virtual address to use for DVMA mappings. */
static struct resource _sparc_dvma = {
	.name = "sparc_dvma", .start = DVMA_VADDR, .end = DVMA_END - 1
};
/* This points to the start of I/O mappings, visible from outside. */
/*ext*/ struct resource sparc_iomap = {
	.name = "sparc_iomap", .start = IOBASE_VADDR, .end = IOBASE_END - 1
};

/*
 * Our mini-allocator...
 * Boy this is gross! We need it because we must map I/O for
 * timers and interrupt controller before the kmalloc is available.
 */

#define XNMLN  15
#define XNRES  10	/* SS-10 uses 8 */

struct xresource {
	struct resource xres;	/* Must be first */
	int xflag;		/* 1 == used */
	char xname[XNMLN+1];
};

static struct xresource xresv[XNRES];

static struct xresource *xres_alloc(void)
{
	struct xresource *xrp;
	int n;

	xrp = xresv;
	for (n = 0; n < XNRES; n++) {
		if (xrp->xflag == 0) {
			xrp->xflag = 1;
			return xrp;
		}
		xrp++;
	}
	return NULL;
}

static void xres_free(struct xresource *xrp)
{
	xrp->xflag = 0;
}

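/*
 * Illustrative sketch (comment only, not compiled): how the early-boot
 * slots above are meant to be used.  Grab a free static slot, hand its
 * embedded struct resource to the mapping code, and mark the slot free
 * again when done.  In this file only _sparc_alloc_io()/iounmap() do
 * this; the direct calls here are hypothetical.
 *
 *	struct xresource *xrp = xres_alloc();
 *	if (xrp != NULL) {
 *		strlcpy(xrp->xname, "timer", XNMLN+1);
 *		xrp->xres.name = xrp->xname;
 *		... map the region via &xrp->xres ...
 *		xres_free(xrp);
 *	}
 */
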
/*
 * These are typically used in PCI drivers
 * which are trying to be cross-platform.
 *
 * Bus type is always zero on IIep.
 */
void __iomem *ioremap(unsigned long offset, unsigned long size)
{
	char name[14];

	sprintf(name, "phys_%08x", (u32)offset);
	return _sparc_alloc_io(0, offset, size, name);
}
EXPORT_SYMBOL(ioremap);

/*
 * Complementary to ioremap().
 */
void iounmap(volatile void __iomem *virtual)
{
	unsigned long vaddr = (unsigned long) virtual & PAGE_MASK;
	struct resource *res;

	if ((res = _sparc_find_resource(&sparc_iomap, vaddr)) == NULL) {
		printk("free_io/iounmap: cannot free %lx\n", vaddr);
		return;
	}
	_sparc_free_io(res);

	if ((char *)res >= (char*)xresv && (char *)res < (char *)&xresv[XNRES]) {
		xres_free((struct xresource *)res);
	} else {
		kfree(res);
	}
}
EXPORT_SYMBOL(iounmap);

void __iomem *of_ioremap(struct resource *res, unsigned long offset,
			 unsigned long size, char *name)
{
	return _sparc_alloc_io(res->flags & 0xF,
			       res->start + offset,
			       size, name);
}
EXPORT_SYMBOL(of_ioremap);

void of_iounmap(struct resource *res, void __iomem *base, unsigned long size)
{
	iounmap(base);
}
EXPORT_SYMBOL(of_iounmap);

/*
 * Meat of mapping
 */
static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
    unsigned long size, char *name)
{
	static int printed_full;
	struct xresource *xres;
	struct resource *res;
	char *tack;
	int tlen;
	void __iomem *va;	/* P3 diag */

	if (name == NULL)
		name = "???";

	if ((xres = xres_alloc()) != NULL) {
		tack = xres->xname;
		res = &xres->xres;
	} else {
		if (!printed_full) {
			printk("ioremap: done with statics, switching to malloc\n");
			printed_full = 1;
		}
		tlen = strlen(name);
		tack = kmalloc(sizeof (struct resource) + tlen + 1, GFP_KERNEL);
		if (tack == NULL)
			return NULL;
		memset(tack, 0, sizeof(struct resource));
		res = (struct resource *) tack;
		tack += sizeof (struct resource);
	}

	strlcpy(tack, name, XNMLN+1);
	res->name = tack;

	va = _sparc_ioremap(res, busno, phys, size);
	/* printk("ioremap(0x%x:%08lx[0x%lx])=%p\n", busno, phys, size, va); */ /* P3 diag */
	return va;
}

/*
 */
static void __iomem *
_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz)
{
	unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK);

	if (allocate_resource(&sparc_iomap, res,
	    (offset + sz + PAGE_SIZE-1) & PAGE_MASK,
	    sparc_iomap.start, sparc_iomap.end, PAGE_SIZE, NULL, NULL) != 0) {
		/* Usually we cannot see printks in this case. */
		prom_printf("alloc_io_res(%s): cannot occupy\n",
		    (res->name != NULL) ? res->name : "???");
		prom_halt();
	}

	pa &= PAGE_MASK;
	sparc_mapiorange(bus, pa, res->start, res->end - res->start + 1);

	return (void __iomem *)(unsigned long)(res->start + offset);
}

/*
 * Complementary to _sparc_ioremap().
 */
static void _sparc_free_io(struct resource *res)
{
	unsigned long plen;

	plen = res->end - res->start + 1;
	BUG_ON((plen & (PAGE_SIZE-1)) != 0);
	sparc_unmapiorange(res->start, plen);
	release_resource(res);
}

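/*
 * Illustrative sketch (comment only, not compiled): typical driver use
 * of the pair above.  PHYS_BASE and the register offset are made up.
 *
 *	void __iomem *regs = ioremap(PHYS_BASE, 0x1000);
 *	if (regs != NULL) {
 *		u32 csr = readl(regs + 0x04);
 *		...
 *		iounmap(regs);
 *	}
 */
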
#ifdef CONFIG_SBUS

void sbus_set_sbus64(struct device *dev, int x)
{
	printk("sbus_set_sbus64: unsupported\n");
}
EXPORT_SYMBOL(sbus_set_sbus64);

/*
 * Allocate a chunk of memory suitable for DMA.
 * Typically devices use them for control blocks.
 * CPU may access them without any explicit flushing.
 */
static void *sbus_alloc_coherent(struct device *dev, size_t len,
				 dma_addr_t *dma_addrp, gfp_t gfp)
{
	struct platform_device *op = to_platform_device(dev);
	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
	unsigned long va;
	struct resource *res;
	int order;

	if (len <= 0) {
		return NULL;
	}
	if (len > 256*1024) {			/* __get_free_pages() limit */
		return NULL;
	}

	order = get_order(len_total);
	if ((va = __get_free_pages(GFP_KERNEL|__GFP_COMP, order)) == 0)
		goto err_nopages;

	if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL)
		goto err_nomem;

	if (allocate_resource(&_sparc_dvma, res, len_total,
	    _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
		printk("sbus_alloc_consistent: cannot occupy 0x%lx\n", len_total);
		goto err_nova;
	}
	mmu_inval_dma_area(va, len_total);
	// sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
	if (mmu_map_dma_area(dev, dma_addrp, va, res->start, len_total) != 0)
		goto err_noiommu;

	res->name = op->dev.of_node->name;

	return (void *)(unsigned long)res->start;

	/* Unwind in reverse order of allocation, so each error path
	 * releases exactly what it acquired (the old label order
	 * leaked the pages when the kzalloc above failed). */
err_noiommu:
	release_resource(res);
err_nova:
	kfree(res);
err_nomem:
	free_pages(va, order);
err_nopages:
	return NULL;
}

static void sbus_free_coherent(struct device *dev, size_t n, void *p,
			       dma_addr_t ba)
{
	struct resource *res;
	struct page *pgv;

	if ((res = _sparc_find_resource(&_sparc_dvma,
					(unsigned long)p)) == NULL) {
		printk("sbus_free_consistent: cannot free %p\n", p);
		return;
	}

	if (((unsigned long)p & (PAGE_SIZE-1)) != 0) {
		printk("sbus_free_consistent: unaligned va %p\n", p);
		return;
	}

	n = (n + PAGE_SIZE-1) & PAGE_MASK;
	if ((res->end-res->start)+1 != n) {
		printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n",
		    (long)((res->end-res->start)+1), n);
		return;
	}

	release_resource(res);
	kfree(res);

	/* mmu_inval_dma_area(va, n); */ /* it's consistent, isn't it */
	pgv = virt_to_page(p);
	mmu_unmap_dma_area(dev, ba, n);

	__free_pages(pgv, get_order(n));
}

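/*
 * Illustrative sketch (comment only, not compiled): a driver reaches
 * the coherent routines above through the generic DMA API and the
 * sbus_dma_ops table below.  LEN is hypothetical.
 *
 *	dma_addr_t ba;
 *	void *cb = dma_alloc_coherent(&op->dev, LEN, &ba, GFP_KERNEL);
 *	if (cb != NULL) {
 *		... hand ba to the device, access cb from the CPU ...
 *		dma_free_coherent(&op->dev, LEN, cb, ba);
 *	}
 */
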
/*
 * Map a chunk of memory so that devices can see it.
 * CPU view of this memory may be inconsistent with
 * a device view and explicit flushing is necessary.
 */
static dma_addr_t sbus_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t len,
				enum dma_data_direction dir,
				struct dma_attrs *attrs)
{
	void *va = page_address(page) + offset;

	if (len <= 0) {
		return 0;
	}
	if (len > 256*1024) {			/* __get_free_pages() limit */
		return 0;
	}
	return mmu_get_scsi_one(dev, va, len);
}

static void sbus_unmap_page(struct device *dev, dma_addr_t ba, size_t n,
			    enum dma_data_direction dir, struct dma_attrs *attrs)
{
	mmu_release_scsi_one(dev, ba, n);
}

static int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n,
		       enum dma_data_direction dir, struct dma_attrs *attrs)
{
	mmu_get_scsi_sgl(dev, sg, n);

	return n;
}

static void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n,
			  enum dma_data_direction dir, struct dma_attrs *attrs)
{
	mmu_release_scsi_sgl(dev, sg, n);
}

static void sbus_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
				 int n, enum dma_data_direction dir)
{
	BUG();
}

static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				    int n, enum dma_data_direction dir)
{
	BUG();
}

struct dma_map_ops sbus_dma_ops = {
	.alloc_coherent		= sbus_alloc_coherent,
	.free_coherent		= sbus_free_coherent,
	.map_page		= sbus_map_page,
	.unmap_page		= sbus_unmap_page,
	.map_sg			= sbus_map_sg,
	.unmap_sg		= sbus_unmap_sg,
	.sync_sg_for_cpu	= sbus_sync_sg_for_cpu,
	.sync_sg_for_device	= sbus_sync_sg_for_device,
};

struct dma_map_ops *dma_ops = &sbus_dma_ops;
EXPORT_SYMBOL(dma_ops);

static int __init sparc_register_ioport(void)
{
	register_proc_sparc_ioport();

	return 0;
}

arch_initcall(sparc_register_ioport);

#endif /* CONFIG_SBUS */

#ifdef CONFIG_PCI

/* Allocate and map kernel buffer using consistent mode DMA for a device.
 * hwdev should be a valid struct pci_dev pointer for PCI devices.
 */
static void *pci32_alloc_coherent(struct device *dev, size_t len,
				  dma_addr_t *pba, gfp_t gfp)
{
	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
	unsigned long va;
	struct resource *res;
	int order;

	if (len == 0) {
		return NULL;
	}
	if (len > 256*1024) {			/* __get_free_pages() limit */
		return NULL;
	}

	order = get_order(len_total);
	va = __get_free_pages(GFP_KERNEL, order);
	if (va == 0) {
		printk("pci_alloc_consistent: no %ld pages\n", len_total>>PAGE_SHIFT);
		return NULL;
	}

	if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
		free_pages(va, order);
		printk("pci_alloc_consistent: no core\n");
		return NULL;
	}

	if (allocate_resource(&_sparc_dvma, res, len_total,
	    _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
		printk("pci_alloc_consistent: cannot occupy 0x%lx\n", len_total);
		free_pages(va, order);
		kfree(res);
		return NULL;
	}
	mmu_inval_dma_area(va, len_total);
	sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);

	*pba = virt_to_phys(va);	/* equals virt_to_bus (R.I.P.) for us. */
	return (void *) res->start;
}

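/*
 * Illustrative sketch (comment only, not compiled): the classic PCI
 * wrappers land here through pci32_dma_ops below.  SIZE is hypothetical.
 *
 *	dma_addr_t handle;
 *	void *buf = pci_alloc_consistent(pdev, SIZE, &handle);
 *	...
 *	pci_free_consistent(pdev, SIZE, buf, handle);
 */
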
/* Free and unmap a consistent DMA buffer.
 * cpu_addr is what was returned from pci_alloc_consistent,
 * size must be the same as what was passed into pci_alloc_consistent,
 * and likewise dma_addr must be the same as what *dma_addrp was set to.
 *
 * References to the memory and mappings associated with cpu_addr/dma_addr
 * past this call are illegal.
 */
static void pci32_free_coherent(struct device *dev, size_t n, void *p,
				dma_addr_t ba)
{
	struct resource *res;
	unsigned long pgp;

	if ((res = _sparc_find_resource(&_sparc_dvma,
					(unsigned long)p)) == NULL) {
		printk("pci_free_consistent: cannot free %p\n", p);
		return;
	}

	if (((unsigned long)p & (PAGE_SIZE-1)) != 0) {
		printk("pci_free_consistent: unaligned va %p\n", p);
		return;
	}

	n = (n + PAGE_SIZE-1) & PAGE_MASK;
	if ((res->end-res->start)+1 != n) {
		printk("pci_free_consistent: region 0x%lx asked 0x%lx\n",
		    (long)((res->end-res->start)+1), (long)n);
		return;
	}

	pgp = (unsigned long) phys_to_virt(ba);	/* bus_to_virt actually */
	mmu_inval_dma_area(pgp, n);
	sparc_unmapiorange((unsigned long)p, n);

	release_resource(res);
	kfree(res);

	free_pages(pgp, get_order(n));
}

/*
 * Same as pci_map_single, but with pages.
 */
static dma_addr_t pci32_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	/* IIep is write-through, not flushing. */
	return page_to_phys(page) + offset;
}

/* Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * above pci_map_single interface.  Here the scatter-gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for pci_map_single are
 * the same here.
 */
static int pci32_map_sg(struct device *device, struct scatterlist *sgl,
			int nents, enum dma_data_direction dir,
			struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int n;

	/* IIep is write-through, not flushing. */
	for_each_sg(sgl, sg, nents, n) {
		BUG_ON(page_address(sg_page(sg)) == NULL);
		sg->dma_address = virt_to_phys(sg_virt(sg));
		sg->dma_length = sg->length;
	}
	return nents;
}

/* Unmap a set of streaming mode DMA translations.
 * Again, cpu read rules concerning calls here are the same as for
 * pci_unmap_single() above.
 */
static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl,
			   int nents, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int n;

	if (dir != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			BUG_ON(page_address(sg_page(sg)) == NULL);
			mmu_inval_dma_area(
			    (unsigned long) page_address(sg_page(sg)),
			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
		}
	}
}

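/*
 * Illustrative sketch (comment only, not compiled): streaming
 * scatter-gather use via the classic PCI wrappers.  The sgl setup and
 * NENTS come from the caller and are assumed here.
 *
 *	int mapped = pci_map_sg(pdev, sgl, NENTS, PCI_DMA_FROMDEVICE);
 *	... program the transfer from sg_dma_address()/sg_dma_len() ...
 *	pci_unmap_sg(pdev, sgl, NENTS, PCI_DMA_FROMDEVICE);
 */
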
/* Make physical memory consistent for a single
 * streaming mode DMA translation before or after a transfer.
 *
 * If you perform a pci_map_single() but wish to interrogate the
 * buffer using the cpu, yet do not wish to teardown the PCI dma
 * mapping, you must call this function before doing so.  At the
 * next point you give the PCI dma address back to the card, you
 * must first perform a pci_dma_sync_single_for_device, and then the
 * device again owns the buffer.
 */
static void pci32_sync_single_for_cpu(struct device *dev, dma_addr_t ba,
				      size_t size, enum dma_data_direction dir)
{
	if (dir != PCI_DMA_TODEVICE) {
		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
		    (size + PAGE_SIZE-1) & PAGE_MASK);
	}
}

static void pci32_sync_single_for_device(struct device *dev, dma_addr_t ba,
					 size_t size, enum dma_data_direction dir)
{
	if (dir != PCI_DMA_TODEVICE) {
		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
		    (size + PAGE_SIZE-1) & PAGE_MASK);
	}
}

/* Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 *
 * The same as pci_dma_sync_single_* but for a scatter-gather list,
 * same rules and usage.
 */
static void pci32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
				  int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int n;

	if (dir != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			BUG_ON(page_address(sg_page(sg)) == NULL);
			mmu_inval_dma_area(
			    (unsigned long) page_address(sg_page(sg)),
			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
		}
	}
}

static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *sgl,
				     int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int n;

	if (dir != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			BUG_ON(page_address(sg_page(sg)) == NULL);
			mmu_inval_dma_area(
			    (unsigned long) page_address(sg_page(sg)),
			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
		}
	}
}

struct dma_map_ops pci32_dma_ops = {
	.alloc_coherent		= pci32_alloc_coherent,
	.free_coherent		= pci32_free_coherent,
	.map_page		= pci32_map_page,
	.map_sg			= pci32_map_sg,
	.unmap_sg		= pci32_unmap_sg,
	.sync_single_for_cpu	= pci32_sync_single_for_cpu,
	.sync_single_for_device	= pci32_sync_single_for_device,
	.sync_sg_for_cpu	= pci32_sync_sg_for_cpu,
	.sync_sg_for_device	= pci32_sync_sg_for_device,
};
EXPORT_SYMBOL(pci32_dma_ops);

#endif /* CONFIG_PCI */

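/*
 * Illustrative sketch (comment only, not compiled): ping-ponging a
 * streaming buffer between CPU and device with the classic PCI
 * wrappers.  buf and SIZE are hypothetical.
 *
 *	dma_addr_t ba = pci_map_single(pdev, buf, SIZE, PCI_DMA_FROMDEVICE);
 *	... device DMAs into the buffer ...
 *	pci_dma_sync_single_for_cpu(pdev, ba, SIZE, PCI_DMA_FROMDEVICE);
 *	... CPU reads buf ...
 *	pci_dma_sync_single_for_device(pdev, ba, SIZE, PCI_DMA_FROMDEVICE);
 *	... device may DMA again ...
 *	pci_unmap_single(pdev, ba, SIZE, PCI_DMA_FROMDEVICE);
 */
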
/*
 * Return whether the given PCI device DMA address mask can be
 * supported properly.  For example, if your device can only drive the
 * low 24-bits during PCI bus mastering, then you would pass
 * 0x00ffffff as the mask to this function.
 */
int dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return 1;
#endif
	return 0;
}
EXPORT_SYMBOL(dma_supported);

#ifdef CONFIG_PROC_FS

static int sparc_io_proc_show(struct seq_file *m, void *v)
{
	struct resource *root = m->private, *r;
	const char *nm;

	for (r = root->child; r != NULL; r = r->sibling) {
		if ((nm = r->name) == NULL)
			nm = "???";
		seq_printf(m, "%016llx-%016llx: %s\n",
				(unsigned long long)r->start,
				(unsigned long long)r->end, nm);
	}

	return 0;
}

static int sparc_io_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, sparc_io_proc_show, PDE(inode)->data);
}

static const struct file_operations sparc_io_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= sparc_io_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif /* CONFIG_PROC_FS */

static struct resource *_sparc_find_resource(struct resource *root,
					     unsigned long hit)
{
	struct resource *tmp;

	for (tmp = root->child; tmp != NULL; tmp = tmp->sibling) {
		if (tmp->start <= hit && tmp->end >= hit)
			return tmp;
	}
	return NULL;
}

static void register_proc_sparc_ioport(void)
{
#ifdef CONFIG_PROC_FS
	proc_create_data("io_map", 0, NULL, &sparc_io_proc_fops, &sparc_iomap);
	proc_create_data("dvma_map", 0, NULL, &sparc_io_proc_fops, &_sparc_dvma);
#endif
}
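
/*
 * Illustrative /proc/io_map output (the format follows the seq_printf()
 * in sparc_io_proc_show() above; the addresses and the "phys_%08x" name
 * shown are hypothetical):
 *
 *	00000000fe000000-00000000fe000fff: phys_1fc00000
 */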