/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/log2.h>
#include <linux/of_device.h>

#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"

#define DRIVER_NAME	"pci_sun4v"
#define PFX		DRIVER_NAME ": "

static unsigned long vpci_major = 1;
static unsigned long vpci_minor = 1;

#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))

struct iommu_batch {
	struct device	*dev;		/* Device mapping is for.	*/
	unsigned long	prot;		/* IOMMU page protections	*/
	unsigned long	entry;		/* Index into IOTSB.		*/
	u64		*pglist;	/* List of physical pages	*/
	unsigned long	npages;		/* Number of pages in list.	*/
};

static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
static int iommu_batch_initialized;

/* Interrupts must be disabled.  */
static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	p->dev		= dev;
	p->prot		= prot;
	p->entry	= entry;
	p->npages	= 0;
}

/* Interrupts must be disabled.  */
static long iommu_batch_flush(struct iommu_batch *p)
{
	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
	unsigned long devhandle = pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	u64 *pglist = p->pglist;
	unsigned long npages = p->npages;

	/* The hypervisor may map fewer pages than requested, so keep
	 * retrying until the whole list has been consumed.
	 */
	while (npages != 0) {
		long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages, prot, __pa(pglist));
		if (unlikely(num < 0)) {
			if (printk_ratelimit())
				printk(KERN_ERR "iommu_batch_flush: IOMMU map of "
				       "[%08lx:%08llx:%lx:%lx:%lx] failed with "
				       "status %ld\n",
				       devhandle, HV_PCI_TSBID(0, entry),
				       npages, prot, __pa(pglist), num);
			return -1;
		}

		entry += num;
		npages -= num;
		pglist += num;
	}

	p->entry = entry;
	p->npages = 0;

	return 0;
}

static inline void iommu_batch_new_entry(unsigned long entry)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	if (p->entry + p->npages == entry)
		return;
	if (p->entry != ~0UL)
		iommu_batch_flush(p);
	p->entry = entry;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_add(u64 phys_page)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return iommu_batch_flush(p);

	return 0;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_end(void)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	return iommu_batch_flush(p);
}
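
/* Usage sketch (illustrative only, not part of this driver): callers such
 * as dma_4v_alloc_coherent() below bracket a run of iommu_batch_add()
 * calls with iommu_batch_start()/iommu_batch_end() while interrupts are
 * disabled, since the batch state is per-cpu.  The function name and
 * parameters here are hypothetical:
 */
#if 0
static long example_batch_map(struct device *dev, unsigned long entry,
			      unsigned long base_paddr, unsigned long npages)
{
	unsigned long flags, i;
	long err = 0;

	local_irq_save(flags);
	iommu_batch_start(dev, HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE,
			  entry);
	for (i = 0; i < npages; i++) {
		err = iommu_batch_add(base_paddr + (i * IO_PAGE_SIZE));
		if (err < 0)
			break;
	}
	if (err >= 0)
		err = iommu_batch_end();
	local_irq_restore(flags);

	return err;
}
#endif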
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp)
{
	unsigned long flags, order, first_page, base_paddr, npages, n;
	struct iommu *iommu;
	struct page *page;
	void *ret;
	long entry;
	int nid;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	npages = size >> IO_PAGE_SHIFT;

	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry == DMA_ERROR_CODE))
		goto range_alloc_fail;

	*dma_addrp = (iommu->page_table_map_base +
		      (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;

	/* Keep first_page virtual; the error paths below hand it back to
	 * free_pages(), which expects a virtual address.
	 */
	base_paddr = __pa(first_page);

	local_irq_save(flags);

	iommu_batch_start(dev,
			  (HV_PCI_MAP_ATTR_READ |
			   HV_PCI_MAP_ATTR_WRITE),
			  entry);

	for (n = 0; n < npages; n++) {
		long err = iommu_batch_add(base_paddr + (n * PAGE_SIZE));
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}

	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

iommu_map_fail:
	/* Interrupts are disabled; flags still holds the state saved by
	 * local_irq_save() above, so unlock with the _irqrestore variant.
	 */
	spin_lock(&iommu->lock);
	iommu_range_free(iommu, *dma_addrp, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

range_alloc_fail:
	free_pages(first_page, order);
	return NULL;
}

static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
				 dma_addr_t dvma)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, order, npages, entry;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;
	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, dvma, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}
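
/* Example (illustrative sketch, not part of this driver): device drivers
 * never call dma_4v_alloc_coherent() directly.  They go through the
 * generic DMA API, which dispatches via the dma_ops pointer that
 * pci_sun4v_probe() points at sun4v_dma_ops below.  'pdev' and the 8192
 * byte size are hypothetical:
 */
#if 0
	void *cpu;
	dma_addr_t dma;

	cpu = dma_alloc_coherent(&pdev->dev, 8192, &dma, GFP_KERNEL);
	if (!cpu)
		return -ENOMEM;
	/* ... program the device with 'dma', touch the buffer via 'cpu' ... */
	dma_free_coherent(&pdev->dev, 8192, cpu, dma);
#endif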
static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t sz,
				  enum dma_data_direction direction,
				  struct dma_attrs *attrs)
{
	struct iommu *iommu;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	u32 bus_addr, ret;
	unsigned long prot;
	long entry;

	iommu = dev->archdata.iommu;

	if (unlikely(direction == DMA_NONE))
		goto bad;

	oaddr = (unsigned long)(page_address(page) + offset);
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry == DMA_ERROR_CODE))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = iommu_batch_add(base_paddr);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;

iommu_map_fail:
	/* Interrupts are disabled; unlock with the flags saved above. */
	spin_lock(&iommu->lock);
	iommu_range_free(iommu, bus_addr, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return DMA_ERROR_CODE;
}

static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
			      size_t sz, enum dma_data_direction direction,
			      struct dma_attrs *attrs)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, npages;
	long entry;
	u32 devhandle;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, bus_addr, npages);

	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
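
/* Worked example (assuming the usual 8KB sparc64 IO_PAGE_SIZE): mapping
 * sz = 100 bytes whose oaddr lies 0x1ff0 bytes into an IO page crosses
 * into the next page, so
 *
 *	npages = (IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK))
 *			>> IO_PAGE_SHIFT = 2
 *
 * and two IOTSB entries are consumed even though sz is tiny; the low bits
 * of oaddr are OR-ed back into the returned bus address.
 */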
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction,
			 struct dma_attrs *attrs)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct iommu *iommu;
	unsigned long base_shift;
	long err;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	if (nelems == 0 || !iommu)
		return 0;

	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_batch_start(dev, prot, ~0UL);

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
		entry = iommu_range_alloc(dev, iommu, npages, &handle);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
				       " npages %lx\n", iommu, paddr, npages);
			goto iommu_map_failed;
		}

		iommu_batch_new_entry(entry);

		/* Convert entry to a dma_addr_t */
		dma_addr = iommu->page_table_map_base +
			(entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			err = iommu_batch_add(paddr);
			if (unlikely(err < 0L))
				goto iommu_map_failed;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 * - the merged segment would exceed max_seg_size
			 * - the merged segment would span a segment boundary
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	err = iommu_batch_end();

	if (unlikely(err < 0L))
		goto iommu_map_failed;

	spin_unlock_irqrestore(&iommu->lock, flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);
			iommu_range_free(iommu, vaddr, npages);
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    struct dma_attrs *attrs)
{
	struct pci_pbm_info *pbm;
	struct scatterlist *sg;
	struct iommu *iommu;
	unsigned long flags;
	u32 devhandle;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	spin_lock_irqsave(&iommu->lock, flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages, entry;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
		iommu_range_free(iommu, dma_handle, npages);

		entry = ((dma_handle - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		while (npages) {
			unsigned long num;

			num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
						    npages);
			entry += num;
			npages -= num;
		}

		sg = sg_next(sg);
	}

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_map_ops sun4v_dma_ops = {
	.alloc_coherent		= dma_4v_alloc_coherent,
	.free_coherent		= dma_4v_free_coherent,
	.map_page		= dma_4v_map_page,
	.unmap_page		= dma_4v_unmap_page,
	.map_sg			= dma_4v_map_sg,
	.unmap_sg		= dma_4v_unmap_sg,
};
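
/* Example (illustrative sketch, not part of this driver): a driver maps a
 * scatterlist through the generic DMA API, which lands in dma_4v_map_sg()
 * via sun4v_dma_ops above.  Note dma_map_sg() may return fewer segments
 * than it was given, because of the merging logic above.  'sgl' and
 * 'nents' are hypothetical:
 */
#if 0
	int count, i;
	struct scatterlist *sg;

	count = dma_map_sg(&pdev->dev, sgl, nents, DMA_FROM_DEVICE);
	if (!count)
		return -EIO;
	for_each_sg(sgl, sg, count, i) {
		/* program sg_dma_address(sg) / sg_dma_len(sg) into the device */
	}
	dma_unmap_sg(&pdev->dev, sgl, nents, DMA_FROM_DEVICE);
#endif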
static void __devinit pci_sun4v_scan_bus(struct pci_pbm_info *pbm,
					 struct device *parent)
{
	struct property *prop;
	struct device_node *dp;

	dp = pbm->op->dev.of_node;
	prop = of_find_property(dp, "66mhz-capable", NULL);
	pbm->is_66mhz_capable = (prop != NULL);
	pbm->pci_bus = pci_scan_one_pbm(pbm, parent);
}

static unsigned long __devinit probe_existing_entries(struct pci_pbm_info *pbm,
						      struct iommu *iommu)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long i, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (i = 0; i < arena->limit; i++) {
		unsigned long ret, io_attrs, ra;

		ret = pci_sun4v_iommu_getmap(devhandle,
					     HV_PCI_TSBID(0, i),
					     &io_attrs, &ra);
		if (ret == HV_EOK) {
			if (page_in_phys_avail(ra)) {
				pci_sun4v_iommu_demap(devhandle,
						      HV_PCI_TSBID(0, i), 1);
			} else {
				cnt++;
				__set_bit(i, arena->map);
			}
		}
	}

	return cnt;
}

static int __devinit pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
	struct iommu *iommu = pbm->iommu;
	unsigned long num_tsb_entries, sz, tsbsize;
	u32 dma_mask, dma_offset;
	const u32 *vdma;

	vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma", NULL);
	if (!vdma)
		vdma = vdma_default;

	if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
		printk(KERN_ERR PFX "Strange virtual-dma[%08x:%08x].\n",
		       vdma[0], vdma[1]);
		return -EINVAL;
	}

	dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
	num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
	tsbsize = num_tsb_entries * sizeof(iopte_t);

	dma_offset = vdma[0];

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map. */
	sz = (num_tsb_entries + 7) / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		printk(KERN_ERR PFX "Error, kzalloc(arena.map) failed.\n");
		return -ENOMEM;
	}
	iommu->arena.limit = num_tsb_entries;

	sz = probe_existing_entries(pbm, iommu);
	if (sz)
		printk(KERN_INFO "%s: Imported %lu TSB entries from OBP\n",
		       pbm->name, sz);

	return 0;
}

#ifdef CONFIG_PCI_MSI
struct pci_sun4v_msiq_entry {
	u64		version_type;
#define MSIQ_VERSION_MASK		0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT		32
#define MSIQ_TYPE_MASK			0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT			0
#define MSIQ_TYPE_NONE			0x00
#define MSIQ_TYPE_MSG			0x01
#define MSIQ_TYPE_MSI32			0x02
#define MSIQ_TYPE_MSI64			0x03
#define MSIQ_TYPE_INTX			0x08
#define MSIQ_TYPE_NONE2			0xff

	u64		intx_sysino;
	u64		reserved1;
	u64		stick;
	u64		req_id;  /* bus/device/func */
#define MSIQ_REQID_BUS_MASK		0xff00UL
#define MSIQ_REQID_BUS_SHIFT		8
#define MSIQ_REQID_DEVICE_MASK		0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT		3
#define MSIQ_REQID_FUNC_MASK		0x0007UL
#define MSIQ_REQID_FUNC_SHIFT		0

	u64		msi_address;

	/* The format of this value is message type dependent.
	 * For MSI bits 15:0 are the data from the MSI packet.
	 * For MSI-X bits 31:0 are the data from the MSI packet.
	 * For MSG, the message code and message routing code where:
	 *	bits 39:32 are the bus/device/fn of the msg target-id
	 *	bits 18:16 are the message routing code
	 *	bits 7:0 are the message code
	 * For INTx the low order 2-bits are:
	 *	00 - INTA
	 *	01 - INTB
	 *	10 - INTC
	 *	11 - INTD
	 */
	u64		msi_data;

	u64		reserved2;
};
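
/* Example (illustrative sketch): recovering the PCI requester from a queue
 * entry using the masks above.  'ep' is a hypothetical pointer to a
 * dequeued pci_sun4v_msiq_entry:
 */
#if 0
	unsigned int bus  = (ep->req_id & MSIQ_REQID_BUS_MASK) >>
		MSIQ_REQID_BUS_SHIFT;
	unsigned int dev  = (ep->req_id & MSIQ_REQID_DEVICE_MASK) >>
		MSIQ_REQID_DEVICE_SHIFT;
	unsigned int func = (ep->req_id & MSIQ_REQID_FUNC_MASK) >>
		MSIQ_REQID_FUNC_SHIFT;
	/* e.g. req_id 0x0208 decodes to bus 0x02, device 0x01, function 0 */
#endif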
static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long *head)
{
	unsigned long err, limit;

	err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -ENXIO;

	limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	if (unlikely(*head >= limit))
		return -EFBIG;

	return 0;
}

static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
				 unsigned long msiqid, unsigned long *head,
				 unsigned long *msi)
{
	struct pci_sun4v_msiq_entry *ep;
	unsigned long err, type;

	/* Note: void pointer arithmetic, 'head' is a byte offset  */
	ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
				 (pbm->msiq_ent_count *
				  sizeof(struct pci_sun4v_msiq_entry))) +
	      *head);

	if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
		return 0;

	type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
	if (unlikely(type != MSIQ_TYPE_MSI32 &&
		     type != MSIQ_TYPE_MSI64))
		return -EINVAL;

	*msi = ep->msi_data;

	err = pci_sun4v_msi_setstate(pbm->devhandle,
				     ep->msi_data /* msi_num */,
				     HV_MSISTATE_IDLE);
	if (unlikely(err))
		return -ENXIO;

	/* Clear the entry.  */
	ep->version_type &= ~MSIQ_TYPE_MASK;

	(*head) += sizeof(struct pci_sun4v_msiq_entry);
	if (*head >=
	    (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
		*head = 0;

	return 1;
}

static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long head)
{
	unsigned long err;

	err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -EINVAL;

	return 0;
}
static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
			       unsigned long msi, int is_msi64)
{
	if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
				  (is_msi64 ?
				   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
		return -ENXIO;
	if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
		return -ENXIO;
	if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
		return -ENXIO;
	return 0;
}

static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
	unsigned long err, msiqid;

	err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
	if (err)
		return -ENXIO;

	pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);

	return 0;
}

static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long err, base = __pa(pages + (i * q_size));
		unsigned long ret1, ret2;

		err = pci_sun4v_msiq_conf(pbm->devhandle,
					  pbm->msiq_first + i,
					  base, pbm->msiq_ent_count);
		if (err) {
			printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
			       err);
			goto h_error;
		}

		err = pci_sun4v_msiq_info(pbm->devhandle,
					  pbm->msiq_first + i,
					  &ret1, &ret2);
		if (err) {
			printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
			       err);
			goto h_error;
		}
		if (ret1 != base || ret2 != pbm->msiq_ent_count) {
			printk(KERN_ERR "MSI: Bogus qconf "
			       "expected[%lx:%x] got[%lx:%lx]\n",
			       base, pbm->msiq_ent_count,
			       ret1, ret2);
			goto h_error;
		}
	}

	return 0;

h_error:
	free_pages(pages, order);
	return -EINVAL;
}

static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long msiqid = pbm->msiq_first + i;

		(void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
	}

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);

	pages = (unsigned long) pbm->msi_queues;

	free_pages(pages, order);

	pbm->msi_queues = NULL;
}

static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
				    unsigned long msiqid,
				    unsigned long devino)
{
	unsigned int virt_irq = sun4v_build_irq(pbm->devhandle, devino);

	if (!virt_irq)
		return -ENOMEM;

	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
		return -EINVAL;
	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
		return -EINVAL;

	return virt_irq;
}

static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
	.get_head	= pci_sun4v_get_head,
	.dequeue_msi	= pci_sun4v_dequeue_msi,
	.set_head	= pci_sun4v_set_head,
	.msi_setup	= pci_sun4v_msi_setup,
	.msi_teardown	= pci_sun4v_msi_teardown,
	.msiq_alloc	= pci_sun4v_msiq_alloc,
	.msiq_free	= pci_sun4v_msiq_free,
	.msiq_build_irq	= pci_sun4v_msiq_build_irq,
};

static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
	sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
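
/* Flow sketch (illustrative only): the generic sparc64 MSI layer that
 * sparc64_pbm_msi_init() hooks into drives the ops above roughly like
 * this when a queue interrupt fires (error handling elided):
 */
#if 0
	unsigned long head, msi;

	ops->get_head(pbm, msiqid, &head);
	while (ops->dequeue_msi(pbm, msiqid, &head, &msi) > 0) {
		/* dispatch the virtual irq bound to 'msi' */
	}
	ops->set_head(pbm, msiqid, head);
#endif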
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

static int __devinit pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
					struct platform_device *op, u32 devhandle)
{
	struct device_node *dp = op->dev.of_node;
	int err;

	pbm->numa_node = of_node_to_nid(dp);

	pbm->pci_ops = &sun4v_pci_ops;
	pbm->config_space_reg_bits = 12;

	pbm->index = pci_num_pbms++;

	pbm->op = op;

	pbm->devhandle = devhandle;

	pbm->name = dp->full_name;

	printk(KERN_INFO "%s: SUN4V PCI Bus Module\n", pbm->name);
	printk(KERN_INFO "%s: On NUMA node %d\n", pbm->name, pbm->numa_node);

	pci_determine_mem_io_space(pbm);

	pci_get_pbm_props(pbm);

	err = pci_sun4v_iommu_init(pbm);
	if (err)
		return err;

	pci_sun4v_msi_init(pbm);

	pci_sun4v_scan_bus(pbm, &op->dev);

	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	return 0;
}

static int __devinit pci_sun4v_probe(struct platform_device *op,
				     const struct of_device_id *match)
{
	const struct linux_prom64_registers *regs;
	static int hvapi_negotiated = 0;
	struct pci_pbm_info *pbm;
	struct device_node *dp;
	struct iommu *iommu;
	u32 devhandle;
	int i, err;

	dp = op->dev.of_node;

	if (!hvapi_negotiated++) {
		err = sun4v_hvapi_register(HV_GRP_PCI,
					   vpci_major,
					   &vpci_minor);

		if (err) {
			printk(KERN_ERR PFX "Could not register hvapi, "
			       "err=%d\n", err);
			return err;
		}
		printk(KERN_INFO PFX "Registered hvapi major[%lu] minor[%lu]\n",
		       vpci_major, vpci_minor);

		dma_ops = &sun4v_dma_ops;
	}

	regs = of_get_property(dp, "reg", NULL);
	err = -ENODEV;
	if (!regs) {
		printk(KERN_ERR PFX "Could not find config registers\n");
		goto out_err;
	}
	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	err = -ENOMEM;
	if (!iommu_batch_initialized) {
		for_each_possible_cpu(i) {
			unsigned long page = get_zeroed_page(GFP_KERNEL);

			if (!page)
				goto out_err;

			per_cpu(iommu_batch, i).pglist = (u64 *) page;
		}
		iommu_batch_initialized = 1;
	}

	pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
	if (!pbm) {
		printk(KERN_ERR PFX "Could not allocate pci_pbm_info\n");
		goto out_err;
	}

	iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
	if (!iommu) {
		printk(KERN_ERR PFX "Could not allocate pbm iommu\n");
		goto out_free_controller;
	}

	pbm->iommu = iommu;

	err = pci_sun4v_pbm_init(pbm, op, devhandle);
	if (err)
		goto out_free_iommu;

	dev_set_drvdata(&op->dev, pbm);

	return 0;

out_free_iommu:
	kfree(pbm->iommu);

out_free_controller:
	kfree(pbm);

out_err:
	return err;
}

/* No __initdata here: the match table must outlive init since the driver
 * stays registered and can be re-matched after boot.
 */
static const struct of_device_id pci_sun4v_match[] = {
	{
		.name = "pci",
		.compatible = "SUNW,sun4v-pci",
	},
	{},
};

static struct of_platform_driver pci_sun4v_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE,
		.of_match_table = pci_sun4v_match,
	},
	.probe		= pci_sun4v_probe,
};

static int __init pci_sun4v_init(void)
{
	return of_register_platform_driver(&pci_sun4v_driver);
}

subsys_initcall(pci_sun4v_init);