Lines Matching defs:cd

38  * @cd: genwqe device descriptor
44 int __genwqe_writeq(struct genwqe_dev *cd, u64 byte_offs, u64 val)
46 struct pci_dev *pci_dev = cd->pci_dev;
48 if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
51 if (cd->mmio == NULL)
57 __raw_writeq((__force u64)cpu_to_be64(val), cd->mmio + byte_offs);
63 * @cd: genwqe device descriptor
68 u64 __genwqe_readq(struct genwqe_dev *cd, u64 byte_offs)
70 if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
73 if ((cd->err_inject & GENWQE_INJECT_GFIR_FATAL) &&
77 if ((cd->err_inject & GENWQE_INJECT_GFIR_INFO) &&
81 if (cd->mmio == NULL)
84 return be64_to_cpu((__force __be64)__raw_readq(cd->mmio + byte_offs));
89 * @cd: genwqe device descriptor
95 int __genwqe_writel(struct genwqe_dev *cd, u64 byte_offs, u32 val)
97 struct pci_dev *pci_dev = cd->pci_dev;
99 if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
102 if (cd->mmio == NULL)
108 __raw_writel((__force u32)cpu_to_be32(val), cd->mmio + byte_offs);
114 * @cd: genwqe device descriptor
119 u32 __genwqe_readl(struct genwqe_dev *cd, u64 byte_offs)
121 if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
124 if (cd->mmio == NULL)
127 return be32_to_cpu((__force __be32)__raw_readl(cd->mmio + byte_offs));
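The four accessors above wrap __raw_writeq/__raw_readq and their 32-bit counterparts with explicit big-endian conversion, a software error-injection hook, and a NULL-mmio guard. A minimal sketch of the 64-bit pair, assembled from the matched lines; the -EIO and all-ones failure values, the pci_channel_offline() guard (which would explain the otherwise-unused pci_dev local at line 46), and the header set are assumptions, not confirmed by the fragments:

#include <linux/io.h>
#include <linux/pci.h>

/* struct genwqe_dev and GENWQE_INJECT_* come from the driver headers. */

int __genwqe_writeq(struct genwqe_dev *cd, u64 byte_offs, u64 val)
{
	struct pci_dev *pci_dev = cd->pci_dev;

	if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
		return -EIO;			/* simulated card failure */

	if (cd->mmio == NULL)
		return -EIO;			/* BAR not mapped */

	if (pci_channel_offline(pci_dev))	/* assumption: EEH guard */
		return -EIO;

	/* registers are big-endian; __raw_writeq() does no byte swap */
	__raw_writeq((__force u64)cpu_to_be64(val), cd->mmio + byte_offs);
	return 0;
}

u64 __genwqe_readq(struct genwqe_dev *cd, u64 byte_offs)
{
	if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
		return 0xffffffffffffffffull;	/* all-ones, like a PCIe timeout */

	if (cd->mmio == NULL)
		return 0xffffffffffffffffull;

	return be64_to_cpu((__force __be64)__raw_readq(cd->mmio + byte_offs));
}

The matched lines 73 and 77 show two further injection branches (GENWQE_INJECT_GFIR_FATAL/GFIR_INFO) that fire only for specific register offsets; they are elided in the sketch.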
132 * @cd: genwqe device descriptor
138 int genwqe_read_app_id(struct genwqe_dev *cd, char *app_name, int len)
141 u32 app_id = (u32)cd->app_unitcfg;
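genwqe_read_app_id() turns the low 32 bits of app_unitcfg into a printable application name, one character per byte, high byte first. A hedged sketch; the blank-skipping and the 'X' substitute for non-printable bytes are assumptions about lines the search did not match:

#include <linux/ctype.h>
#include <linux/string.h>

int genwqe_read_app_id(struct genwqe_dev *cd, char *app_name, int len)
{
	int i, j = 0;
	u32 app_id = (u32)cd->app_unitcfg;	/* matched line 141 */

	memset(app_name, 0, len);
	for (i = 0; i < 4 && j < len - 1; i++) {
		char ch = (char)(app_id >> (24 - 8 * i));

		if (ch == ' ')
			continue;		/* assumption: skip padding */
		app_name[j++] = isprint(ch) ? ch : 'X';
	}
	return j;
}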
210 void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
216 return dma_alloc_coherent(&cd->pci_dev->dev, size, dma_handle,
220 void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size,
226 dma_free_coherent(&cd->pci_dev->dev, size, vaddr, dma_handle);
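Both wrappers simply route coherent DMA allocations through the card's PCI device, so callers never touch cd->pci_dev directly. A usage sketch for a hypothetical one-page descriptor queue:

#include <linux/dma-mapping.h>

static int example_alloc_queue(struct genwqe_dev *cd)
{
	dma_addr_t dma_handle;
	void *vaddr;

	vaddr = __genwqe_alloc_consistent(cd, PAGE_SIZE, &dma_handle);
	if (vaddr == NULL)
		return -ENOMEM;

	/* ... hand dma_handle to the card, touch the queue via vaddr ... */

	__genwqe_free_consistent(cd, PAGE_SIZE, vaddr, dma_handle);
	return 0;
}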
229 static void genwqe_unmap_pages(struct genwqe_dev *cd, dma_addr_t *dma_list,
233 struct pci_dev *pci_dev = cd->pci_dev;
242 static int genwqe_map_pages(struct genwqe_dev *cd,
247 struct pci_dev *pci_dev = cd->pci_dev;
271 genwqe_unmap_pages(cd, dma_list, num_pages);
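The matched cleanup call at line 271 implies genwqe_map_pages() unwinds via genwqe_unmap_pages() when a per-page mapping fails. A hedged sketch of that loop; the full parameter list, DMA_BIDIRECTIONAL, and the convention that a zeroed dma_list slot means "not mapped" are assumptions:

#include <linux/dma-mapping.h>

static int genwqe_map_pages(struct genwqe_dev *cd, struct page **page_list,
			    int num_pages, dma_addr_t *dma_list)
{
	int i;
	struct pci_dev *pci_dev = cd->pci_dev;

	/* assumption: a zeroed slot tells the unmapper to skip it */
	for (i = 0; i < num_pages; i++)
		dma_list[i] = 0x0;

	for (i = 0; i < num_pages; i++) {
		dma_addr_t daddr;

		daddr = dma_map_page(&pci_dev->dev, page_list[i], 0,
				     PAGE_SIZE, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(&pci_dev->dev, daddr))
			goto err;
		dma_list[i] = daddr;
	}
	return 0;
 err:
	genwqe_unmap_pages(cd, dma_list, num_pages);	/* matched line 271 */
	return -EIO;
}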
291 int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
295 struct pci_dev *pci_dev = cd->pci_dev;
317 sgl->sgl = __genwqe_alloc_consistent(cd, sgl->sgl_size,
327 sgl->fpage = __genwqe_alloc_consistent(cd, PAGE_SIZE,
340 sgl->lpage = __genwqe_alloc_consistent(cd, PAGE_SIZE,
355 __genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage,
360 __genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
365 __genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl,
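The three allocations and three frees matched above suggest the classic goto-unwind skeleton: sgl, fpage and lpage are allocated in order and released in reverse on failure. A hedged reconstruction; the label names, the dma-address field names and the trailing parameters are assumptions, and the real function presumably allocates fpage/lpage only when the user buffer is unaligned at head or tail:

int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
			  void __user *user_addr, size_t user_size, int write)
{
	sgl->sgl = __genwqe_alloc_consistent(cd, sgl->sgl_size,
					     &sgl->sgl_dma_addr);
	if (sgl->sgl == NULL)
		return -ENOMEM;

	sgl->fpage = __genwqe_alloc_consistent(cd, PAGE_SIZE,
					       &sgl->fpage_dma_addr);
	if (sgl->fpage == NULL)
		goto err_out;

	sgl->lpage = __genwqe_alloc_consistent(cd, PAGE_SIZE,
					       &sgl->lpage_dma_addr);
	if (sgl->lpage == NULL)
		goto err_out1;

	return 0;

 err_out1:
	__genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
				 sgl->fpage_dma_addr);	/* cf. line 360 */
 err_out:
	__genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl,
				 sgl->sgl_dma_addr);	/* cf. line 365 */
	return -ENOMEM;
}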
374 int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
466 * @cd: genwqe device descriptor
473 int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl)
478 struct pci_dev *pci_dev = cd->pci_dev;
491 __genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
508 __genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage,
513 __genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl,
524 * @cd: pointer to genwqe device
545 int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
550 struct pci_dev *pci_dev = cd->pci_dev;
595 rc = genwqe_map_pages(cd, m->page_list, m->nr_pages, m->dma_list);
617 * @cd: pointer to genwqe device
620 int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m)
622 struct pci_dev *pci_dev = cd->pci_dev;
631 genwqe_unmap_pages(cd, m->dma_list, m->nr_pages);
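genwqe_user_vmap() pins a user buffer and DMA-maps its pages (via genwqe_map_pages(), per line 595); genwqe_user_vunmap() undoes both. A usage sketch pairing the two; struct dma_mapping's layout beyond the page_list/dma_list/nr_pages fields seen in the listing, and the exact size type, are taken on faith:

#include <linux/string.h>

static int example_dma_user_buffer(struct genwqe_dev *cd,
				   void __user *uaddr, unsigned long size)
{
	int rc;
	struct dma_mapping m;

	memset(&m, 0, sizeof(m));
	rc = genwqe_user_vmap(cd, &m, (void *)uaddr, size);
	if (rc != 0)
		return rc;	/* pinning or DMA mapping failed */

	/* ... program m.dma_list[] into the card's scatter-gather list ... */

	return genwqe_user_vunmap(cd, &m);
}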
649 * @cd: pointer to the genwqe device descriptor
655 u8 genwqe_card_type(struct genwqe_dev *cd)
657 u64 card_type = cd->slu_unitcfg;
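The card type is a bit-field of slu_unitcfg. A sketch with placeholder mask/shift values; the real driver defines its own IO_SLU_UNITCFG_* constants, so both numbers here are assumptions:

#define EXAMPLE_TYPE_MASK  0x0ff00000ull	/* assumption */
#define EXAMPLE_TYPE_SHIFT 20			/* assumption */

u8 genwqe_card_type(struct genwqe_dev *cd)
{
	u64 card_type = cd->slu_unitcfg;	/* matched line 657 */

	return (u8)((card_type & EXAMPLE_TYPE_MASK) >> EXAMPLE_TYPE_SHIFT);
}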
664 * @cd: pointer to the genwqe device descriptor
666 int genwqe_card_reset(struct genwqe_dev *cd)
669 struct pci_dev *pci_dev = cd->pci_dev;
671 if (!genwqe_is_privileged(cd))
675 __genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, 0x1ull);
677 __genwqe_readq(cd, IO_HSU_FIR_CLR);
678 __genwqe_readq(cd, IO_APP_FIR_CLR);
679 __genwqe_readq(cd, IO_SLU_FIR_CLR);
689 softrst = __genwqe_readq(cd, IO_SLC_CFGREG_SOFTRESET) & 0x3cull;
690 __genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, softrst | 0x2ull);
695 if (genwqe_need_err_masking(cd)) {
698 __genwqe_writeq(cd, IO_SLC_MISC_DEBUG, 0x0aull);
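Read in order, the matched lines give the reset sequence: assert soft reset, clear the three FIR (fault isolation register) banks by reading their *_FIR_CLR addresses, preserve bits 2..5 of SOFTRESET while requesting a bitstream reload via bit 1, then reapply error masking on affected bitstreams. A hedged reconstruction; the settle delay and the -ENODEV return for unprivileged functions are assumptions:

#include <linux/delay.h>

int genwqe_card_reset(struct genwqe_dev *cd)
{
	u64 softrst;

	if (!genwqe_is_privileged(cd))
		return -ENODEV;			/* assumption: PF only */

	__genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, 0x1ull);
	msleep(1000);				/* assumption: settle time */

	/* reading the *_FIR_CLR addresses clears latched errors */
	__genwqe_readq(cd, IO_HSU_FIR_CLR);
	__genwqe_readq(cd, IO_APP_FIR_CLR);
	__genwqe_readq(cd, IO_SLU_FIR_CLR);

	/* keep bits 2..5, then request the bitstream reload (bit 1) */
	softrst = __genwqe_readq(cd, IO_SLC_CFGREG_SOFTRESET) & 0x3cull;
	__genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, softrst | 0x2ull);

	if (genwqe_need_err_masking(cd))
		__genwqe_writeq(cd, IO_SLC_MISC_DEBUG, 0x0aull);

	return 0;
}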
703 int genwqe_read_softreset(struct genwqe_dev *cd)
707 if (!genwqe_is_privileged(cd))
710 bitstream = __genwqe_readq(cd, IO_SLU_BITSTREAM) & 0x1;
711 cd->softreset = (bitstream == 0) ? 0x8ull : 0xcull;
717 * @cd: pointer to the device
721 int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count)
725 rc = pci_alloc_irq_vectors(cd->pci_dev, 1, count, PCI_IRQ_MSI);
733 * @cd: pointer to the device
735 void genwqe_reset_interrupt_capability(struct genwqe_dev *cd)
737 pci_free_irq_vectors(cd->pci_dev);
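genwqe_set_interrupt_capability() is a thin wrapper over pci_alloc_irq_vectors() (MSI only, at least one vector), and the reset variant releases them with pci_free_irq_vectors(). A usage sketch; the vector count and the assumption that the wrapper propagates pci_alloc_irq_vectors()'s negative errno are illustrative:

#include <linux/pci.h>

static int example_setup_irqs(struct genwqe_dev *cd)
{
	int rc;

	rc = genwqe_set_interrupt_capability(cd, 8);	/* up to 8 MSI vectors */
	if (rc < 0)
		return rc;

	/* ... request_irq(pci_irq_vector(cd->pci_dev, 0), ...) ... */

	genwqe_reset_interrupt_capability(cd);
	return 0;
}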
742 * @cd: card device
750 static int set_reg_idx(struct genwqe_dev *cd, struct genwqe_reg *r,
764 static int set_reg(struct genwqe_dev *cd, struct genwqe_reg *r,
767 return set_reg_idx(cd, r, i, m, addr, 0, val);
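set_reg() fixes the sub-index to 0, so set_reg_idx() is evidently the primitive that appends one register snapshot to the FFDC array while honoring the max_regs bound. A hedged sketch; struct genwqe_reg's field names and the -EFAULT overflow return are assumptions:

static int set_reg_idx(struct genwqe_dev *cd, struct genwqe_reg *r,
		       unsigned int *i, unsigned int m, u32 addr,
		       u32 idx, u64 val)
{
	if (*i >= m)			/* FFDC buffer full: drop snapshot */
		return -EFAULT;

	r[*i].addr = addr;		/* assumption: field names */
	r[*i].idx = idx;
	r[*i].val = val;
	*i += 1;
	return 0;
}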
770 int genwqe_read_ffdc_regs(struct genwqe_dev *cd, struct genwqe_reg *regs,
778 gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
779 set_reg(cd, regs, &idx, max_regs, IO_SLC_CFGREG_GFIR, gfir);
782 sluid = __genwqe_readq(cd, IO_SLU_UNITCFG); /* 0x00000000 */
783 set_reg(cd, regs, &idx, max_regs, IO_SLU_UNITCFG, sluid);
786 appid = __genwqe_readq(cd, IO_APP_UNITCFG); /* 0x02000000 */
787 set_reg(cd, regs, &idx, max_regs, IO_APP_UNITCFG, appid);
794 ufir = __genwqe_readq(cd, ufir_addr);
795 set_reg(cd, regs, &idx, max_regs, ufir_addr, ufir);
799 ufec = __genwqe_readq(cd, ufec_addr);
800 set_reg(cd, regs, &idx, max_regs, ufec_addr, ufec);
808 sfir = __genwqe_readq(cd, sfir_addr);
809 set_reg(cd, regs, &idx, max_regs, sfir_addr, sfir);
812 sfec = __genwqe_readq(cd, sfec_addr);
813 set_reg(cd, regs, &idx, max_regs, sfec_addr, sfec);
827 * @cd: genwqe device descriptor
830 int genwqe_ffdc_buff_size(struct genwqe_dev *cd, int uid)
837 eevptr = __genwqe_readq(cd, eevptr_addr);
843 val = __genwqe_readq(cd, l_addr);
866 val = __genwqe_readq(cd, addr);
882 * @cd: genwqe device descriptor
887 int genwqe_ffdc_buff_read(struct genwqe_dev *cd, int uid,
896 eevptr = __genwqe_readq(cd, eevptr_addr);
901 e = __genwqe_readq(cd, l_addr);
912 val = __genwqe_readq(cd, d_addr);
913 set_reg_idx(cd, regs, &idx, max_regs,
919 val = __genwqe_readq(cd, d_addr);
920 set_reg_idx(cd, regs, &idx, max_regs,
935 val = __genwqe_readq(cd, addr);
953 __genwqe_writeq(cd, addr, diag_sel);
960 val = __genwqe_readq(cd, addr);
961 set_reg_idx(cd, regs, &idx, max_regs, addr,
971 * @cd: genwqe device descriptor
979 int genwqe_write_vreg(struct genwqe_dev *cd, u32 reg, u64 val, int func)
981 __genwqe_writeq(cd, IO_PF_SLC_VIRTUAL_WINDOW, func & 0xf);
982 __genwqe_writeq(cd, reg, val);
988 * @cd: genwqe device descriptor
995 u64 genwqe_read_vreg(struct genwqe_dev *cd, u32 reg, int func)
997 __genwqe_writeq(cd, IO_PF_SLC_VIRTUAL_WINDOW, func & 0xf);
998 return __genwqe_readq(cd, reg);
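Both vreg helpers use the same pattern: the PF first writes the target function number into IO_PF_SLC_VIRTUAL_WINDOW, and the immediately following register access is routed to that function's copy. Since the window write and the access are two separate MMIO operations, callers presumably serialize them. A usage sketch; the register argument and VF number are illustrative:

static void example_tweak_vf_reg(struct genwqe_dev *cd, u32 reg)
{
	u64 val;

	val = genwqe_read_vreg(cd, reg, 2);	/* select func 2, then read */
	genwqe_write_vreg(cd, reg, val | 0x1ull, 2); /* select again, write */
}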
1003 * @cd: genwqe device descriptor
1014 int genwqe_base_clock_frequency(struct genwqe_dev *cd)
1019 speed = (u16)((cd->slu_unitcfg >> 28) & 0x0full);
1028 * @cd: genwqe device descriptor
1032 void genwqe_stop_traps(struct genwqe_dev *cd)
1034 __genwqe_writeq(cd, IO_SLC_MISC_DEBUG_SET, 0xcull);
1039 * @cd: genwqe device descriptor
1043 void genwqe_start_traps(struct genwqe_dev *cd)
1045 __genwqe_writeq(cd, IO_SLC_MISC_DEBUG_CLR, 0xcull);
1047 if (genwqe_need_err_masking(cd))
1048 __genwqe_writeq(cd, IO_SLC_MISC_DEBUG, 0x0aull);
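The SET/CLR register pair makes trap control a simple bracket: writing 0xc to IO_SLC_MISC_DEBUG_SET stops traps, writing it to IO_SLC_MISC_DEBUG_CLR restarts them, after which affected bitstreams get their error masking (0x0a) reapplied. A usage sketch; the critical section in the middle is illustrative:

static void example_debug_access(struct genwqe_dev *cd)
{
	genwqe_stop_traps(cd);		/* 0xc -> IO_SLC_MISC_DEBUG_SET */

	/* ... poke diagnostic registers without tripping traps ... */

	genwqe_start_traps(cd);		/* 0xc -> IO_SLC_MISC_DEBUG_CLR */
}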