Lines Matching defs:dd in /netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/drivers/infiniband/hw/ipath/


123 static inline void read_bars(struct ipath_devdata *dd, struct pci_dev *dev,
130 ipath_dev_err(dd, "failed to read bar0 before enable: "
135 ipath_dev_err(dd, "failed to read bar1 before enable: "
142 struct ipath_devdata *dd)
148 if (dd->ipath_unit != -1) {
150 idr_remove(&unit_table, dd->ipath_unit);
151 list_del(&dd->ipath_list);
154 vfree(dd);
160 struct ipath_devdata *dd;
164 dd = ERR_PTR(-ENOMEM);
168 dd = vmalloc(sizeof(*dd));
169 if (!dd) {
170 dd = ERR_PTR(-ENOMEM);
173 memset(dd, 0, sizeof(*dd));
174 dd->ipath_unit = -1;
178 ret = idr_get_new(&unit_table, dd, &dd->ipath_unit);
182 ipath_free_devdata(pdev, dd);
183 dd = ERR_PTR(ret);
187 dd->pcidev = pdev;
188 pci_set_drvdata(pdev, dd);
190 list_add(&dd->ipath_list, &ipath_dev_list);
196 return dd;
206 struct ipath_devdata *dd;
210 dd = __ipath_lookup(unit);
213 return dd;
219 struct ipath_devdata *dd;
227 list_for_each_entry(dd, &ipath_dev_list, ipath_list) {
229 if ((dd->ipath_flags & IPATH_PRESENT) && dd->ipath_kregbase)
231 if (dd->ipath_lid &&
232 !(dd->ipath_flags & (IPATH_DISABLED | IPATH_LINKDOWN
235 if (dd->ipath_cfgports > maxports)
236 maxports = dd->ipath_cfgports;
257 int __attribute__((weak)) ipath_enable_wc(struct ipath_devdata *dd)
262 void __attribute__((weak)) ipath_disable_wc(struct ipath_devdata *dd)
270 struct ipath_devdata *dd;
275 dd = ipath_alloc_devdata(pdev);
276 if (IS_ERR(dd)) {
277 ret = PTR_ERR(dd);
283 ipath_cdbg(VERBOSE, "initializing unit #%u\n", dd->ipath_unit);
285 read_bars(dd, pdev, &bar0, &bar1);
301 ipath_dev_err(dd, "enable unit %d failed: error %d\n",
302 dd->ipath_unit, -ret);
311 read_bars(dd, pdev, &bar0, &bar1);
320 ipath_dev_err(dd, "rewrite of BAR0 "
327 ipath_dev_err(dd, "rewrite of BAR1 "
332 ipath_dev_err(dd, "BAR is 0 (probable RESET), "
342 "err %d\n", dd->ipath_unit, -ret);
357 dd->ipath_unit, ret);
367 dd->ipath_unit, ret);
377 dd->ipath_unit, ret);
386 dd->ipath_pcibar0 = addr;
387 dd->ipath_pcibar1 = addr >> 32;
388 dd->ipath_deviceid = ent->device; /* save for later use */
389 dd->ipath_vendorid = ent->vendor;
395 ipath_init_iba6110_funcs(dd);
398 ipath_dev_err(dd, "QLogic HT device 0x%x cannot work if "
404 ipath_init_iba6120_funcs(dd);
407 ipath_dev_err(dd, "QLogic PCIE device 0x%x cannot work if "
412 ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, "
427 ipath_dev_err(dd, "No valid address in BAR 0!\n");
432 dd->ipath_deviceid = ent->device; /* save for later use */
433 dd->ipath_vendorid = ent->vendor;
437 ipath_dev_err(dd, "Failed to read PCI revision ID unit "
438 "%u: err %d\n", dd->ipath_unit, -ret);
441 dd->ipath_pcirev = rev;
445 dd->ipath_kregbase = __ioremap(addr, len,
448 dd->ipath_kregbase = ioremap_nocache(addr, len);
451 if (!dd->ipath_kregbase) {
457 dd->ipath_kregend = (u64 __iomem *)
458 ((void __iomem *)dd->ipath_kregbase + len);
459 dd->ipath_physaddr = addr; /* used for io_remap, etc. */
462 addr, dd->ipath_kregbase);
468 dd->ipath_flags = 0;
469 dd->ipath_lli_counter = 0;
470 dd->ipath_lli_errors = 0;
472 if (dd->ipath_f_bus(dd, pdev))
473 ipath_dev_err(dd, "Failed to setup config space; "
482 if (!dd->ipath_irq)
483 ipath_dev_err(dd, "irq is 0, BIOS error? Interrupts won't "
486 ret = request_irq(dd->ipath_irq, ipath_intr, IRQF_SHARED,
487 IPATH_DRV_NAME, dd);
489 ipath_dev_err(dd, "Couldn't setup irq handler, "
490 "irq=%d: %d\n", dd->ipath_irq, ret);
495 ret = ipath_init_chip(dd, 0); /* do the chip-specific init */
499 ret = ipath_enable_wc(dd);
502 ipath_dev_err(dd, "Write combining not enabled "
508 ipath_device_create_group(&pdev->dev, dd);
509 ipathfs_add_device(dd);
510 ipath_user_add(dd);
511 ipath_diag_add(dd);
512 ipath_register_ib_device(dd);
517 if (pdev->irq) free_irq(pdev->irq, dd);
520 iounmap((volatile void __iomem *) dd->ipath_kregbase);
529 ipath_free_devdata(pdev, dd);
535 static void __devexit cleanup_device(struct ipath_devdata *dd)
539 if (*dd->ipath_statusp & IPATH_STATUS_CHIP_PRESENT) {
541 *dd->ipath_statusp &= ~IPATH_STATUS_CHIP_PRESENT;
542 if (dd->ipath_kregbase) {
548 dd->ipath_kregbase = NULL;
549 dd->ipath_uregbase = 0;
550 dd->ipath_sregbase = 0;
551 dd->ipath_cregbase = 0;
552 dd->ipath_kregsize = 0;
554 ipath_disable_wc(dd);
557 if (dd->ipath_pioavailregs_dma) {
558 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
559 (void *) dd->ipath_pioavailregs_dma,
560 dd->ipath_pioavailregs_phys);
561 dd->ipath_pioavailregs_dma = NULL;
563 if (dd->ipath_dummy_hdrq) {
564 dma_free_coherent(&dd->pcidev->dev,
565 dd->ipath_pd[0]->port_rcvhdrq_size,
566 dd->ipath_dummy_hdrq, dd->ipath_dummy_hdrq_phys);
567 dd->ipath_dummy_hdrq = NULL;
570 if (dd->ipath_pageshadow) {
571 struct page **tmpp = dd->ipath_pageshadow;
572 dma_addr_t *tmpd = dd->ipath_physshadow;
577 for (port = 0; port < dd->ipath_cfgports; port++) {
578 int port_tidbase = port * dd->ipath_rcvtidcnt;
579 int maxtid = port_tidbase + dd->ipath_rcvtidcnt;
583 pci_unmap_page(dd->pcidev, tmpd[i],
605 dd->ipath_pageshadow);
606 tmpp = dd->ipath_pageshadow;
607 dd->ipath_pageshadow = NULL;
616 for (port = 0; port < dd->ipath_portcnt; port++) {
617 struct ipath_portdata *pd = dd->ipath_pd[port];
618 dd->ipath_pd[port] = NULL;
619 ipath_free_pddata(dd, pd);
621 kfree(dd->ipath_pd);
626 dd->ipath_pd = NULL;
631 struct ipath_devdata *dd = pci_get_drvdata(pdev);
633 ipath_cdbg(VERBOSE, "removing, pdev=%p, dd=%p\n", pdev, dd);
639 ipath_shutdown_device(dd);
641 if (dd->verbs_dev)
642 ipath_unregister_ib_device(dd->verbs_dev);
644 ipath_diag_remove(dd);
645 ipath_user_remove(dd);
646 ipathfs_remove_device(dd);
647 ipath_device_remove_group(&pdev->dev, dd);
649 ipath_cdbg(VERBOSE, "Releasing pci memory regions, dd %p, "
650 "unit %u\n", dd, (u32) dd->ipath_unit);
652 cleanup_device(dd);
660 if (dd->ipath_irq) {
662 dd->ipath_unit, dd->ipath_irq);
663 dd->ipath_f_free_irq(dd);
666 "for unit %u\n", dd->ipath_unit);
673 if (dd->ipath_f_cleanup)
675 dd->ipath_f_cleanup(dd);
677 ipath_cdbg(VERBOSE, "Unmapping kregbase %p\n", dd->ipath_kregbase);
678 iounmap((volatile void __iomem *) dd->ipath_kregbase);
683 ipath_free_devdata(pdev, dd);
693 * @dd: the infinipath device
702 void ipath_disarm_piobufs(struct ipath_devdata *dd, unsigned first,
709 sendorig = dd->ipath_sendctrl | INFINIPATH_S_DISARM;
713 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
726 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
728 sendorig = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
729 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
730 dd->ipath_sendctrl);
735 * @dd: the infinipath device
745 static int ipath_wait_linkstate(struct ipath_devdata *dd, u32 state,
748 dd->ipath_state_wanted = state;
750 (dd->ipath_flags & state),
752 dd->ipath_state_wanted = 0;
754 if (!(dd->ipath_flags & state)) {
763 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
766 dd, dd->ipath_kregs->kr_ibcctrl),
770 return (dd->ipath_flags & state) ? 0 : -ETIMEDOUT;
904 * @dd: the infinipath device
910 static inline void *ipath_get_egrbuf(struct ipath_devdata *dd, u32 bufnum,
913 return dd->ipath_port0_skbinfo ?
914 (void *) dd->ipath_port0_skbinfo[bufnum].skb->data : NULL;
919 * @dd: the infinipath device
922 struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd,
940 len = dd->ipath_ibmaxlen + 4;
942 if (dd->ipath_flags & IPATH_4BYTE_TID) {
952 ipath_dev_err(dd, "Failed to allocate skbuff, length %u\n",
959 if (dd->ipath_flags & IPATH_4BYTE_TID) {
969 static void ipath_rcv_hdrerr(struct ipath_devdata *dd,
990 u8 n = (dd->ipath_ibcctrl >>
994 if (++dd->ipath_lli_counter > n) {
995 dd->ipath_lli_counter = 0;
996 dd->ipath_lli_errors++;
1003 * @dd: the infinipath device
1007 void ipath_kreceive(struct ipath_devdata *dd)
1011 const u32 rsize = dd->ipath_rcvhdrentsize; /* words */
1012 const u32 maxcnt = dd->ipath_rcvhdrcnt * rsize; /* words */
1018 if (!dd->ipath_hdrqtailptr) {
1019 ipath_dev_err(dd,
1025 if (test_and_set_bit(0, &dd->ipath_rcv_pending))
1028 l = dd->ipath_port0head;
1029 hdrqtail = (u32) le64_to_cpu(*dd->ipath_hdrqtailptr);
1038 rc = (u64 *) (dd->ipath_pd[0]->port_rcvhdrq + (l << 2));
1063 ebuf = ipath_get_egrbuf(dd, etail, 0);
1080 ipath_rcv_hdrerr(dd, eflags, l, etail, rc);
1082 ipath_ib_rcv(dd->verbs_dev, rc + 1, ebuf, tlen);
1083 if (dd->ipath_lli_counter)
1084 dd->ipath_lli_counter--;
1127 lval = dd->ipath_rhdrhead_intr_off | l;
1130 (void)ipath_write_ureg(dd, ur_rcvhdrhead, lval, 0);
1132 (void)ipath_write_ureg(dd, ur_rcvegrindexhead,
1139 if (!dd->ipath_rhdrhead_intr_off && !reloop) {
1140 u32 hqtail = (u32)le64_to_cpu(*dd->ipath_hdrqtailptr);
1150 dd->ipath_port0head = l;
1159 clear_bit(0, &dd->ipath_rcv_pending);
1167 * @dd: the infinipath device
1174 static void ipath_update_pio_bufs(struct ipath_devdata *dd)
1178 const unsigned piobregs = (unsigned)dd->ipath_pioavregs;
1198 if (!dd->ipath_pioavailregs_dma) {
1204 volatile __le64 *dma = dd->ipath_pioavailregs_dma;
1205 unsigned long *shadow = dd->ipath_pioavailshadow;
1241 dd->ipath_pioavailregs_dma[i - 1]);
1244 dd->ipath_pioavailregs_dma[i + 1]);
1246 piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i]);
1248 ~(dd->ipath_pioavailshadow[i] ^ piov);
1250 if (pchg && (pchbusy & dd->ipath_pioavailshadow[i])) {
1251 pnew = dd->ipath_pioavailshadow[i] & ~pchbusy;
1253 dd->ipath_pioavailshadow[i] = pnew;
1261 * @dd: the infinipath device
1266 int ipath_setrcvhdrsize(struct ipath_devdata *dd, unsigned rhdrsize)
1270 if (dd->ipath_flags & IPATH_RCVHDRSZ_SET) {
1271 if (dd->ipath_rcvhdrsize != rhdrsize) {
1272 dev_info(&dd->pcidev->dev,
1275 rhdrsize, dd->ipath_rcvhdrsize);
1279 "size %u\n", dd->ipath_rcvhdrsize);
1280 } else if (rhdrsize > (dd->ipath_rcvhdrentsize -
1284 dd->ipath_rcvhdrentsize -
1288 dd->ipath_flags |= IPATH_RCVHDRSZ_SET;
1289 dd->ipath_rcvhdrsize = rhdrsize;
1290 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrsize,
1291 dd->ipath_rcvhdrsize);
1293 dd->ipath_rcvhdrsize);
1300 * @dd: the infinipath device
1307 u32 __iomem *ipath_getpiobuf(struct ipath_devdata *dd, u32 * pbufnum)
1312 unsigned long *shadow = dd->ipath_pioavailshadow;
1315 piobcnt = (unsigned)(dd->ipath_piobcnt2k
1316 + dd->ipath_piobcnt4k);
1317 starti = dd->ipath_lastport_piobuf;
1319 if (dd->ipath_upd_pio_shadow) {
1325 ipath_update_pio_bufs(dd);
1330 i = dd->ipath_lastpioindex;
1357 volatile __le64 *dma = dd->ipath_pioavailregs_dma;
1365 ipath_update_pio_bufs(dd);
1370 dd->ipath_upd_pio_shadow = 1;
1375 if (!(++dd->ipath_consec_nopiobuf % 100000)) {
1380 dd->ipath_consec_nopiobuf,
1391 if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) >
1415 dd->ipath_lastpioindex = i + 1;
1416 if (dd->ipath_upd_pio_shadow)
1417 dd->ipath_upd_pio_shadow = 0;
1418 if (dd->ipath_consec_nopiobuf)
1419 dd->ipath_consec_nopiobuf = 0;
1420 if (i < dd->ipath_piobcnt2k)
1421 buf = (u32 __iomem *) (dd->ipath_pio2kbase +
1422 i * dd->ipath_palign);
1425 (dd->ipath_pio4kbase +
1426 (i - dd->ipath_piobcnt2k) * dd->ipath_4kalign);
1428 i, (i < dd->ipath_piobcnt2k) ? 2 : 4, buf);
1438 * @dd: the infinipath device
1445 int ipath_create_rcvhdrq(struct ipath_devdata *dd,
1453 int amt = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
1457 &dd->pcidev->dev, amt, &pd->port_rcvhdrq_phys,
1461 ipath_dev_err(dd, "attempt to allocate %d bytes "
1468 &dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail, GFP_KERNEL);
1470 ipath_dev_err(dd, "attempt to allocate 1 page "
1474 dma_free_coherent(&dd->pcidev->dev, amt,
1510 ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
1512 ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
1520 int ipath_waitfor_complete(struct ipath_devdata *dd, ipath_kreg reg_id,
1527 lastval = ipath_read_kreg64(dd, reg_id);
1531 val = ipath_read_kreg64(dd, reg_id);
1560 * @dd: the infinipath device
1565 int ipath_waitfor_mdio_cmdready(struct ipath_devdata *dd)
1574 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_mdio);
1591 static void ipath_set_ib_lstate(struct ipath_devdata *dd, int which)
1603 "is %s\n", dd->ipath_unit,
1607 (dd, dd->ipath_kregs->kr_ibcstatus) >>
1613 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1615 ipath_disarm_piobufs(dd, dd->ipath_lastport_piobuf,
1616 (unsigned)(dd->ipath_piobcnt2k +
1617 dd->ipath_piobcnt4k) -
1618 dd->ipath_lastport_piobuf);
1621 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1622 dd->ipath_ibcctrl | which);
1625 int ipath_set_linkstate(struct ipath_devdata *dd, u8 newstate)
1632 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_POLL <<
1639 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_SLEEP <<
1646 ipath_set_ib_lstate(dd,
1654 if (dd->ipath_flags & IPATH_LINKINIT) {
1658 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_INIT <<
1664 if (dd->ipath_flags & IPATH_LINKARMED) {
1668 if (!(dd->ipath_flags &
1673 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED <<
1683 if (dd->ipath_flags & IPATH_LINKACTIVE) {
1687 if (!(dd->ipath_flags & IPATH_LINKARMED)) {
1691 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE <<
1697 dev_info(&dd->pcidev->dev, "Enabling IB local loopback\n");
1698 dd->ipath_ibcctrl |= INFINIPATH_IBCC_LOOPBACK;
1699 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1700 dd->ipath_ibcctrl);
1705 dev_info(&dd->pcidev->dev, "Disabling IB local loopback (normal)\n");
1706 dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LOOPBACK;
1707 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1708 dd->ipath_ibcctrl);
1717 ret = ipath_wait_linkstate(dd, lstate, 2000);
1725 * @dd: the infinipath device
1735 int ipath_set_mtu(struct ipath_devdata *dd, u16 arg)
1753 if (dd->ipath_ibmtu == arg) {
1758 piosize = dd->ipath_ibmaxlen;
1759 dd->ipath_ibmtu = arg;
1763 if (piosize != dd->ipath_init_ibmaxlen) {
1764 dd->ipath_ibmaxlen = piosize;
1767 } else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) {
1770 "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize,
1772 dd->ipath_ibmaxlen = piosize;
1781 u64 ibc = dd->ipath_ibcctrl;
1786 dd->ipath_ibmaxlen = piosize;
1795 dd->ipath_ibcctrl = ibc;
1796 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1797 dd->ipath_ibcctrl);
1798 dd->ipath_f_tidtemplate(dd);
1807 int ipath_set_lid(struct ipath_devdata *dd, u32 arg, u8 lmc)
1809 dd->ipath_lid = arg;
1810 dd->ipath_lmc = lmc;
1818 * @dd: the infinipath device
1826 void ipath_write_kreg_port(const struct ipath_devdata *dd, ipath_kreg regno,
1831 if (port < dd->ipath_portcnt &&
1832 (regno == dd->ipath_kregs->kr_rcvhdraddr ||
1833 regno == dd->ipath_kregs->kr_rcvhdrtailaddr))
1838 ipath_write_kreg(dd, where, value);
1843 * @dd: the infinipath device
1848 * Everything it does has to be setup again by ipath_init_chip(dd,1)
1850 void ipath_shutdown_device(struct ipath_devdata *dd)
1854 dd->ipath_flags |= IPATH_LINKUNK;
1855 dd->ipath_flags &= ~(IPATH_INITTED | IPATH_LINKDOWN |
1858 *dd->ipath_statusp &= ~(IPATH_STATUS_IB_CONF |
1862 ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
1864 dd->ipath_rcvctrl = 0;
1865 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
1866 dd->ipath_rcvctrl);
1872 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 0ULL);
1874 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
1887 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1890 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
1894 dd->ipath_control &= ~INFINIPATH_C_LINKENABLE;
1895 ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
1896 dd->ipath_control | INFINIPATH_C_FREEZEMODE);
1903 dd->ipath_f_quiet_serdes(dd);
1904 dd->ipath_f_setextled(dd, 0, 0);
1906 if (dd->ipath_stats_timer_active) {
1907 del_timer_sync(&dd->ipath_stats_timer);
1908 dd->ipath_stats_timer_active = 0;
1916 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
1918 ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, -1LL);
1919 ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL);
1924 * @dd: the infinipath device
1934 void ipath_free_pddata(struct ipath_devdata *dd, struct ipath_portdata *pd)
1943 dma_free_coherent(&dd->pcidev->dev, pd->port_rcvhdrq_size,
1947 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
1964 dma_free_coherent(&dd->pcidev->dev, size,
1972 } else if (pd->port_port == 0 && dd->ipath_port0_skbinfo) {
1974 struct ipath_skbinfo *skbinfo = dd->ipath_port0_skbinfo;
1976 dd->ipath_port0_skbinfo = NULL;
1980 for (e = 0; e < dd->ipath_rcvegrcnt; e++)
1982 pci_unmap_single(dd->pcidev, skbinfo[e].phys,
1983 dd->ipath_ibmaxlen,
2073 struct ipath_devdata *dd = ipath_lookup(unit);
2075 if (!dd) {
2080 dev_info(&dd->pcidev->dev, "Reset on unit %u requested\n", unit);
2082 if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT)) {
2083 dev_info(&dd->pcidev->dev, "Invalid unit number %u or "
2089 if (dd->ipath_pd)
2090 for (i = 1; i < dd->ipath_cfgports; i++) {
2091 if (dd->ipath_pd[i] && dd->ipath_pd[i]->port_cnt) {
2095 dd->ipath_pd[i]->port_pid,
2096 dd->ipath_pd[i]->port_comm);
2102 dd->ipath_flags &= ~IPATH_INITTED;
2103 ret = dd->ipath_f_reset(dd);
2108 ret = ipath_init_chip(dd, 1);
2110 ipath_dev_err(dd, "Reinitialize unit %u after "
2113 dev_info(&dd->pcidev->dev, "Reinitialized unit %u after "
2120 int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv)
2126 if ( dd->ipath_rx_pol_inv != new_pol_inv ) {
2127 dd->ipath_rx_pol_inv = new_pol_inv;
2128 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
2131 val |= ((u64)dd->ipath_rx_pol_inv) <<
2133 ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);