Lines matching refs: rf (the RDMA PCI function context)

72  * @rf: RDMA PCI function
75 static void irdma_puda_ce_handler(struct irdma_pci_f *rf,
78 struct irdma_sc_dev *dev = &rf->sc_dev;
102 * @rf: RDMA PCI function
105 static void irdma_process_ceq(struct irdma_pci_f *rf, struct irdma_ceq *ceq)
107 struct irdma_sc_dev *dev = &rf->sc_dev;
127 queue_work(rf->cqp_cmpl_wq, &rf->cqp_cmpl_work);
130 irdma_puda_ce_handler(rf, cq);
211 * @rf: RDMA PCI function
213 static void irdma_process_aeq(struct irdma_pci_f *rf)
215 struct irdma_sc_dev *dev = &rf->sc_dev;
216 struct irdma_aeq *aeq = &rf->aeq;
225 struct irdma_device *iwdev = rf->iwdev;
246 spin_lock_irqsave(&rf->qptable_lock, flags);
247 iwqp = rf->qp_table[info->qp_cq_id];
249 spin_unlock_irqrestore(&rf->qptable_lock,
261 spin_unlock_irqrestore(&rf->qptable_lock, flags);
343 spin_lock_irqsave(&rf->cqtable_lock, flags);
344 iwcq = rf->cq_table[info->qp_cq_id];
346 spin_unlock_irqrestore(&rf->cqtable_lock,
353 spin_unlock_irqrestore(&rf->cqtable_lock, flags);
448 struct irdma_pci_f *rf = from_tasklet(rf, t, dpc_tasklet);
450 if (rf->msix_shared)
451 irdma_process_ceq(rf, rf->ceqlist);
452 irdma_process_aeq(rf);
453 irdma_ena_intr(&rf->sc_dev, rf->iw_msixtbl[0].idx);
463 struct irdma_pci_f *rf = iwceq->rf;
465 irdma_process_ceq(rf, iwceq);
466 irdma_ena_intr(&rf->sc_dev, iwceq->msix_idx);
471 * @rf: RDMA PCI function
476 static int irdma_save_msix_info(struct irdma_pci_f *rf)
485 if (!rf->msix_count)
488 size = sizeof(struct irdma_msix_vector) * rf->msix_count;
489 size += struct_size(iw_qvlist, qv_info, rf->msix_count);
490 rf->iw_msixtbl = kzalloc(size, GFP_KERNEL);
491 if (!rf->iw_msixtbl)
494 rf->iw_qvlist = (struct irdma_qvlist_info *)
495 (&rf->iw_msixtbl[rf->msix_count]);
496 iw_qvlist = rf->iw_qvlist;
498 iw_qvlist->num_vectors = rf->msix_count;
499 if (rf->msix_count <= num_online_cpus())
500 rf->msix_shared = true;
501 else if (rf->msix_count > num_online_cpus() + 1)
502 rf->msix_count = num_online_cpus() + 1;
504 pmsix = rf->msix_entries;
505 for (i = 0, ceq_idx = 0; i < rf->msix_count; i++, iw_qvinfo++) {
506 rf->iw_msixtbl[i].idx = pmsix->entry;
507 rf->iw_msixtbl[i].irq = pmsix->vector;
508 rf->iw_msixtbl[i].cpu_affinity = ceq_idx;
511 if (rf->msix_shared)
520 iw_qvinfo->v_idx = rf->iw_msixtbl[i].idx;
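
Lines 488-495 size one kzalloc() for both the MSI-X vector table and the trailing qvlist (via struct_size()), then carve the qvlist pointer out just past the last table entry; lines 499-502 mark the vectors shared (AEQ and CEQ 0 on vector 0, see line 1121) when there are no more vectors than online CPUs, and otherwise cap msix_count at num_online_cpus() + 1. Below is a minimal userspace sketch of the single-allocation layout; msix_vector, qv_info, and qvlist_info here are illustrative stand-ins, not the driver's definitions.

    #include <stdio.h>
    #include <stdlib.h>

    struct msix_vector { int idx, irq, cpu_affinity; };
    struct qv_info     { int v_idx, ceq_idx; };
    struct qvlist_info {
            unsigned int num_vectors;
            struct qv_info qv_info[];   /* flexible array, as with struct_size() */
    };

    int main(void)
    {
            unsigned int msix_count = 4;

            /* One zeroed allocation holds the vector table followed by the
             * qvlist header and its flexible qv_info[] array. */
            size_t size = sizeof(struct msix_vector) * msix_count;
            size += sizeof(struct qvlist_info)
                    + sizeof(struct qv_info) * msix_count;

            struct msix_vector *tbl = calloc(1, size);
            if (!tbl)
                    return 1;

            /* The qvlist starts immediately after the last table entry. */
            struct qvlist_info *qvlist = (struct qvlist_info *)&tbl[msix_count];
            qvlist->num_vectors = msix_count;

            for (unsigned int i = 0; i < msix_count; i++)
                    qvlist->qv_info[i].v_idx = tbl[i].idx = (int)i;

            printf("qvlist sits %zu bytes into the block\n",
                   (size_t)((char *)qvlist - (char *)tbl));
            free(tbl);  /* a single free() releases both regions */
            return 0;
    }
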
534 struct irdma_pci_f *rf = data;
536 tasklet_schedule(&rf->dpc_tasklet);
551 ibdev_err(to_ibdev(&iwceq->rf->sc_dev), "expected irq = %d received irq = %d\n",
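
Lines 534-536, 448-453, and 462-466 show the interrupt split: the hard IRQ handler does nothing but schedule a tasklet, and the tasklet (bottom half) drains the CEQ/AEQ and re-enables the MSI-X vector. A hedged sketch of the same wiring for a hypothetical driver context; my_func, my_dpc, and my_irq_handler are placeholders, and this is kernel-module code, not a standalone program.

    #include <linux/interrupt.h>

    struct my_func {                        /* placeholder for struct irdma_pci_f */
            struct tasklet_struct dpc_tasklet;
            /* ... queues, MSI-X table, ... */
    };

    /* Bottom half: runs in softirq context after the hard IRQ returns. */
    static void my_dpc(struct tasklet_struct *t)
    {
            struct my_func *rf = from_tasklet(rf, t, dpc_tasklet);

            /* Drain the CEQ/AEQ here, then re-enable the vector the way
             * irdma_ena_intr() does in the fragments above. */
            (void)rf;
    }

    /* Top half: keep it minimal, just kick the tasklet (lines 534-536). */
    static irqreturn_t my_irq_handler(int irq, void *data)
    {
            struct my_func *rf = data;

            tasklet_schedule(&rf->dpc_tasklet);
            return IRQ_HANDLED;
    }

    /*
     * Setup mirrors irdma_cfg_aeq_vector()/irdma_cfg_ceq_vector():
     *     tasklet_setup(&rf->dpc_tasklet, my_dpc);
     *     request_irq(vector, my_irq_handler, 0, "my-aeqceq", rf);
     * Teardown mirrors irdma_destroy_irq(): free_irq() first, then
     * tasklet_kill(), so no bottom half can be rescheduled afterwards.
     */
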
560 * @rf: RDMA PCI function
566 static void irdma_destroy_irq(struct irdma_pci_f *rf,
569 struct irdma_sc_dev *dev = &rf->sc_dev;
574 if (rf == dev_id) {
575 tasklet_kill(&rf->dpc_tasklet);
585 * @rf: RDMA PCI function
590 static void irdma_destroy_cqp(struct irdma_pci_f *rf)
592 struct irdma_sc_dev *dev = &rf->sc_dev;
593 struct irdma_cqp *cqp = &rf->cqp;
600 irdma_cleanup_pending_cqp_op(rf);
610 static void irdma_destroy_virt_aeq(struct irdma_pci_f *rf)
612 struct irdma_aeq *aeq = &rf->aeq;
616 irdma_unmap_vm_page_list(&rf->hw, pg_arr, pg_cnt);
617 irdma_free_pble(rf->pble_rsrc, &aeq->palloc);
623 * @rf: RDMA PCI function
629 static void irdma_destroy_aeq(struct irdma_pci_f *rf)
631 struct irdma_sc_dev *dev = &rf->sc_dev;
632 struct irdma_aeq *aeq = &rf->aeq;
635 if (!rf->msix_shared) {
636 rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, rf->iw_msixtbl->idx, false);
637 irdma_destroy_irq(rf, rf->iw_msixtbl, rf);
639 if (rf->reset)
649 irdma_destroy_virt_aeq(rf);
659 * @rf: RDMA PCI function
665 static void irdma_destroy_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq)
667 struct irdma_sc_dev *dev = &rf->sc_dev;
670 if (rf->reset)
691 * @rf: RDMA PCI function
695 static void irdma_del_ceq_0(struct irdma_pci_f *rf)
697 struct irdma_ceq *iwceq = rf->ceqlist;
700 if (rf->msix_shared) {
701 msix_vec = &rf->iw_msixtbl[0];
702 rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev,
705 irdma_destroy_irq(rf, msix_vec, rf);
707 msix_vec = &rf->iw_msixtbl[1];
708 irdma_destroy_irq(rf, msix_vec, iwceq);
711 irdma_destroy_ceq(rf, iwceq);
712 rf->sc_dev.ceq_valid = false;
713 rf->ceqs_count = 0;
718 * @rf: RDMA PCI function
723 static void irdma_del_ceqs(struct irdma_pci_f *rf)
725 struct irdma_ceq *iwceq = &rf->ceqlist[1];
729 if (rf->msix_shared)
730 msix_vec = &rf->iw_msixtbl[1];
732 msix_vec = &rf->iw_msixtbl[2];
734 for (i = 1; i < rf->ceqs_count; i++, msix_vec++, iwceq++) {
735 rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, msix_vec->ceq_id,
737 irdma_destroy_irq(rf, msix_vec, iwceq);
738 irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,
740 dma_free_coherent(rf->sc_dev.hw->device, iwceq->mem.size,
744 rf->ceqs_count = 1;
749 * @rf: RDMA PCI function
754 static void irdma_destroy_ccq(struct irdma_pci_f *rf)
756 struct irdma_sc_dev *dev = &rf->sc_dev;
757 struct irdma_ccq *ccq = &rf->ccq;
760 if (rf->cqp_cmpl_wq)
761 destroy_workqueue(rf->cqp_cmpl_wq);
763 if (!rf->reset)
832 * @rf: RDMA PCI function
839 static int irdma_create_hmc_objs(struct irdma_pci_f *rf, bool privileged,
842 struct irdma_sc_dev *dev = &rf->sc_dev;
848 info.entry_type = rf->sd_type;
887 * @rf: RDMA PCI function
896 static int irdma_obj_aligned_mem(struct irdma_pci_f *rf,
903 va = (unsigned long)rf->obj_next.va;
909 memptr->pa = rf->obj_next.pa + extra;
911 if (((u8 *)memptr->va + size) > ((u8 *)rf->obj_mem.va + rf->obj_mem.size))
914 rf->obj_next.va = (u8 *)memptr->va + size;
915 rf->obj_next.pa = memptr->pa + size;
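
Lines 896-915 implement a bump allocator over the rf->obj_mem DMA region: round the cursor up to the requested alignment, bounds-check the carved chunk against the end of the region, and advance rf->obj_next past it. A userspace sketch of the same pointer arithmetic; mem_region and the "mask = alignment - 1" convention are assumptions carried over from the call sites, not the driver's types.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct mem_region {             /* stand-in for rf->obj_mem / rf->obj_next */
            uint8_t  *va;
            uint64_t  pa;           /* bus address in the driver, mirrored here */
            size_t    size;
    };

    /* Carve an aligned sub-buffer out of 'next', bounds-checked against 'base'. */
    static int obj_aligned_mem(struct mem_region *base, struct mem_region *next,
                               struct mem_region *out, size_t size, size_t mask)
    {
            uintptr_t va = (uintptr_t)next->va;
            size_t extra = 0;

            if (mask)
                    extra = (-va) & mask;   /* bytes needed to reach alignment */

            out->va   = next->va + extra;
            out->pa   = next->pa + extra;
            out->size = size;

            if (out->va + size > base->va + base->size)
                    return -1;              /* region exhausted */

            next->va = out->va + size;      /* bump the cursor */
            next->pa = out->pa + size;
            return 0;
    }

    int main(void)
    {
            struct mem_region base = { malloc(8192), 0x1000, 8192 };
            struct mem_region next = base, buf;

            if (!obj_aligned_mem(&base, &next, &buf, 512, 0xFF))
                    printf("got %zu bytes at offset %td\n",
                           buf.size, buf.va - base.va);
            free(base.va);
            return 0;
    }
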
922 * @rf: RDMA PCI function
927 static int irdma_create_cqp(struct irdma_pci_f *rf)
931 struct irdma_sc_dev *dev = &rf->sc_dev;
933 struct irdma_cqp *cqp = &rf->cqp;
958 status = irdma_obj_aligned_mem(rf, &mem, sizeof(struct irdma_cqp_ctx),
972 cqp_init_info.hmc_profile = rf->rsrc_profile;
974 cqp_init_info.protocol_used = rf->protocol_used;
976 switch (rf->rdma_ver) {
1028 * @rf: RDMA PCI function
1033 static int irdma_create_ccq(struct irdma_pci_f *rf)
1035 struct irdma_sc_dev *dev = &rf->sc_dev;
1037 struct irdma_ccq *ccq = &rf->ccq;
1051 status = irdma_obj_aligned_mem(rf, &ccq->shadow_area,
1067 info.vsi = &rf->default_vsi;
1092 status = irdma_alloc_local_mac_entry(iwdev->rf,
1095 status = irdma_add_local_mac_entry(iwdev->rf,
1099 irdma_del_local_mac_entry(iwdev->rf,
1108 * @rf: RDMA PCI function
1116 static int irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
1121 if (rf->msix_shared && !ceq_id) {
1123 "irdma-%s-AEQCEQ-0", dev_name(&rf->pcidev->dev));
1124 tasklet_setup(&rf->dpc_tasklet, irdma_dpc);
1126 msix_vec->name, rf);
1130 dev_name(&rf->pcidev->dev), ceq_id);
1140 ibdev_dbg(&rf->iwdev->ibdev, "ERR: ceq irq config fail\n");
1145 rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, ceq_id, msix_vec->idx, true);
1152 * @rf: RDMA PCI function
1157 static int irdma_cfg_aeq_vector(struct irdma_pci_f *rf)
1159 struct irdma_msix_vector *msix_vec = rf->iw_msixtbl;
1162 if (!rf->msix_shared) {
1164 "irdma-%s-AEQ", dev_name(&rf->pcidev->dev));
1165 tasklet_setup(&rf->dpc_tasklet, irdma_dpc);
1167 msix_vec->name, rf);
1170 ibdev_dbg(&rf->iwdev->ibdev, "ERR: aeq irq config fail\n");
1174 rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, msix_vec->idx, true);
1181 * @rf: RDMA PCI function
1189 static int irdma_create_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
1194 struct irdma_sc_dev *dev = &rf->sc_dev;
1198 iwceq->rf = rf;
1199 ceq_size = min(rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt,
1218 status = irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,
1235 * @rf: RDMA PCI function
1241 static int irdma_setup_ceq_0(struct irdma_pci_f *rf)
1249 num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs);
1250 rf->ceqlist = kcalloc(num_ceqs, sizeof(*rf->ceqlist), GFP_KERNEL);
1251 if (!rf->ceqlist) {
1256 iwceq = &rf->ceqlist[0];
1257 status = irdma_create_ceq(rf, iwceq, 0, &rf->default_vsi);
1259 ibdev_dbg(&rf->iwdev->ibdev, "ERR: create ceq status = %d\n",
1265 i = rf->msix_shared ? 0 : 1;
1266 msix_vec = &rf->iw_msixtbl[i];
1269 status = irdma_cfg_ceq_vector(rf, iwceq, 0, msix_vec);
1271 irdma_destroy_ceq(rf, iwceq);
1275 irdma_ena_intr(&rf->sc_dev, msix_vec->idx);
1276 rf->ceqs_count++;
1279 if (status && !rf->ceqs_count) {
1280 kfree(rf->ceqlist);
1281 rf->ceqlist = NULL;
1284 rf->sc_dev.ceq_valid = true;
1291 * @rf: RDMA PCI function
1298 static int irdma_setup_ceqs(struct irdma_pci_f *rf, struct irdma_sc_vsi *vsi)
1307 num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs);
1308 i = (rf->msix_shared) ? 1 : 2;
1310 iwceq = &rf->ceqlist[ceq_id];
1311 status = irdma_create_ceq(rf, iwceq, ceq_id, vsi);
1313 ibdev_dbg(&rf->iwdev->ibdev,
1318 msix_vec = &rf->iw_msixtbl[i];
1321 status = irdma_cfg_ceq_vector(rf, iwceq, ceq_id, msix_vec);
1323 irdma_destroy_ceq(rf, iwceq);
1326 irdma_ena_intr(&rf->sc_dev, msix_vec->idx);
1327 rf->ceqs_count++;
1333 irdma_del_ceqs(rf);
1338 static int irdma_create_virt_aeq(struct irdma_pci_f *rf, u32 size)
1340 struct irdma_aeq *aeq = &rf->aeq;
1345 if (rf->rdma_ver < IRDMA_GEN_2)
1355 status = irdma_get_pble(rf->pble_rsrc, &aeq->palloc, pg_cnt, true);
1362 status = irdma_map_vm_page_list(&rf->hw, aeq->mem.va, pg_arr, pg_cnt);
1364 irdma_free_pble(rf->pble_rsrc, &aeq->palloc);
1374 * @rf: RDMA PCI function
1379 static int irdma_create_aeq(struct irdma_pci_f *rf)
1382 struct irdma_sc_dev *dev = &rf->sc_dev;
1383 struct irdma_aeq *aeq = &rf->aeq;
1384 struct irdma_hmc_info *hmc_info = rf->sc_dev.hmc_info;
1386 u8 multiplier = (rf->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) ? 2 : 1;
1402 status = irdma_create_virt_aeq(rf, aeq_size);
1416 info.msix_idx = rf->iw_msixtbl->idx;
1429 irdma_destroy_virt_aeq(rf);
1441 * @rf: RDMA PCI function
1446 static int irdma_setup_aeq(struct irdma_pci_f *rf)
1448 struct irdma_sc_dev *dev = &rf->sc_dev;
1451 status = irdma_create_aeq(rf);
1455 status = irdma_cfg_aeq_vector(rf);
1457 irdma_destroy_aeq(rf);
1461 if (!rf->msix_shared)
1462 irdma_ena_intr(dev, rf->iw_msixtbl[0].idx);
1484 info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768);
1514 info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768);
1532 struct irdma_pci_f *rf = iwdev->rf;
1536 iwdev->rf->reset = true;
1537 rf->gen_ops.request_reset(rf);
1543 * @rf: RDMA PCI function
1549 static int irdma_hmc_setup(struct irdma_pci_f *rf)
1554 qpcnt = rsrc_limits_table[rf->limits_sel].qplimit;
1556 rf->sd_type = IRDMA_SD_TYPE_DIRECT;
1557 status = irdma_cfg_fpm_val(&rf->sc_dev, qpcnt);
1561 status = irdma_create_hmc_objs(rf, true, rf->rdma_ver);
1568 * @rf: RDMA PCI function
1570 static void irdma_del_init_mem(struct irdma_pci_f *rf)
1572 struct irdma_sc_dev *dev = &rf->sc_dev;
1576 vfree(rf->mem_rsrc);
1577 rf->mem_rsrc = NULL;
1578 dma_free_coherent(rf->hw.device, rf->obj_mem.size, rf->obj_mem.va,
1579 rf->obj_mem.pa);
1580 rf->obj_mem.va = NULL;
1581 if (rf->rdma_ver != IRDMA_GEN_1) {
1582 bitmap_free(rf->allocated_ws_nodes);
1583 rf->allocated_ws_nodes = NULL;
1585 kfree(rf->ceqlist);
1586 rf->ceqlist = NULL;
1587 kfree(rf->iw_msixtbl);
1588 rf->iw_msixtbl = NULL;
1589 kfree(rf->hmc_info_mem);
1590 rf->hmc_info_mem = NULL;
1595 * @rf: RDMA PCI function
1601 static int irdma_initialize_dev(struct irdma_pci_f *rf)
1604 struct irdma_sc_dev *dev = &rf->sc_dev;
1613 rf->hmc_info_mem = kzalloc(size, GFP_KERNEL);
1614 if (!rf->hmc_info_mem)
1617 rf->pble_rsrc = (struct irdma_hmc_pble_rsrc *)rf->hmc_info_mem;
1618 dev->hmc_info = &rf->hw.hmc;
1620 (rf->pble_rsrc + 1);
1622 status = irdma_obj_aligned_mem(rf, &mem, IRDMA_QUERY_FPM_BUF_SIZE,
1630 status = irdma_obj_aligned_mem(rf, &mem, IRDMA_COMMIT_FPM_BUF_SIZE,
1638 info.bar0 = rf->hw.hw_addr;
1639 info.hmc_fn_id = rf->pf_id;
1640 info.hw = &rf->hw;
1641 status = irdma_sc_dev_init(rf->rdma_ver, &rf->sc_dev, &info);
1647 kfree(rf->hmc_info_mem);
1648 rf->hmc_info_mem = NULL;
1666 if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
1667 irdma_del_local_mac_entry(iwdev->rf,
1676 iwdev->rf->reset);
1682 iwdev->rf->reset);
1698 static int irdma_setup_init_state(struct irdma_pci_f *rf)
1702 status = irdma_save_msix_info(rf);
1706 rf->hw.device = &rf->pcidev->dev;
1707 rf->obj_mem.size = ALIGN(8192, IRDMA_HW_PAGE_SIZE);
1708 rf->obj_mem.va = dma_alloc_coherent(rf->hw.device, rf->obj_mem.size,
1709 &rf->obj_mem.pa, GFP_KERNEL);
1710 if (!rf->obj_mem.va) {
1715 rf->obj_next = rf->obj_mem;
1716 status = irdma_initialize_dev(rf);
1723 dma_free_coherent(rf->hw.device, rf->obj_mem.size, rf->obj_mem.va,
1724 rf->obj_mem.pa);
1725 rf->obj_mem.va = NULL;
1727 kfree(rf->iw_msixtbl);
1728 rf->iw_msixtbl = NULL;
1740 iwdev->rf->used_pds = find_first_zero_bit(iwdev->rf->allocated_pds,
1741 iwdev->rf->max_pd);
1742 iwdev->rf->used_qps = find_first_zero_bit(iwdev->rf->allocated_qps,
1743 iwdev->rf->max_qp);
1744 iwdev->rf->used_cqs = find_first_zero_bit(iwdev->rf->allocated_cqs,
1745 iwdev->rf->max_cq);
1746 iwdev->rf->used_mrs = find_first_zero_bit(iwdev->rf->allocated_mrs,
1747 iwdev->rf->max_mr);
1750 void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf)
1752 enum init_completion_state state = rf->init_state;
1754 rf->init_state = INVALID_STATE;
1755 if (rf->rsrc_created) {
1756 irdma_destroy_aeq(rf);
1757 irdma_destroy_pble_prm(rf->pble_rsrc);
1758 irdma_del_ceqs(rf);
1759 rf->rsrc_created = false;
1763 irdma_del_ceq_0(rf);
1766 irdma_destroy_ccq(rf);
1770 irdma_del_hmc_objects(&rf->sc_dev, rf->sc_dev.hmc_info, true,
1771 rf->reset, rf->rdma_ver);
1774 irdma_destroy_cqp(rf);
1777 irdma_del_init_mem(rf);
1781 ibdev_warn(&rf->iwdev->ibdev, "bad init_state = %d\n", rf->init_state);
1797 struct irdma_pci_f *rf = iwdev->rf;
1798 struct irdma_sc_dev *dev = &rf->sc_dev;
1807 vsi_info.register_qset = rf->gen_ops.register_qset;
1808 vsi_info.unregister_qset = rf->gen_ops.unregister_qset;
1812 status = irdma_setup_cm_core(iwdev, rf->rdma_ver);
1840 if (!rf->rsrc_created) {
1841 status = irdma_setup_ceqs(rf, &iwdev->vsi);
1847 status = irdma_hmc_init_pble(&rf->sc_dev,
1848 rf->pble_rsrc);
1850 irdma_del_ceqs(rf);
1856 status = irdma_setup_aeq(rf);
1858 irdma_destroy_pble_prm(rf->pble_rsrc);
1859 irdma_del_ceqs(rf);
1863 rf->rsrc_created = true;
1866 if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
1884 dev_err(&rf->pcidev->dev, "HW runtime init FAIL status = %d last cmpl = %d\n",
1893 * @rf: RDMA PCI function
1897 int irdma_ctrl_init_hw(struct irdma_pci_f *rf)
1899 struct irdma_sc_dev *dev = &rf->sc_dev;
1902 status = irdma_setup_init_state(rf);
1905 rf->init_state = INITIAL_STATE;
1907 status = irdma_create_cqp(rf);
1910 rf->init_state = CQP_CREATED;
1912 status = irdma_hmc_setup(rf);
1915 rf->init_state = HMC_OBJS_CREATED;
1917 status = irdma_initialize_hw_rsrc(rf);
1920 rf->init_state = HW_RSRC_INITIALIZED;
1922 status = irdma_create_ccq(rf);
1925 rf->init_state = CCQ_CREATED;
1928 if (rf->rdma_ver != IRDMA_GEN_1) {
1934 status = irdma_setup_ceq_0(rf);
1937 rf->init_state = CEQ0_CREATED;
1939 rf->cqp_cmpl_wq =
1941 if (!rf->cqp_cmpl_wq) {
1945 INIT_WORK(&rf->cqp_cmpl_work, cqp_compl_worker);
1950 dev_err(&rf->pcidev->dev, "IRDMA hardware initialization FAILED init_state=%d status=%d\n",
1951 rf->init_state, status);
1952 irdma_ctrl_deinit_hw(rf);
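
irdma_ctrl_init_hw() (lines 1897 onward) advances rf->init_state after every stage that succeeds, and irdma_ctrl_deinit_hw() (lines 1750-1781) unwinds with a switch whose cases deliberately fall through, so teardown starts at the last completed stage and runs every earlier destructor in reverse order. A compressed, runnable sketch of the pattern with three stages instead of the driver's full ladder:

    #include <stdio.h>

    enum init_state { INVALID_STATE, INITIAL_STATE, CQP_CREATED, CCQ_CREATED };

    struct func { enum init_state init_state; };

    static void deinit(struct func *f)
    {
            enum init_state state = f->init_state;

            f->init_state = INVALID_STATE;
            switch (state) {
            case CCQ_CREATED:
                    printf("destroy ccq\n");
                    /* fallthrough: each case tears down exactly one stage */
            case CQP_CREATED:
                    printf("destroy cqp\n");
                    /* fallthrough */
            case INITIAL_STATE:
                    printf("free init mem\n");
                    break;
            default:
                    printf("bad init_state = %d\n", state);
            }
    }

    static int init(struct func *f, int fail_at)
    {
            f->init_state = INITIAL_STATE;
            if (fail_at == 1) goto err;     /* e.g. CQP creation failed */
            f->init_state = CQP_CREATED;
            if (fail_at == 2) goto err;     /* e.g. CCQ creation failed */
            f->init_state = CCQ_CREATED;
            return 0;
    err:
            deinit(f);                      /* unwinds only completed stages */
            return -1;
    }

    int main(void)
    {
            struct func f;

            if (init(&f, 2))                /* simulate a failure at stage 2 */
                    fprintf(stderr, "init failed, stages unwound\n");
            return 0;
    }
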
1958 * @rf: RDMA PCI function
1960 static void irdma_set_hw_rsrc(struct irdma_pci_f *rf)
1962 rf->allocated_qps = (void *)(rf->mem_rsrc +
1963 (sizeof(struct irdma_arp_entry) * rf->arp_table_size));
1964 rf->allocated_cqs = &rf->allocated_qps[BITS_TO_LONGS(rf->max_qp)];
1965 rf->allocated_mrs = &rf->allocated_cqs[BITS_TO_LONGS(rf->max_cq)];
1966 rf->allocated_pds = &rf->allocated_mrs[BITS_TO_LONGS(rf->max_mr)];
1967 rf->allocated_ahs = &rf->allocated_pds[BITS_TO_LONGS(rf->max_pd)];
1968 rf->allocated_mcgs = &rf->allocated_ahs[BITS_TO_LONGS(rf->max_ah)];
1969 rf->allocated_arps = &rf->allocated_mcgs[BITS_TO_LONGS(rf->max_mcg)];
1970 rf->qp_table = (struct irdma_qp **)
1971 (&rf->allocated_arps[BITS_TO_LONGS(rf->arp_table_size)]);
1972 rf->cq_table = (struct irdma_cq **)(&rf->qp_table[rf->max_qp]);
1974 spin_lock_init(&rf->rsrc_lock);
1975 spin_lock_init(&rf->arp_lock);
1976 spin_lock_init(&rf->qptable_lock);
1977 spin_lock_init(&rf->cqtable_lock);
1978 spin_lock_init(&rf->qh_list_lock);
1983 * @rf: RDMA PCI function
1985 static u32 irdma_calc_mem_rsrc_size(struct irdma_pci_f *rf)
1989 rsrc_size = sizeof(struct irdma_arp_entry) * rf->arp_table_size;
1990 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_qp);
1991 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mr);
1992 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_cq);
1993 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_pd);
1994 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->arp_table_size);
1995 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_ah);
1996 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mcg);
1997 rsrc_size += sizeof(struct irdma_qp **) * rf->max_qp;
1998 rsrc_size += sizeof(struct irdma_cq **) * rf->max_cq;
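
irdma_calc_mem_rsrc_size() (lines 1989-1998) must add up exactly the same pieces that irdma_set_hw_rsrc() (lines 1962-1972) later carves out of rf->mem_rsrc, in the same order: the ARP table, seven bitmaps sized with BITS_TO_LONGS, then the QP and CQ pointer tables. A compact userspace sketch of keeping the two in sync, with two resources instead of nine; rsrc, calc_rsrc_size, and set_rsrc are illustrative names.

    #include <stdio.h>
    #include <stdlib.h>

    #define BITS_PER_LONG    (8 * sizeof(unsigned long))
    #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    struct qp;                              /* opaque, like struct irdma_qp */

    struct rsrc {
            unsigned int   max_qp, max_cq;
            unsigned char *mem;             /* one vzalloc()-style backing region */
            unsigned long *allocated_qps;   /* bitmap */
            unsigned long *allocated_cqs;   /* bitmap */
            struct qp    **qp_table;        /* id -> object pointer table */
    };

    /* Must mirror set_rsrc() below, piece for piece. */
    static size_t calc_rsrc_size(const struct rsrc *r)
    {
            size_t size = 0;

            size += sizeof(unsigned long) * BITS_TO_LONGS(r->max_qp);
            size += sizeof(unsigned long) * BITS_TO_LONGS(r->max_cq);
            size += sizeof(struct qp *) * r->max_qp;
            return size;
    }

    static void set_rsrc(struct rsrc *r)
    {
            r->allocated_qps = (unsigned long *)r->mem;
            r->allocated_cqs = &r->allocated_qps[BITS_TO_LONGS(r->max_qp)];
            r->qp_table = (struct qp **)&r->allocated_cqs[BITS_TO_LONGS(r->max_cq)];
    }

    int main(void)
    {
            struct rsrc r = { .max_qp = 1024, .max_cq = 2048 };

            r.mem = calloc(1, calc_rsrc_size(&r));
            if (!r.mem)
                    return 1;
            set_rsrc(&r);
            printf("qp_table starts %zu bytes in\n",
                   (size_t)((unsigned char *)r.qp_table - r.mem));
            free(r.mem);
            return 0;
    }
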
2005 * @rf: RDMA PCI function
2007 u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
2013 if (rf->rdma_ver != IRDMA_GEN_1) {
2014 rf->allocated_ws_nodes = bitmap_zalloc(IRDMA_MAX_WS_NODES,
2016 if (!rf->allocated_ws_nodes)
2019 set_bit(0, rf->allocated_ws_nodes);
2020 rf->max_ws_node_id = IRDMA_MAX_WS_NODES;
2022 rf->max_cqe = rf->sc_dev.hw_attrs.uk_attrs.max_hw_cq_size;
2023 rf->max_qp = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt;
2024 rf->max_mr = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt;
2025 rf->max_cq = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt;
2026 rf->max_pd = rf->sc_dev.hw_attrs.max_hw_pds;
2027 rf->arp_table_size = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt;
2028 rf->max_ah = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt;
2029 rf->max_mcg = rf->max_qp;
2031 rsrc_size = irdma_calc_mem_rsrc_size(rf);
2032 rf->mem_rsrc = vzalloc(rsrc_size);
2033 if (!rf->mem_rsrc) {
2038 rf->arp_table = (struct irdma_arp_entry *)rf->mem_rsrc;
2040 irdma_set_hw_rsrc(rf);
2042 set_bit(0, rf->allocated_mrs);
2043 set_bit(0, rf->allocated_qps);
2044 set_bit(0, rf->allocated_cqs);
2045 set_bit(0, rf->allocated_pds);
2046 set_bit(0, rf->allocated_arps);
2047 set_bit(0, rf->allocated_ahs);
2048 set_bit(0, rf->allocated_mcgs);
2049 set_bit(2, rf->allocated_qps); /* qp 2 IEQ */
2050 set_bit(1, rf->allocated_qps); /* qp 1 ILQ */
2051 set_bit(1, rf->allocated_cqs);
2052 set_bit(1, rf->allocated_pds);
2053 set_bit(2, rf->allocated_cqs);
2054 set_bit(2, rf->allocated_pds);
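
Lines 2042-2054 pre-reserve ID 0 in every bitmap, plus QPs 1 and 2 (ILQ and IEQ, per the inline comments) and the matching CQs and PDs, so the allocator can never hand those IDs to users; the same find_first_zero_bit() primitive is what lines 1740-1747 use to report the first free index as a used count. A self-contained sketch with naive userspace stand-ins for the kernel bitmap helpers (the real ones scan word at a time):

    #include <stdio.h>
    #include <string.h>

    #define BITS_PER_LONG    (8 * sizeof(unsigned long))
    #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    static void set_bit(unsigned int nr, unsigned long *map)
    {
            map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
    }

    static unsigned int find_first_zero_bit(const unsigned long *map,
                                            unsigned int size)
    {
            for (unsigned int nr = 0; nr < size; nr++)
                    if (!(map[nr / BITS_PER_LONG] & (1UL << (nr % BITS_PER_LONG))))
                            return nr;
            return size;            /* full: no zero bit below 'size' */
    }

    int main(void)
    {
            unsigned long allocated_qps[BITS_TO_LONGS(64)];

            memset(allocated_qps, 0, sizeof(allocated_qps));
            set_bit(0, allocated_qps);      /* qp 0 reserved */
            set_bit(1, allocated_qps);      /* qp 1 ILQ */
            set_bit(2, allocated_qps);      /* qp 2 IEQ */

            unsigned int id = find_first_zero_bit(allocated_qps, 64);
            set_bit(id, allocated_qps);     /* claim it */
            printf("first user QP id = %u\n", id);  /* prints 3 */
            return 0;
    }
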
2056 INIT_LIST_HEAD(&rf->mc_qht_list.list);
2058 mrdrvbits = 24 - max(get_count_order(rf->max_mr), 14);
2059 rf->mr_stagmask = ~(((1 << mrdrvbits) - 1) << (32 - mrdrvbits));
2064 bitmap_free(rf->allocated_ws_nodes);
2065 rf->allocated_ws_nodes = NULL;
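
Lines 2058-2059 derive the MR STag mask: get_count_order(max_mr) is the bit-width of the MR index space, mrdrvbits is how many high bits of the 32-bit STag are then kept out of the consumer's hands, and mr_stagmask clears exactly those top bits. Worked through for an assumed max_mr of 65536: get_count_order = 16, mrdrvbits = 24 - max(16, 14) = 8, and the mask is ~(0xFF << 24) = 0x00ffffff. The arithmetic, runnable:

    #include <stdio.h>

    /* get_count_order(n): log2 of the next power of two >= n (0 for n = 1),
     * a userspace stand-in for the kernel helper. */
    static int get_count_order(unsigned int n)
    {
            int order = 0;

            while ((1U << order) < n)
                    order++;
            return order;
    }

    int main(void)
    {
            unsigned int max_mr = 1U << 16;         /* assumed HMC MR count */
            int order = get_count_order(max_mr);
            int mrdrvbits = 24 - (order > 14 ? order : 14);
            unsigned int mr_stagmask =
                    ~(((1U << mrdrvbits) - 1) << (32 - mrdrvbits));

            printf("mrdrvbits=%d stagmask=0x%08x\n", mrdrvbits, mr_stagmask);
            /* prints: mrdrvbits=8 stagmask=0x00ffffff */
            return 0;
    }
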
2072 * @rf: RDMA PCI function
2075 void irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq)
2078 struct irdma_sc_dev *dev = &rf->sc_dev;
2086 spin_lock_irqsave(&rf->cqp.compl_lock, flags);
2088 spin_unlock_irqrestore(&rf->cqp.compl_lock, flags);
2097 ibdev_err(&rf->iwdev->ibdev, "cqp opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\n",
2108 irdma_put_cqp_request(&rf->cqp, cqp_request);
2112 irdma_put_cqp_request(&rf->cqp, cqp_request);
2131 struct irdma_pci_f *rf = container_of(work, struct irdma_pci_f,
2133 struct irdma_sc_cq *cq = &rf->ccq.sc_cq;
2135 irdma_cqp_ce_handler(rf, cq);
2193 * @rf: RDMA PCI function
2196 void irdma_del_local_mac_entry(struct irdma_pci_f *rf, u16 idx)
2198 struct irdma_cqp *iwcqp = &rf->cqp;
2214 irdma_handle_cqp_op(rf, cqp_request);
2221 * @rf: RDMA PCI function
2225 int irdma_add_local_mac_entry(struct irdma_pci_f *rf, const u8 *mac_addr, u16 idx)
2228 struct irdma_cqp *iwcqp = &rf->cqp;
2247 status = irdma_handle_cqp_op(rf, cqp_request);
2255 * @rf: RDMA PCI function
2262 int irdma_alloc_local_mac_entry(struct irdma_pci_f *rf, u16 *mac_tbl_idx)
2264 struct irdma_cqp *iwcqp = &rf->cqp;
2278 status = irdma_handle_cqp_op(rf, cqp_request);
2301 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, add_port);
2312 cqp_info->in.u.manage_apbvt_entry.cqp = &iwdev->rf->cqp.sc_cqp;
2317 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2318 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2389 * @rf: RDMA PCI function
2395 void irdma_manage_arp_cache(struct irdma_pci_f *rf,
2404 arp_index = irdma_arp_table(rf, ip_addr, ipv4, mac_addr, action);
2408 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false);
2422 cqp_info->in.u.add_arp_cache_entry.cqp = &rf->cqp.sc_cqp;
2427 cqp_info->in.u.del_arp_cache_entry.cqp = &rf->cqp.sc_cqp;
2432 irdma_handle_cqp_op(rf, cqp_request);
2433 irdma_put_cqp_request(&rf->cqp, cqp_request);
2463 struct irdma_cqp *iwcqp = &iwdev->rf->cqp;
2528 cqp_info->in.u.manage_qhash_table_entry.cqp = &iwdev->rf->cqp.sc_cqp;
2532 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2580 * @rf: RDMA PCI function
2585 int irdma_hw_flush_wqes(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
2594 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
2607 status = irdma_handle_cqp_op(rf, cqp_request);
2611 irdma_put_cqp_request(&rf->cqp, cqp_request);
2642 new_req = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
2655 status = irdma_handle_cqp_op(rf, new_req);
2664 irdma_put_cqp_request(&rf->cqp, new_req);
2675 ibdev_dbg(&rf->iwdev->ibdev,
2677 iwqp->ibqp.qp_num, rf->protocol_used, iwqp->iwarp_state,
2682 irdma_put_cqp_request(&rf->cqp, cqp_request);
2689 * @rf: RDMA PCI function
2694 void irdma_gen_ae(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
2701 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
2713 irdma_handle_cqp_op(rf, cqp_request);
2714 irdma_put_cqp_request(&rf->cqp, cqp_request);
2720 struct irdma_pci_f *rf = iwqp->iwdev->rf;
2756 (void)irdma_hw_flush_wqes(rf, &iwqp->sc_qp, &info,
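
From line 2196 to the end of the listing, one control-path idiom repeats: irdma_alloc_and_get_cqp_request() hands back a request the caller holds a reference on, irdma_handle_cqp_op() submits it to the control QP, irdma_cqp_ce_handler() (lines 2075-2112) processes the completion and drops its reference there, and the caller always finishes with irdma_put_cqp_request(). A userspace analogue of that lifecycle; the initial count of 2 is my reading of the puts visible at lines 2108/2112 plus the caller-side puts, not a quote of the driver.

    #include <stdio.h>
    #include <stdlib.h>

    struct cqp_request {
            int refcnt;     /* one ref for the caller, one for the completion path */
    };

    static struct cqp_request *alloc_and_get_cqp_request(void)
    {
            struct cqp_request *req = calloc(1, sizeof(*req));

            if (req)
                    req->refcnt = 2;
            return req;
    }

    static void put_cqp_request(struct cqp_request *req)
    {
            if (--req->refcnt == 0)
                    free(req);
    }

    /* Submit side; the completion (the analogue of irdma_cqp_ce_handler())
     * drops its reference after consuming the CQE, simulated inline here. */
    static int handle_cqp_op(struct cqp_request *req)
    {
            put_cqp_request(req);
            return 0;
    }

    int main(void)
    {
            struct cqp_request *req = alloc_and_get_cqp_request();

            if (!req)
                    return 1;
            if (handle_cqp_op(req))
                    fprintf(stderr, "cqp op failed\n");
            put_cqp_request(req);   /* caller drops its reference unconditionally */
            return 0;
    }
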