Lines matching refs:rf — references to rf (struct irdma_pci_f *, the RDMA PCI function context) in the irdma driver

7  * @rf: RDMA PCI function
13 int irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr, bool ipv4,
25 spin_lock_irqsave(&rf->arp_lock, flags);
26 for (arp_index = 0; (u32)arp_index < rf->arp_table_size; arp_index++) {
27 if (!memcmp(rf->arp_table[arp_index].ip_addr, ip, sizeof(ip)))
33 if (arp_index != rf->arp_table_size) {
39 if (irdma_alloc_rsrc(rf, rf->allocated_arps, rf->arp_table_size,
40 (u32 *)&arp_index, &rf->next_arp_index)) {
45 memcpy(rf->arp_table[arp_index].ip_addr, ip,
46 sizeof(rf->arp_table[arp_index].ip_addr));
47 ether_addr_copy(rf->arp_table[arp_index].mac_addr, mac_addr);
50 if (arp_index == rf->arp_table_size)
54 if (arp_index == rf->arp_table_size) {
59 memset(rf->arp_table[arp_index].ip_addr, 0,
60 sizeof(rf->arp_table[arp_index].ip_addr));
61 eth_zero_addr(rf->arp_table[arp_index].mac_addr);
62 irdma_free_rsrc(rf, rf->allocated_arps, arp_index);
69 spin_unlock_irqrestore(&rf->arp_lock, flags);
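
The matches above (lines 13-69) trace irdma_arp_table()'s three operations over one table: resolve by a linear memcmp() scan under arp_lock, add via irdma_alloc_rsrc() plus memcpy()/ether_addr_copy(), and delete by zeroing the slot and freeing its resource bit. A minimal userspace sketch of that pattern, with simplified stand-in types rather than the driver's own:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define TBL_SIZE 16
#define ETH_ALEN 6

struct arp_entry {
	uint32_t ip_addr[4];          /* IPv4 uses ip_addr[0]; IPv6 uses all four */
	uint8_t  mac_addr[ETH_ALEN];
	bool     used;
};

static struct arp_entry tbl[TBL_SIZE];

/* Resolve: the memcmp() scan at line 27 (the real code holds arp_lock). */
static int arp_find(const uint32_t ip[4])
{
	for (int i = 0; i < TBL_SIZE; i++)
		if (tbl[i].used &&
		    !memcmp(tbl[i].ip_addr, ip, sizeof(tbl[i].ip_addr)))
			return i;
	return -1;
}

/* Add: the allocate-then-copy sequence at lines 39-47. */
static int arp_add(const uint32_t ip[4], const uint8_t mac[ETH_ALEN])
{
	int i = arp_find(ip);

	if (i >= 0)
		return i;                       /* already present */
	for (i = 0; i < TBL_SIZE; i++) {
		if (!tbl[i].used) {
			memcpy(tbl[i].ip_addr, ip, sizeof(tbl[i].ip_addr));
			memcpy(tbl[i].mac_addr, mac, ETH_ALEN);
			tbl[i].used = true;
			return i;
		}
	}
	return -1;                              /* table full */
}

/* Delete: zero the slot and release it, as lines 59-62 do. */
static void arp_del(const uint32_t ip[4])
{
	int i = arp_find(ip);

	if (i >= 0)
		memset(&tbl[i], 0, sizeof(tbl[i]));
}
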
75  * @rf: RDMA PCI function
80 int irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, bool ipv4, const u8 *mac)
84 arpidx = irdma_arp_table(rf, &ip[0], ipv4, NULL, IRDMA_ARP_RESOLVE);
86 if (ether_addr_equal(rf->arp_table[arpidx].mac_addr, mac))
89 irdma_manage_arp_cache(rf, rf->arp_table[arpidx].mac_addr, ip,
93 irdma_manage_arp_cache(rf, mac, ip, ipv4, IRDMA_ARP_ADD);
95 return irdma_arp_table(rf, ip, ipv4, NULL, IRDMA_ARP_RESOLVE);
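
irdma_add_arp() (lines 80-95) layers an update policy on top: resolve first, return the existing index when the cached MAC still matches, otherwise purge the stale entry, insert the new mapping, and re-resolve. Continuing the sketch above:

/* Resolve-then-update flow modeled on lines 84-95. */
static int arp_add_or_update(const uint32_t ip[4], const uint8_t mac[ETH_ALEN])
{
	int i = arp_find(ip);

	if (i >= 0) {
		if (!memcmp(tbl[i].mac_addr, mac, ETH_ALEN))
			return i;       /* MAC unchanged: nothing to do */
		arp_del(ip);            /* stale MAC: purge the old entry */
	}
	arp_add(ip, mac);
	return arp_find(ip);            /* re-resolve, as line 95 does */
}
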
173 irdma_manage_arp_cache(iwdev->rf, real_dev->dev_addr,
180 irdma_add_arp(iwdev->rf, &local_ipaddr, true, real_dev->dev_addr);
223 irdma_manage_arp_cache(iwdev->rf, real_dev->dev_addr,
230 irdma_add_arp(iwdev->rf, local_ipaddr6, false,
285 irdma_add_arp(iwdev->rf, local_ipaddr, ipv4, neigh->ha);
288 irdma_manage_arp_cache(iwdev->rf, neigh->ha,
366 irdma_manage_arp_cache(iwdev->rf,
403 irdma_manage_arp_cache(iwdev->rf, dev->dev_addr,
529 * @rf: RDMA PCI function
531 void irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf)
533 struct irdma_sc_dev *dev = &rf->sc_dev;
534 struct irdma_cqp *cqp = &rf->cqp;
560 * @rf: RDMA PCI function
563 static int irdma_wait_event(struct irdma_pci_f *rf,
570 cqp_timeout.compl_cqp_cmds = atomic64_read(&rf->sc_dev.cqp->completed_ops);
572 irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
578 irdma_check_cqp_progress(&cqp_timeout, &rf->sc_dev);
583 if (!rf->reset) {
584 rf->reset = true;
585 rf->gen_ops.request_reset(rf);
597 if (!rf->reset) {
598 rf->reset = true;
599 rf->gen_ops.request_reset(rf);
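
irdma_wait_event() (lines 563-599) polls the CCQ for a command's completion, samples completed_ops to detect forward progress, and, if the command stalls past the timeout, requests a function reset exactly once (the rf->reset flag guards against repeats). A userspace model of that shape, with invented names; a real driver would sleep between polls rather than spin hot:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct fake_dev {
	atomic_uint_fast64_t completed_ops;
	bool reset;
};

static void request_reset(struct fake_dev *dev)
{
	dev->reset = true;      /* stand-in for rf->gen_ops.request_reset() */
}

static bool wait_for_ops(struct fake_dev *dev, uint64_t target, int max_stalls)
{
	uint64_t last = atomic_load(&dev->completed_ops);
	int stalls = 0;

	while (atomic_load(&dev->completed_ops) < target) {
		uint64_t now = atomic_load(&dev->completed_ops);

		if (now == last) {
			/* no progress: count toward the timeout */
			if (++stalls >= max_stalls) {
				if (!dev->reset)  /* one-shot, as lines 583-585 */
					request_reset(dev);
				return false;
			}
		} else {
			stalls = 0;     /* progress seen: restart the timeout */
			last = now;
		}
	}
	return true;
}
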
693 * @rf: RDMA PCI function
696 int irdma_handle_cqp_op(struct irdma_pci_f *rf,
699 struct irdma_sc_dev *dev = &rf->sc_dev;
704 if (rf->reset)
714 status = irdma_wait_event(rf, cqp_request);
725 ibdev_err(&rf->iwdev->ibdev,
732 irdma_put_cqp_request(&rf->cqp, cqp_request);
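
irdma_handle_cqp_op() (lines 696-732) is the hub the rest of these matches orbit: nearly every caller follows the same four-step lifecycle — alloc_and_get a refcounted request, fill in cqp_info, submit via irdma_handle_cqp_op() (waiting through irdma_wait_event() when asked), then put the reference. A minimal refcount model of that lifecycle, again with simplified stand-in types:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct fake_cqp_request {
	atomic_int refcnt;
	bool wait;
	int compl_status;
};

/* Step 1: allocate and take the caller's reference. */
static struct fake_cqp_request *alloc_and_get_request(bool wait)
{
	struct fake_cqp_request *req = calloc(1, sizeof(*req));

	if (req) {
		atomic_store(&req->refcnt, 1);
		req->wait = wait;
	}
	return req;
}

/* Step 3: submit; a real driver posts to the control QP and, when
 * req->wait is set, blocks until the completion handler fires. */
static int handle_request(struct fake_cqp_request *req)
{
	return req->compl_status;
}

/* Step 4: drop the reference; the last put frees the request. */
static void put_request(struct fake_cqp_request *req)
{
	if (atomic_fetch_sub(&req->refcnt, 1) == 1)
		free(req);
}
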
751 spin_lock_irqsave(&iwdev->rf->qptable_lock, flags);
753 spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
758 iwdev->rf->qp_table[qp_num] = NULL;
759 spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
777 spin_lock_irqsave(&iwdev->rf->cqtable_lock, flags);
779 spin_unlock_irqrestore(&iwdev->rf->cqtable_lock, flags);
783 iwdev->rf->cq_table[iwcq->cq_num] = NULL;
784 spin_unlock_irqrestore(&iwdev->rf->cqtable_lock, flags);
802 if (qpn < IW_FIRST_QPN || qpn >= iwdev->rf->max_qp)
805 return &iwdev->rf->qp_table[qpn]->ibqp;
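
The qp_table/cq_table matches (lines 751-805) show slots being cleared under qptable_lock/cqtable_lock during teardown, while the QP lookup bounds-checks the QPN before indexing. A small model of both halves, using a pthread mutex in place of the kernel spinlock:

#include <pthread.h>
#include <stddef.h>

#define FIRST_QPN 1
#define MAX_QP 64

static void *qp_table[MAX_QP];
static pthread_mutex_t qptable_lock = PTHREAD_MUTEX_INITIALIZER;

/* Teardown: clear the slot under the lock, as line 758 does. */
static void qp_table_clear(unsigned int qpn)
{
	pthread_mutex_lock(&qptable_lock);
	qp_table[qpn] = NULL;
	pthread_mutex_unlock(&qptable_lock);
}

/* Lookup: reject out-of-range QPNs first, as line 802 does. */
static void *qp_table_get(unsigned int qpn)
{
	if (qpn < FIRST_QPN || qpn >= MAX_QP)
		return NULL;
	return qp_table[qpn];
}
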
837 struct irdma_pci_f *rf = dev_to_rf(dev);
840 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
852 status = irdma_handle_cqp_op(rf, cqp_request);
853 irdma_put_cqp_request(&rf->cqp, cqp_request);
869 struct irdma_pci_f *rf = dev_to_rf(dev);
872 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false);
882 status = irdma_handle_cqp_op(rf, cqp_request);
883 irdma_put_cqp_request(&rf->cqp, cqp_request);
985 struct irdma_pci_f *rf = dev_to_rf(dev);
988 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
1002 status = irdma_handle_cqp_op(rf, cqp_request);
1003 irdma_put_cqp_request(&rf->cqp, cqp_request);
1019 struct irdma_pci_f *rf = dev_to_rf(dev);
1022 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
1036 status = irdma_handle_cqp_op(rf, cqp_request);
1037 irdma_put_cqp_request(&rf->cqp, cqp_request);
1049 struct irdma_pci_f *rf = dev_to_rf(dev);
1050 struct irdma_cqp *iwcqp = &rf->cqp;
1065 status = irdma_handle_cqp_op(rf, cqp_request);
1078 struct irdma_pci_f *rf = dev_to_rf(dev);
1079 struct irdma_cqp *iwcqp = &rf->cqp;
1099 status = irdma_handle_cqp_op(rf, cqp_request);
1107 * @rf: RDMA PCI function
1110 static void irdma_dealloc_push_page(struct irdma_pci_f *rf,
1120 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false);
1131 cqp_info->in.u.manage_push_page.cqp = &rf->cqp.sc_cqp;
1133 status = irdma_handle_cqp_op(rf, cqp_request);
1136 irdma_put_cqp_request(&rf->cqp, cqp_request);
1146 struct irdma_pci_f *rf = iwdev->rf;
1150 irdma_dealloc_push_page(rf, &iwqp->sc_qp);
1158 irdma_free_rsrc(rf, rf->allocated_qps, qp_num);
1159 dma_free_coherent(rf->sc_dev.hw->device, iwqp->q2_ctx_mem.size,
1162 dma_free_coherent(rf->sc_dev.hw->device, iwqp->kqp.dma_mem.size,
1171 * @rf: RDMA PCI function
1174 void irdma_cq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_cq *cq)
1179 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
1189 irdma_handle_cqp_op(rf, cqp_request);
1190 irdma_put_cqp_request(&rf->cqp, cqp_request);
1219 struct irdma_pci_f *rf = iwdev->rf;
1224 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
1239 status = irdma_handle_cqp_op(rf, cqp_request);
1240 irdma_put_cqp_request(&rf->cqp, cqp_request);
1259 irdma_gen_ae(rf, &iwqp->sc_qp, &ae_info, false);
1261 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp,
1275 irdma_handle_cqp_op(rf, cqp_request);
1276 irdma_put_cqp_request(&rf->cqp, cqp_request);
1295 struct irdma_pci_f *rf = dev_to_rf(dev);
1297 irdma_cq_wq_destroy(rf, cq);
1307 struct irdma_pci_f *rf = dev_to_rf(dev);
1308 struct irdma_cqp *iwcqp = &rf->cqp;
1325 status = irdma_handle_cqp_op(rf, cqp_request);
1326 irdma_put_cqp_request(&rf->cqp, cqp_request);
1339 struct irdma_pci_f *rf = dev_to_rf(dev);
1341 ibdev_dbg(&rf->iwdev->ibdev, "AEQ: Generate MPA CRC AE\n");
1344 irdma_gen_ae(rf, qp, &info, false);
1756 struct irdma_pci_f *rf = dev_to_rf(dev);
1757 struct irdma_cqp *iwcqp = &rf->cqp;
1772 cqp_info->in.u.stats_gather.cqp = &rf->cqp.sc_cqp;
1776 status = irdma_handle_cqp_op(rf, cqp_request);
1779 irdma_put_cqp_request(&rf->cqp, cqp_request);
1793 struct irdma_pci_f *rf = dev_to_rf(vsi->dev);
1794 struct irdma_cqp *iwcqp = &rf->cqp;
1812 cqp_info->in.u.stats_manage.cqp = &rf->cqp.sc_cqp;
1813 status = irdma_handle_cqp_op(rf, cqp_request);
1832 struct irdma_pci_f *rf = dev_to_rf(dev);
1835 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
1845 status = irdma_handle_cqp_op(rf, cqp_request);
1846 irdma_put_cqp_request(&rf->cqp, cqp_request);
1862 struct irdma_pci_f *rf = dev_to_rf(dev);
1865 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
1875 status = irdma_handle_cqp_op(rf, cqp_request);
1876 irdma_put_cqp_request(&rf->cqp, cqp_request);
1890 struct irdma_pci_f *rf = dev_to_rf(dev);
1891 struct irdma_cqp *iwcqp = &rf->cqp;
1898 if (!rf->sc_dev.ceq_valid)
1914 status = irdma_handle_cqp_op(rf, cqp_request);
1924 ibdev_dbg(&rf->iwdev->ibdev, "DCB: opcode=%d, compl_info.retval=%d\n",
1931 irdma_put_cqp_request(&rf->cqp, cqp_request);
1938 * @rf: RDMA PCI function
1947 int irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,
1959 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
1969 cqp_info->in.u.ah_create.cqp = &rf->cqp.sc_cqp;
1973 cqp_info->in.u.ah_destroy.cqp = &rf->cqp.sc_cqp;
1980 status = irdma_handle_cqp_op(rf, cqp_request);
1981 irdma_put_cqp_request(&rf->cqp, cqp_request);
2042 struct irdma_pci_f *rf = dev_to_rf(dev);
2050 err = irdma_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah,
2051 &ah_info->ah_idx, &rf->next_ah);
2059 err = irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_CREATE, wait,
2062 err = irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_CREATE, wait,
2070 irdma_free_rsrc(rf, rf->allocated_ahs, ah->ah_info.ah_idx);
2084 struct irdma_pci_f *rf = dev_to_rf(dev);
2090 irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_DESTROY, false, NULL, NULL);
2091 irdma_free_rsrc(rf, rf->allocated_ahs, ah->ah_info.ah_idx);
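
The AH matches (lines 1947-2091) show the usual reserve/submit/unwind shape: reserve an index from allocated_ahs, issue IRDMA_OP_AH_CREATE through the CQP, and release the index if the command fails; destroy runs the same steps in reverse. A sketch of that error-unwind pattern with a toy bitmap allocator (submit_ah_create() is a hypothetical stand-in for the CQP submission):

#include <stdbool.h>

#define MAX_AH 32

static bool allocated_ahs[MAX_AH];

static int reserve_ah(void)
{
	for (int i = 0; i < MAX_AH; i++)
		if (!allocated_ahs[i]) {
			allocated_ahs[i] = true;
			return i;
		}
	return -1;
}

static void release_ah(int idx)
{
	allocated_ahs[idx] = false;
}

static int submit_ah_create(int idx)
{
	(void)idx;
	return 0;               /* pretend the CQP command succeeded */
}

static int ah_create_model(void)
{
	int idx = reserve_ah();

	if (idx < 0)
		return -1;
	if (submit_ah_create(idx)) {
		release_ah(idx);        /* unwind on failure, as line 2070 does */
		return -1;
	}
	return idx;
}
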
2310 struct irdma_pci_f *rf = dev_to_rf(dev);
2314 if (irdma_alloc_rsrc(rf, rf->allocated_ws_nodes, rf->max_ws_node_id,
2328 struct irdma_pci_f *rf = dev_to_rf(dev);
2330 irdma_free_rsrc(rf, rf->allocated_ws_nodes, (u32)node_id);
2342 if (qp->iwdev->rf->reset)