Lines Matching refs:cdev in /freebsd-13-stable/sys/dev/qlnx/qlnxe/

116 static int qlnx_nic_setup(struct ecore_dev *cdev,
118 static int qlnx_nic_start(struct ecore_dev *cdev);
122 static void qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
502 for (i = 0; i < ha->cdev.num_hwfns; i++) {
503 if (&ha->cdev.hwfns[i] == p_hwfn) {
532 for (i = 0; i < ha->cdev.num_hwfns; i++) {
533 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];
560 for (i = 0; i < ha->cdev.num_hwfns; i++) {
894 num_sp_msix = ha->cdev.num_hwfns;
899 ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_rxq);
900 ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_txq);
964 for (i = 0; i < ha->cdev.num_hwfns; i++) {
965 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];
1023 for (i = 0; i < ha->cdev.num_hwfns; i++) {
1080 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0];
1196 p_hwfn = &ha->cdev.hwfns[0];
1237 ecore_init_struct(&ha->cdev);
1250 ecore_init_dp(&ha->cdev, ha->dp_module, ha->dp_level, ha->pci_dev);
1252 ha->cdev.regview = ha->pci_reg;
1257 ha->cdev.b_is_vf = true;
1260 ha->cdev.doorbells = ha->pci_dbells;
1261 ha->cdev.db_phys_addr = ha->dbells_phys_addr;
1262 ha->cdev.db_size = ha->dbells_size;
1267 ha->cdev.doorbells = ha->pci_dbells;
1268 ha->cdev.db_phys_addr = ha->dbells_phys_addr;
1269 ha->cdev.db_size = ha->dbells_size;
1290 ecore_hw_prepare(&ha->cdev, &params);
1292 qlnx_set_id(&ha->cdev, qlnx_name_str, qlnx_ver_str);
1294 QL_DPRINT1(ha, "ha = %p cdev = %p p_hwfn = %p\n",
1295 ha, &ha->cdev, &ha->cdev.hwfns[0]);
1330 ecore_hw_remove(&ha->cdev);
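The attach-path lines above (1237-1330) show the ecore device being initialized in place inside the driver softc: ecore_init_struct()/ecore_init_dp(), then the register and doorbell BAR fields, then ecore_hw_prepare() and qlnx_set_id(), with ecore_hw_remove() to undo it. A rough sketch of how that embedding appears to be laid out; the types below are simplified stand-ins inferred from the listing, not the real qlnx/ecore definitions:

#include <stdint.h>

/* Simplified stand-ins for the real definitions in ecore.h / qlnx_def.h;
 * the field names follow the attach lines above (1250-1269). */
struct sketch_ecore_dev {
	void		*regview;	/* register BAR mapping  (ha->pci_reg)    */
	void		*doorbells;	/* doorbell BAR mapping  (ha->pci_dbells) */
	uint64_t	 db_phys_addr;	/* doorbell BAR bus address               */
	uint32_t	 db_size;	/* doorbell BAR length                    */
	uint8_t		 b_is_vf;	/* set when attaching as a VF             */
	uint8_t		 num_hwfns;	/* hardware functions behind this device  */
};

struct sketch_qlnx_host {
	/* Keeping cdev as the first member would let an ecore_dev pointer be
	 * cast straight back to the owning host structure, which is what the
	 * (qlnx_host_t *)cdev casts elsewhere in the listing suggest. */
	struct sketch_ecore_dev	 cdev;
	void			*pci_reg;	/* register BAR resource (stand-in)  */
	void			*pci_dbells;	/* doorbell BAR resource (stand-in)  */
	/* ... interrupt, fastpath queue and statistics state ... */
};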
1365 for (i = 0; i < ha->cdev.num_hwfns; i++) {
1415 for (i = 0; i < ha->cdev.num_hwfns; i++) {
1462 p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];
1497 p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];
2502 struct ecore_dev *cdev;
2505 cdev = &ha->cdev;
2518 rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL);
2803 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0];
3171 struct ecore_dev *cdev;
3174 cdev = &ha->cdev;
3842 p_hwfn = &ha->cdev.hwfns[0];
4597 struct ecore_dev *cdev = &ha->cdev;
4609 p_hwfn = &ha->cdev.hwfns[(fp->rss_id % cdev->num_hwfns)];
5415 struct ecore_dev *cdev;
5421 cdev = p_hwfn->p_dev;
5424 bus_write_4(((qlnx_host_t *)cdev)->pci_dbells, offset, value);
5445 struct ecore_dev *cdev;
5447 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5448 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5450 data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, offset);
5459 struct ecore_dev *cdev;
5461 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5462 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5464 bus_write_4(((qlnx_host_t *)cdev)->pci_reg, offset, value);
5473 struct ecore_dev *cdev;
5475 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5476 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5478 bus_write_8(((qlnx_host_t *)cdev)->pci_reg, offset, value);
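The register-access callbacks above (source lines 5445-5478) cast the ecore_dev pointer back to the qlnx_host_t that embeds it to reach the bus-space handle, and turn the register address handed in by ecore into a byte offset from cdev->regview. A userspace sketch of that offset arithmetic, with a trimmed-down stand-in for ecore_dev and a plain array in place of the mapped BAR and bus_read_4():

#include <stdint.h>
#include <stdio.h>

/* Trimmed-down stand-in: only the field the offset math needs. */
struct sketch_ecore_dev {
	void *regview;		/* mapped register BAR; ha->pci_reg in the driver */
};

/* Mirrors the pattern at source lines 5448-5464: the register "address"
 * is regview plus an offset, so the offset is recovered with byte-pointer
 * subtraction and then used against the bus-space handle
 * (bus_read_4()/bus_write_4() in the driver; a plain array here). */
static uint32_t
sketch_reg_rd32(struct sketch_ecore_dev *cdev, void *reg_addr)
{
	size_t offset = (size_t)((uint8_t *)reg_addr - (uint8_t *)cdev->regview);

	return *(uint32_t *)((uint8_t *)cdev->regview + offset);
}

int
main(void)
{
	uint32_t bar[64] = { 0 };			/* stands in for the mapped BAR */
	struct sketch_ecore_dev cdev = { .regview = bar };

	bar[0x40 / 4] = 0x12345678;
	printf("0x%08x\n", sketch_reg_rd32(&cdev, (uint8_t *)bar + 0x40));
	return (0);
}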
5697 qlnx_nic_setup(struct ecore_dev *cdev, struct ecore_pf_params *func_params)
5701 for (i = 0; i < cdev->num_hwfns; i++) {
5702 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
5706 if (qlnx_vf_device((qlnx_host_t *)cdev) != 0) {
5712 rc = ecore_resc_alloc(cdev);
5716 ecore_resc_setup(cdev);
5724 qlnx_nic_start(struct ecore_dev *cdev)
5733 params.int_mode = cdev->int_mode;
5737 rc = ecore_hw_init(cdev, &params);
5739 ecore_resc_free(cdev);
5749 struct ecore_dev *cdev;
5778 cdev = &ha->cdev;
5780 rc = qlnx_nic_setup(cdev, &pf_params);
5784 cdev->int_mode = ECORE_INT_MODE_MSIX;
5785 cdev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE;
5788 cdev->rx_coalesce_usecs = 255;
5789 cdev->tx_coalesce_usecs = 255;
5792 rc = qlnx_nic_start(cdev);
5794 ha->rx_coalesce_usecs = cdev->rx_coalesce_usecs;
5795 ha->tx_coalesce_usecs = cdev->tx_coalesce_usecs;
5809 struct ecore_dev *cdev;
5813 cdev = &ha->cdev;
5815 ecore_hw_stop(cdev);
5817 for (i = 0; i < ha->cdev.num_hwfns; i++) {
5830 ecore_resc_free(cdev);
5836 qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
5841 memcpy(cdev->name, name, NAME_SIZE);
5843 for_each_hwfn(cdev, i) {
5844 snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
5847 cdev->drv_type = DRV_ID_DRV_TYPE_FREEBSD;
5853 qlnx_get_protocol_stats(void *cdev, int proto_type, void *proto_stats)
5860 ha = cdev;
5866 ecore_get_vport_stats((struct ecore_dev *)cdev, &eth_stats);
5887 p_hwfn = &ha->cdev.hwfns[0];
5907 p_hwfn = &ha->cdev.hwfns[0];
5924 struct ecore_dev *cdev;
5926 cdev = &ha->cdev;
5998 struct ecore_dev *cdev;
6000 cdev = &ha->cdev;
6003 OSAL_DMA_FREE_COHERENT(cdev, ((void *)sb_info->sb_virt),
6010 qlnx_sb_init(struct ecore_dev *cdev, struct ecore_sb_info *sb_info,
6017 hwfn_index = sb_id % cdev->num_hwfns;
6018 p_hwfn = &cdev->hwfns[hwfn_index];
6019 rel_sb_id = sb_id / cdev->num_hwfns;
6021 QL_DPRINT2(((qlnx_host_t *)cdev),
6041 struct ecore_dev *cdev;
6043 cdev = &ha->cdev;
6046 sb_virt = OSAL_DMA_ALLOC_COHERENT(cdev, (&sb_phys), size);
6053 rc = qlnx_sb_init(cdev, sb_info, sb_virt, sb_phys, sb_id);
6055 OSAL_DMA_FREE_COHERENT(cdev, sb_virt, sb_phys, size);
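qlnx_sb_init() above (source lines 6010-6019) splits a global status-block id across the hardware functions: the modulo picks the owning hwfn and the division gives the id relative to that hwfn. The same interleaving shows up for the fastpath queues (the rss_id % num_hwfns and index / num_hwfns lines elsewhere in the listing). A small standalone sketch of that mapping, assuming a hypothetical two-function adapter:

#include <stdio.h>

/* Global sb/queue ids are interleaved across hardware functions: with
 * num_hwfns == 2, ids 0,2,4,... land on hwfn 0 and 1,3,5,... on hwfn 1,
 * and each hwfn sees a dense local (relative) id space. */
int
main(void)
{
	const int num_hwfns = 2;	/* hypothetical dual-function adapter */

	for (int sb_id = 0; sb_id < 8; sb_id++) {
		int hwfn_index = sb_id % num_hwfns;	/* which hwfn owns it  */
		int rel_sb_id  = sb_id / num_hwfns;	/* id within that hwfn */

		printf("sb_id %d -> hwfn %d, rel_sb_id %d\n",
		    sb_id, hwfn_index, rel_sb_id);
	}
	return (0);
}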
6086 struct ecore_dev *cdev;
6089 cdev = &ha->cdev;
6104 ecore_chain_free(cdev, &rxq->rx_bd_ring);
6111 ecore_chain_free(cdev, &rxq->rx_comp_ring);
6140 struct ecore_dev *cdev;
6142 cdev = &ha->cdev;
6256 struct ecore_dev *cdev;
6258 cdev = &ha->cdev;
6271 rc = ecore_chain_alloc(cdev,
6283 rc = ecore_chain_alloc(cdev,
6352 struct ecore_dev *cdev;
6354 cdev = &ha->cdev;
6361 ecore_chain_free(cdev, &txq->tx_pbl);
6374 struct ecore_dev *cdev;
6376 cdev = &ha->cdev;
6382 ret = ecore_chain_alloc(cdev,
6507 struct ecore_dev *cdev;
6509 cdev = &ha->cdev;
6535 qlnx_start_vport(struct ecore_dev *cdev,
6547 ha = (qlnx_host_t *)cdev;
6564 for_each_hwfn(cdev, i) {
6565 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
6587 qlnx_update_vport(struct ecore_dev *cdev,
6594 qlnx_host_t *ha = (qlnx_host_t *)cdev;
6623 for_each_hwfn(cdev, i) {
6624 p_hwfn = &cdev->hwfns[i];
6626 if ((cdev->num_hwfns > 1) &&
6632 fp_index = ((cdev->num_hwfns * j) + i) %
6752 struct ecore_dev *cdev = &ha->cdev;
6775 rc = qlnx_start_vport(cdev, vport_id, ifp->if_mtu, drop_ttl0_flg,
6792 p_hwfn = &cdev->hwfns[(fp->rss_id % cdev->num_hwfns)];
6837 qparams.queue_id = txq->index / cdev->num_hwfns ;
6929 rc = qlnx_update_vport(cdev, &vport_update_params);
6971 struct ecore_dev *cdev;
6975 cdev = &ha->cdev;
6994 rc = qlnx_update_vport(cdev, &vport_update_params);
7015 struct ecore_hwfn *p_hwfn = &cdev->hwfns[(i % cdev->num_hwfns)];
7044 for_each_hwfn(cdev, i) {
7045 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
7064 struct ecore_dev *cdev;
7067 cdev = &ha->cdev;
7077 rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL);
7086 struct ecore_dev *cdev;
7095 cdev = &ha->cdev;
7097 rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL);
7106 struct ecore_dev *cdev;
7109 cdev = &ha->cdev;
7126 rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL);
7159 struct ecore_dev *cdev;
7161 cdev = &ha->cdev;
7172 rc = ecore_filter_accept_cmd(cdev, 0, accept, false, false,
7211 struct ecore_dev *cdev;
7218 cdev = &ha->cdev;
7220 for_each_hwfn(cdev, i) {
7221 hwfn = &cdev->hwfns[i];
7317 ecore_get_vport_stats(&ha->cdev, &ha->hw_stats);
7332 struct ecore_dev *cdev;
7335 cdev = &ha->cdev;
7385 qlnx_link_update(&ha->cdev.hwfns[0]);
7449 struct ecore_dev *cdev;
7453 cdev = &ha->cdev;
7463 ecore_hw_stop_fastpath(cdev);
7499 p_hwfn = &ha->cdev.hwfns[hwfn_index];
7530 p_hwfn = &ha->cdev.hwfns[hwfn_index];
7556 struct ecore_dev *cdev;
7567 cdev = &ha->cdev;
7569 for_each_hwfn(cdev, i) {
7570 hwfn = &cdev->hwfns[i];
7907 struct ecore_dev *cdev;
7909 cdev = p_hwfn->p_dev;
7911 for (i = 0; i < cdev->num_hwfns; i++) {
7912 if (&cdev->hwfns[i] == p_hwfn)
7916 if (i >= cdev->num_hwfns)
7928 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p rel_vf_id = %d\n",
7960 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p \n",
7993 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p \n",
7999 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p i = %d\n",
8046 struct ecore_dev *cdev;
8049 cdev = &ha->cdev;
8051 ecore_iov_set_vfs_to_disable(cdev, true);
8053 for_each_hwfn(cdev, i) {
8054 struct ecore_hwfn *hwfn = &cdev->hwfns[i];
8082 ecore_iov_release_hw_for_vf(&cdev->hwfns[i],
8092 ecore_iov_set_vfs_to_disable(cdev, false);
8127 struct ecore_dev *cdev;
8140 cdev = &ha->cdev;
8142 max_vfs = RESC_NUM(&cdev->hwfns[0], ECORE_VPORT);
8149 (RESC_NUM(&cdev->hwfns[0], ECORE_VPORT) - 1));
8162 for_each_hwfn(cdev, j) {
8163 struct ecore_hwfn *hwfn = &cdev->hwfns[j];
8195 qlnx_inform_vf_link_state(&cdev->hwfns[0], ha);
8267 p_hwfn = &ha->cdev.hwfns[0];
8405 for (i = 0; i < ha->cdev.num_hwfns; i++) {
8406 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];
8434 for (i = 0; i < ha->cdev.num_hwfns; i++) {