Lines matching defs:hdev

Every matched line below references the struct hclge_dev *hdev handle in the HNS3 PF driver's DCB support (hclge_dcb.c, going by the function names); the leading number on each line is its position in that file.

11 static int hclge_ieee_ets_to_tm_info(struct hclge_dev *hdev,
19 hdev->tm_info.tc_info[i].tc_sch_mode =
21 hdev->tm_info.pg_info[0].tc_dwrr[i] = 0;
24 hdev->tm_info.tc_info[i].tc_sch_mode =
26 hdev->tm_info.pg_info[0].tc_dwrr[i] =
39 hclge_tm_prio_tc_info_update(hdev, ets->prio_tc);
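hclge_ieee_ets_to_tm_info() translates a dcbnl IEEE ETS request into the driver's traffic-manager state: a strict-priority TC gets SP scheduling and a zero DWRR weight (lines 19-21), an ETS TC gets DWRR scheduling with the requested bandwidth share as its weight (lines 24-26), and line 39 refreshes the priority-to-TC table. A condensed sketch of the loop, reconstructed from the matched lines; the switch on ets->tc_tsa[] and the -EINVAL return are assumptions based on the standard dcbnl ETS fields:

    for (i = 0; i < HNAE3_MAX_TC; i++) {
        switch (ets->tc_tsa[i]) {
        case IEEE_8021QAZ_TSA_STRICT:   /* strict-priority TC */
            hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_SP;
            hdev->tm_info.pg_info[0].tc_dwrr[i] = 0;  /* no DWRR weight */
            break;
        case IEEE_8021QAZ_TSA_ETS:      /* weighted (DWRR) TC */
            hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
            hdev->tm_info.pg_info[0].tc_dwrr[i] = ets->tc_tx_bw[i];
            break;
        default:
            return -EINVAL;             /* assumed: other TSAs rejected */
        }
    }
    hclge_tm_prio_tc_info_update(hdev, ets->prio_tc);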
44 static void hclge_tm_info_to_ieee_ets(struct hclge_dev *hdev,
51 ets->ets_cap = hdev->tc_max;
54 ets->prio_tc[i] = hdev->tm_info.prio_tc[i];
55 if (i < hdev->tm_info.num_tc)
56 ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i];
60 if (hdev->tm_info.tc_info[i].tc_sch_mode ==
72 struct hclge_dev *hdev = vport->back;
74 hclge_tm_info_to_ieee_ets(hdev, ets);
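The reverse helper feeds hclge_ieee_getets() (lines 72-74): it advertises hdev->tc_max as ets_cap, copies the priority-to-TC table verbatim, and reports a DWRR weight only for TCs that are actually active (lines 54-56); line 60 suggests tc_tsa is derived back from each TC's tc_sch_mode. A sketch under those assumptions:

    ets->ets_cap = hdev->tc_max;
    for (i = 0; i < HNAE3_MAX_TC; i++) {
        ets->prio_tc[i] = hdev->tm_info.prio_tc[i];
        if (i < hdev->tm_info.num_tc)
            ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i];

        /* assumed: DWRR TCs report as ETS, everything else as strict */
        if (hdev->tm_info.tc_info[i].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
            ets->tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
        else
            ets->tc_tsa[i] = IEEE_8021QAZ_TSA_STRICT;
    }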
79 static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc,
84 if (num_tc > hdev->tc_max) {
85 dev_err(&hdev->pdev->dev,
87 num_tc, hdev->tc_max);
93 dev_err(&hdev->pdev->dev,
100 if (num_tc > hdev->vport[0].alloc_tqps) {
101 dev_err(&hdev->pdev->dev,
103 num_tc, hdev->vport[0].alloc_tqps);
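hclge_dcb_common_validate() enforces three limits visible in the matches: the requested TC count may not exceed the hardware maximum (lines 84-87), every prio_tc[] entry must point at an existing TC (the dev_err at line 93 sits in that check), and the TC count may not exceed the queues allocated to the PF's vport 0 (lines 100-103). A sketch with the dev_err diagnostics elided; the middle check is reconstructed as an assumption:

    if (num_tc > hdev->tc_max)
        return -EINVAL;             /* more TCs than hardware supports */

    for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
        if (prio_tc[i] >= num_tc)   /* priority mapped to a missing TC */
            return -EINVAL;

    if (num_tc > hdev->vport[0].alloc_tqps)
        return -EINVAL;             /* need at least one queue per TC */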
110 static u8 hclge_ets_tc_changed(struct hclge_dev *hdev, struct ieee_ets *ets,
117 if (ets->prio_tc[i] != hdev->tm_info.prio_tc[i])
128 static int hclge_ets_sch_mode_validate(struct hclge_dev *hdev,
139 if (hdev->tm_info.tc_info[i].tc_sch_mode !=
145 dev_err(&hdev->pdev->dev,
155 dev_err(&hdev->pdev->dev,
160 if (hdev->tm_info.tc_info[i].tc_sch_mode !=
178 static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
184 tc_num = hclge_ets_tc_changed(hdev, ets, changed);
186 ret = hclge_dcb_common_validate(hdev, tc_num, ets->prio_tc);
190 ret = hclge_ets_sch_mode_validate(hdev, ets, changed, tc_num);
195 if (*tc != hdev->tm_info.num_tc)
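hclge_ets_validate() strings the helpers together: hclge_ets_tc_changed() derives the effective TC count while flagging any change to the priority map (line 117), the result runs through the common and scheduling-mode validators, and line 195 additionally marks a change whenever the TC count itself differs from the current tm_info. Condensed sketch:

    tc_num = hclge_ets_tc_changed(hdev, ets, changed);

    ret = hclge_dcb_common_validate(hdev, tc_num, ets->prio_tc);
    if (ret)
        return ret;

    ret = hclge_ets_sch_mode_validate(hdev, ets, changed, tc_num);
    if (ret)
        return ret;

    *tc = tc_num;
    if (*tc != hdev->tm_info.num_tc)
        *changed = true;            /* a TC count change forces a remap */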
201 static int hclge_map_update(struct hclge_dev *hdev)
205 ret = hclge_tm_schd_setup_hw(hdev);
209 ret = hclge_pause_setup_hw(hdev, false);
213 ret = hclge_buffer_alloc(hdev);
217 hclge_comm_rss_indir_init_cfg(hdev->ae_dev, &hdev->rss_cfg);
219 return hclge_rss_init_hw(hdev);
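hclge_map_update() rebuilds hardware state after a TC change in a fixed order: TM scheduler first, then pause/PFC, then the packet-buffer split, and finally RSS, whose indirection table is reset to defaults before being written back to hardware. Sketch with the error-propagation boilerplate trimmed; the meaning of the false flag (reconfiguration rather than first-time init) is an assumption:

    ret = hclge_tm_schd_setup_hw(hdev);       /* TM scheduler tree */
    if (ret)
        return ret;

    ret = hclge_pause_setup_hw(hdev, false);  /* pause/PFC, not init */
    if (ret)
        return ret;

    ret = hclge_buffer_alloc(hdev);           /* re-split packet buffers */
    if (ret)
        return ret;

    /* the TC layout changed, so RSS must be rebuilt from defaults */
    hclge_comm_rss_indir_init_cfg(hdev->ae_dev, &hdev->rss_cfg);
    return hclge_rss_init_hw(hdev);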
222 static int hclge_notify_down_uinit(struct hclge_dev *hdev)
226 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
230 ret = hclge_tm_flush_cfg(hdev, true);
234 return hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
237 static int hclge_notify_init_up(struct hclge_dev *hdev)
241 ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
245 ret = hclge_tm_flush_cfg(hdev, false);
249 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
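Every disruptive reconfiguration is bracketed by these two helpers: the down path stops the client, turns on TM flushing, and uninitializes; the up path is its mirror image. The symmetry of lines 226-234 and 241-249 gives the bodies almost verbatim; only the error-check boilerplate between the matched lines is assumed:

    static int hclge_notify_down_uinit(struct hclge_dev *hdev)
    {
        int ret;

        ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
        if (ret)
            return ret;

        ret = hclge_tm_flush_cfg(hdev, true);   /* drain the TM pipeline */
        if (ret)
            return ret;

        return hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
    }

    static int hclge_notify_init_up(struct hclge_dev *hdev)
    {
        int ret;

        ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
        if (ret)
            return ret;

        ret = hclge_tm_flush_cfg(hdev, false);  /* stop flushing */
        if (ret)
            return ret;

        return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
    }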
256 struct hclge_dev *hdev = vport->back;
261 if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
265 ret = hclge_ets_validate(hdev, ets, &num_tc, &map_changed);
272 ret = hclge_notify_down_uinit(hdev);
277 hclge_tm_schd_info_update(hdev, num_tc);
280 ret = hclge_ieee_ets_to_tm_info(hdev, ets);
285 ret = hclge_map_update(hdev);
289 return hclge_notify_init_up(hdev);
292 return hclge_tm_dwrr_cfg(hdev);
298 hclge_notify_init_up(hdev);
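hclge_ieee_setets() is the consumer of all of the above. It bails out unless IEEE DCBX is host-managed (line 261), validates, and then takes one of two paths: if the priority map changed it runs the full down/uninit, tm_info update, hardware remap, init/up cycle; if only DWRR weights changed it gets away with hclge_tm_dwrr_cfg() alone (line 292). Line 298 shows the error path still bringing the client back up. A condensed control-flow sketch under those assumptions:

    ret = hclge_ets_validate(hdev, ets, &num_tc, &map_changed);
    if (ret)
        return ret;

    if (map_changed) {
        ret = hclge_notify_down_uinit(hdev);
        if (ret)
            return ret;
    }

    hclge_tm_schd_info_update(hdev, num_tc);
    ret = hclge_ieee_ets_to_tm_info(hdev, ets);
    if (ret)
        goto err_out;

    if (map_changed) {
        ret = hclge_map_update(hdev);
        if (ret)
            goto err_out;
        return hclge_notify_init_up(hdev);
    }

    return hclge_tm_dwrr_cfg(hdev);     /* weights only, no remap needed */

err_out:
    if (map_changed)
        hclge_notify_init_up(hdev);     /* restore the client on failure */
    return ret;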
306 struct hclge_dev *hdev = vport->back;
310 pfc->pfc_cap = hdev->pfc_max;
311 pfc->pfc_en = hdev->tm_info.pfc_en;
313 ret = hclge_mac_update_stats(hdev);
315 dev_err(&hdev->pdev->dev,
320 hclge_pfc_tx_stats_get(hdev, pfc->requests);
321 hclge_pfc_rx_stats_get(hdev, pfc->indications);
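hclge_ieee_getpfc() is read-only: it reports the PFC capability and current enable bitmap from tm_info, then refreshes MAC statistics so the per-priority pause counters (requests sent, indications received) are current. Sketch; the exact error string is a guess:

    pfc->pfc_cap = hdev->pfc_max;
    pfc->pfc_en = hdev->tm_info.pfc_en;

    ret = hclge_mac_update_stats(hdev);          /* refresh HW counters */
    if (ret) {
        dev_err(&hdev->pdev->dev,
                "failed to update MAC stats, ret = %d\n", ret);
        return ret;
    }

    hclge_pfc_tx_stats_get(hdev, pfc->requests);     /* pause frames sent */
    hclge_pfc_rx_stats_get(hdev, pfc->indications);  /* pause frames seen */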
330 struct hclge_dev *hdev = vport->back;
335 if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
338 if (pfc->pfc_en == hdev->tm_info.pfc_en)
341 prio_tc = hdev->tm_info.prio_tc;
344 for (i = 0; i < hdev->tm_info.num_tc; i++) {
353 hdev->tm_info.hw_pfc_map = pfc_map;
354 hdev->tm_info.pfc_en = pfc->pfc_en;
358 pfc->pfc_en, pfc_map, hdev->tm_info.num_tc);
360 hclge_tm_pfc_info_update(hdev);
362 ret = hclge_pause_setup_hw(hdev, false);
366 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
370 ret = hclge_tm_flush_cfg(hdev, true);
379 ret = hclge_buffer_alloc(hdev);
383 ret = hclge_tm_flush_cfg(hdev, false);
387 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
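hclge_ieee_setpfc() converts the per-priority pfc_en bitmap from userspace into the per-TC map the hardware wants: a TC's bit is set as soon as any PFC-enabled priority maps to it (the nested walk starting at line 344). Both maps are cached in tm_info (lines 353-354), PFC state is recomputed, pause hardware rewritten, and the buffer reallocation is wrapped in the same down/flush/up choreography seen above. The bitmap derivation, reconstructed from the matched lines:

    prio_tc = hdev->tm_info.prio_tc;
    pfc_map = 0;

    for (i = 0; i < hdev->tm_info.num_tc; i++) {
        for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
            if (prio_tc[j] == i && (pfc->pfc_en & BIT(j))) {
                pfc_map |= BIT(i);  /* TC i carries a PFC-enabled prio */
                break;
            }
        }
    }

    hdev->tm_info.hw_pfc_map = pfc_map;
    hdev->tm_info.pfc_en = pfc->pfc_en;
    hclge_tm_pfc_info_update(hdev);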
398 struct hclge_dev *hdev = vport->back;
407 dev_info(&hdev->pdev->dev, "setapp dscp=%u priority=%u\n",
422 ret = hclge_dscp_to_tc_map(hdev);
424 dev_err(&hdev->pdev->dev,
444 struct hclge_dev *hdev = vport->back;
453 dev_info(&hdev->pdev->dev, "delapp dscp=%u priority=%u\n",
461 ret = hclge_dscp_to_tc_map(hdev);
463 dev_err(&hdev->pdev->dev,
475 ret = hclge_up_to_tc_map(hdev);
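hclge_ieee_setapp()/hclge_ieee_delapp() manage DSCP-to-priority app table entries (only the DSCP selector appears here, per the dev_info strings at 407 and 453). Both funnel into hclge_dscp_to_tc_map() to reprogram the hardware DSCP-to-TC table (lines 422 and 461), and line 475 indicates delapp falls back to hclge_up_to_tc_map(), presumably once the last DSCP entry is gone. The shared failure handling, sketched; the rollback detail is an assumption:

    ret = hclge_dscp_to_tc_map(hdev);   /* push dscp -> tc to hardware */
    if (ret) {
        dev_err(&hdev->pdev->dev,
                "failed to map dscp to tc, ret = %d\n", ret);
        /* assumed: the cached dscp/priority entry is rolled back here */
        return ret;
    }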
485 struct hclge_dev *hdev = vport->back;
490 return hdev->dcbx_cap;
497 struct hclge_dev *hdev = vport->back;
507 hdev->dcbx_cap = mode;
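hclge_getdcbx()/hclge_setdcbx() read and write the cached hdev->dcbx_cap (lines 490 and 507). Since hclge_ieee_setets() requires DCB_CAP_DCBX_VER_IEEE (line 261) and hclge_dcb_ops_set() defaults to host-managed IEEE (line 681), setdcbx presumably rejects LLD-managed and CEE modes before storing. A sketch under that assumption, using the vport->back pattern visible throughout the file:

    static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode)
    {
        struct hclge_vport *vport = hclge_get_vport(h);
        struct hclge_dev *hdev = vport->back;

        /* assumed: only host-managed IEEE DCBX is supported */
        if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
            (mode & DCB_CAP_DCBX_VER_CEE) ||
            !(mode & DCB_CAP_DCBX_HOST))
            return 1;       /* dcbnl setdcbx: non-zero means rejected */

        hdev->dcbx_cap = mode;
        return 0;
    }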
512 static int hclge_mqprio_qopt_check(struct hclge_dev *hdev,
524 ret = hclge_dcb_common_validate(hdev, mqprio_qopt->qopt.num_tc,
531 dev_err(&hdev->pdev->dev,
536 if (mqprio_qopt->qopt.count[i] > hdev->pf_rss_size_max) {
537 dev_err(&hdev->pdev->dev,
539 hdev->pf_rss_size_max);
544 dev_err(&hdev->pdev->dev,
550 dev_err(&hdev->pdev->dev,
558 if (hdev->vport[0].alloc_tqps < queue_sum) {
559 dev_err(&hdev->pdev->dev,
561 hdev->vport[0].alloc_tqps);
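hclge_mqprio_qopt_check() layers mqprio-specific limits on top of the common validation (line 524): each TC's queue count is capped by pf_rss_size_max (lines 536-539), the dev_err sites at 531, 544, and 550 guard further per-TC constraints (plausibly a power-of-two queue count, queue-offset contiguity, and unsupported rate parameters), and the accumulated total must fit in vport 0's allocation (lines 558-561). The bookkeeping for that last check, sketched:

    for (i = 0; i < mqprio_qopt->qopt.num_tc; i++) {
        if (mqprio_qopt->qopt.count[i] > hdev->pf_rss_size_max)
            return -EINVAL;          /* per-TC queue count too large */

        /* assumed: offsets must run 0, count[0], count[0]+count[1], ... */
        if (mqprio_qopt->qopt.offset[i] != queue_sum)
            return -EINVAL;

        queue_sum += mqprio_qopt->qopt.count[i];
    }

    if (hdev->vport[0].alloc_tqps < queue_sum)
        return -EINVAL;              /* total exceeds the PF's queues */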
581 static int hclge_config_tc(struct hclge_dev *hdev,
586 hclge_tm_schd_info_update(hdev, tc_info->num_tc);
588 hdev->tm_info.prio_tc[i] = tc_info->prio_tc[i];
590 return hclge_map_update(hdev);
599 struct hclge_dev *hdev = vport->back;
608 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
615 ret = hclge_mqprio_qopt_check(hdev, mqprio_qopt);
617 dev_err(&hdev->pdev->dev,
624 ret = hclge_notify_down_uinit(hdev);
632 ret = hclge_config_tc(hdev, &kinfo->tc_info);
636 return hclge_notify_init_up(hdev);
640 dev_warn(&hdev->pdev->dev,
646 if (hclge_config_tc(hdev, &kinfo->tc_info))
647 dev_err(&hdev->pdev->dev,
650 hclge_notify_init_up(hdev);
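hclge_setup_tc() is the tc/mqprio twin of setets: it refuses to run unless the NIC client is registered (line 608), validates the qopt, then performs the down/uninit, hclge_config_tc() (which pushes num_tc and prio_tc into tm_info and calls hclge_map_update(), lines 586-590), and init/up sequence. The dev_warn/dev_err pair at 640-647 shows a best-effort rollback: on failure the old TC config is re-applied and the client is still brought up. Condensed flow; the errno and the saved-copy restore are assumptions:

    if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
        return -EBUSY;                   /* assumed errno */

    ret = hclge_mqprio_qopt_check(hdev, mqprio_qopt);
    if (ret)
        return ret;

    ret = hclge_notify_down_uinit(hdev);
    if (ret)
        return ret;

    /* kinfo->tc_info is assumed to be updated from mqprio_qopt first */
    ret = hclge_config_tc(hdev, &kinfo->tc_info);
    if (ret)
        goto err_out;

    return hclge_notify_init_up(hdev);

err_out:
    /* assumed: kinfo->tc_info is restored from a copy saved earlier */
    if (hclge_config_tc(hdev, &kinfo->tc_info))
        dev_err(&hdev->pdev->dev, "failed to roll back tc config\n");
    hclge_notify_init_up(hdev);
    return ret;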
667 void hclge_dcb_ops_set(struct hclge_dev *hdev)
669 struct hclge_vport *vport = hdev->vport;
675 if (!hnae3_dev_dcb_supported(hdev) ||
681 hdev->dcbx_cap = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_HOST;
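Finally, hclge_dcb_ops_set() wires everything up: nothing is registered when the device lacks DCB support (line 675; the condition's second clause sits on a line the search did not match and is left out here), otherwise dcbx_cap defaults to host-managed IEEE. Sketch; the ops-table hookup is an assumption about what happens between the matched lines:

    void hclge_dcb_ops_set(struct hclge_dev *hdev)
    {
        struct hclge_vport *vport = hdev->vport;

        /* no DCB support: leave the dcbnl ops unset */
        if (!hnae3_dev_dcb_supported(hdev))
            return;

        /* assumed: hook the dcbnl ops table into the nic kinfo */
        vport->nic.kinfo.dcb_ops = &hns3_dcb_ops;

        hdev->dcbx_cap = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_HOST;
    }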