Lines matching refs: hdev in hclgevf_mbx.c (HNS3 VF mailbox driver)

17 static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev)
22 hdev->mbx_resp.received_resp = false;
23 hdev->mbx_resp.origin_mbx_msg = 0;
24 hdev->mbx_resp.resp_status = 0;
25 hdev->mbx_resp.match_id++;
27 if (hdev->mbx_resp.match_id == 0)
28 hdev->mbx_resp.match_id = HCLGEVF_MBX_MATCH_ID_START;
29 memset(hdev->mbx_resp.additional_info, 0, HCLGE_MBX_MAX_RESP_DATA_SIZE);
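
Taken together, source lines 17–29 above are essentially the whole response-state reset helper. A sketch of how the fragments fit together (the field names come straight from the fragments; HCLGEVF_MBX_MATCH_ID_START is the driver's reserved non-zero starting id, and the caller is assumed to hold mbx_resp.mbx_mutex, as the send path below suggests):

	static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev)
	{
		/* Forget any previous response and bump match_id so a stale
		 * PF reply can be told apart from the one we are about to
		 * wait for; zero is skipped because it means "no match id".
		 */
		hdev->mbx_resp.received_resp = false;
		hdev->mbx_resp.origin_mbx_msg = 0;
		hdev->mbx_resp.resp_status = 0;
		hdev->mbx_resp.match_id++;
		if (hdev->mbx_resp.match_id == 0)
			hdev->mbx_resp.match_id = HCLGEVF_MBX_MATCH_ID_START;
		memset(hdev->mbx_resp.additional_info, 0,
		       HCLGE_MBX_MAX_RESP_DATA_SIZE);
	}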
34 * @hdev: pointer to struct hclgevf_dev
40 static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
50 dev_err(&hdev->pdev->dev,
57 while ((!hdev->mbx_resp.received_resp) && (i < HCLGEVF_MAX_TRY_TIMES)) {
59 &hdev->hw.hw.comm_state))
70 dev_err(&hdev->pdev->dev,
72 code0, code1, hdev->mbx_resp.received_resp, i);
76 mbx_resp = &hdev->mbx_resp;
86 hclgevf_reset_mbx_resp_status(hdev);
89 dev_err(&hdev->pdev->dev,
92 dev_err(&hdev->pdev->dev,
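
The fragments from source lines 40–92 outline the synchronous wait for a PF reply: poll received_resp, bail out early if the command queue has been disabled, and report a timeout or a code mismatch. A condensed sketch of that loop (HCLGEVF_SLEEP_USECOND, the HCLGE_COMM_STATE_CMD_DISABLE bit name and the log text are assumptions; the code-matching checks behind the last two dev_err fragments are elided):

	static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0,
					u16 code1, u8 *resp_data, u16 resp_len)
	{
		struct hclgevf_mbx_resp_status *mbx_resp;
		int i = 0;

		while (!hdev->mbx_resp.received_resp &&
		       i < HCLGEVF_MAX_TRY_TIMES) {
			/* give up early if the command queue was torn down */
			if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE,
				     &hdev->hw.hw.comm_state))
				return -EIO;

			usleep_range(HCLGEVF_SLEEP_USECOND,
				     HCLGEVF_SLEEP_USECOND * 2);
			i++;
		}

		if (i >= HCLGEVF_MAX_TRY_TIMES) {
			dev_err(&hdev->pdev->dev,
				"VF got no mbx(%u,%u) resp from PF in %d tries\n",
				code0, code1, i);
			return -EIO;
		}

		mbx_resp = &hdev->mbx_resp;
		if (resp_data)
			memcpy(resp_data, mbx_resp->additional_info, resp_len);

		/* also verifies the echoed codes against code0/code1 in the
		 * full driver, per the dev_err fragments above
		 */
		hclgevf_reset_mbx_resp_status(hdev);
		return 0;
	}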
101 int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev,
112 dev_err(&hdev->pdev->dev,
123 if (test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state))
124 trace_hclge_vf_mbx_send(hdev, req);
128 mutex_lock(&hdev->mbx_resp.mbx_mutex);
129 hclgevf_reset_mbx_resp_status(hdev);
130 req->match_id = cpu_to_le16(hdev->mbx_resp.match_id);
131 status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
133 dev_err(&hdev->pdev->dev,
136 mutex_unlock(&hdev->mbx_resp.mbx_mutex);
140 status = hclgevf_get_mbx_resp(hdev, send_msg->code,
143 mutex_unlock(&hdev->mbx_resp.mbx_mutex);
146 status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
148 dev_err(&hdev->pdev->dev,
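
Source lines 101–148 are the request entry point, and the mutex-guarded region is the interesting part: mbx_resp.mbx_mutex serializes request/response pairs, and the match_id stamped into the request is what lets hclgevf_handle_mbx_response() below reject stale replies. A sketch of the synchronous branch (send_msg->subcode, resp_data and resp_len are assumed parameter names, and the log text is illustrative; the else-branch at source line 146 just sends without waiting):

	/* synchronous send: one outstanding request at a time */
	mutex_lock(&hdev->mbx_resp.mbx_mutex);
	hclgevf_reset_mbx_resp_status(hdev);
	req->match_id = cpu_to_le16(hdev->mbx_resp.match_id);

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to send mbx message to PF\n", status);
		mutex_unlock(&hdev->mbx_resp.mbx_mutex);
		return status;
	}

	/* block until the PF answers or the poll times out */
	status = hclgevf_get_mbx_resp(hdev, send_msg->code, send_msg->subcode,
				      resp_data, resp_len);
	mutex_unlock(&hdev->mbx_resp.mbx_mutex);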
165 static void hclgevf_handle_mbx_response(struct hclgevf_dev *hdev,
170 struct hclgevf_mbx_resp_status *resp = &hdev->mbx_resp;
175 dev_warn(&hdev->pdev->dev,
191 * ignore the response, and the driver will clear hdev->mbx_resp
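
The comment fragment at source line 191 describes the match_id handshake. A sketch of the acceptance logic it documents (req->match_id and the resp field layout are assumptions based on the fragments):

	u16 match_id = le16_to_cpu(req->match_id);

	/* A PF that supports match_id echoes the id from the request.
	 * Only a matching id completes the wait; a stale reply is simply
	 * dropped and mbx_resp is reset on the next synchronous send.
	 */
	if (match_id) {
		if (match_id == resp->match_id)
			resp->received_resp = true;
		/* else: reply to an earlier, abandoned request; ignore */
	} else {
		/* old PF without match_id support: accept unconditionally */
		resp->received_resp = true;
	}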
201 static void hclgevf_handle_mbx_msg(struct hclgevf_dev *hdev,
207 if (atomic_read(&hdev->arq.count) >=
209 dev_warn(&hdev->pdev->dev,
216 memcpy(hdev->arq.msg_q[hdev->arq.tail], &req->msg,
218 hclge_mbx_tail_ptr_move_arq(hdev->arq);
219 atomic_inc(&hdev->arq.count);
221 hclgevf_mbx_task_schedule(hdev);
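
Unsolicited PF messages are not processed in interrupt context; source lines 201–221 queue them on the arq (async receive queue) and kick the mailbox task. A sketch of the enqueue with its overflow guard (HCLGE_MBX_MAX_ARQ_MSG_NUM, HCLGE_MBX_MAX_ARQ_MSG_SIZE and the warning text are assumptions):

	/* drop new messages rather than overwrite unread ones */
	if (atomic_read(&hdev->arq.count) >= HCLGE_MBX_MAX_ARQ_MSG_NUM) {
		dev_warn(&hdev->pdev->dev,
			 "vf mbx arq full, dropping message\n");
		return;
	}

	memcpy(hdev->arq.msg_q[hdev->arq.tail], &req->msg,
	       HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16));
	hclge_mbx_tail_ptr_move_arq(hdev->arq);
	atomic_inc(&hdev->arq.count);

	/* processed in task context by hclgevf_mbx_async_handler() */
	hclgevf_mbx_task_schedule(hdev);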
224 void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
232 crq = &hdev->hw.hw.cmq.crq;
234 while (!hclgevf_cmd_crq_empty(&hdev->hw)) {
236 &hdev->hw.hw.comm_state)) {
237 dev_info(&hdev->pdev->dev, "vf crq need init\n");
247 dev_warn(&hdev->pdev->dev,
257 trace_hclge_vf_mbx_get(hdev, req);
267 hclgevf_handle_mbx_response(hdev, req);
274 hclgevf_handle_mbx_msg(hdev, req);
277 dev_err(&hdev->pdev->dev,
287 hclgevf_write_dev(&hdev->hw, HCLGE_COMM_NIC_CRQ_HEAD_REG,
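
Source lines 224–287 are the interrupt-side dispatcher: drain the command receive queue (CRQ), complete a blocked sender on a response, defer everything else to the arq, then tell the hardware how far we have read. A condensed sketch (HCLGE_MBX_PF_VF_RESP, the descriptor access, the code field name and the ring-pointer helper are assumptions; the dev_warn at source line 247 guards against descriptors without a valid flag, which this sketch skips):

	while (!hclgevf_cmd_crq_empty(&hdev->hw)) {
		if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE,
			     &hdev->hw.hw.comm_state)) {
			dev_info(&hdev->pdev->dev, "vf crq need init\n");
			return;
		}

		desc = &crq->desc[crq->next_to_use];
		req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data;

		trace_hclge_vf_mbx_get(hdev, req);

		if (le16_to_cpu(req->msg.code) == HCLGE_MBX_PF_VF_RESP)
			hclgevf_handle_mbx_response(hdev, req); /* wakes sender */
		else
			hclgevf_handle_mbx_msg(hdev, req);      /* defer to arq */

		hclge_mbx_ring_ptr_move_crq(crq);
	}

	/* tell hardware how far the driver has consumed the ring */
	hclgevf_write_dev(&hdev->hw, HCLGE_COMM_NIC_CRQ_HEAD_REG,
			  crq->next_to_use);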
291 static void hclgevf_parse_promisc_info(struct hclgevf_dev *hdev,
295 dev_info(&hdev->pdev->dev,
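
Only the signature and a dev_info survive at source lines 291–295. In the driver this helper reports when the host closes promiscuous mode for an untrusted VF; a minimal sketch, assuming a promisc_info of zero encodes "closed":

	static void hclgevf_parse_promisc_info(struct hclgevf_dev *hdev,
					       u16 promisc_info)
	{
		/* zero means the PF revoked promiscuous mode for this VF */
		if (!promisc_info)
			dev_info(&hdev->pdev->dev,
				 "promisc mode closed by host for untrusted VF\n");
	}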
299 void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
314 tail = hdev->arq.tail;
317 while (tail != hdev->arq.head) {
319 &hdev->hw.hw.comm_state)) {
320 dev_info(&hdev->pdev->dev,
325 msg_q = hdev->arq.msg_q[hdev->arq.head];
336 hclgevf_update_speed_duplex(hdev, speed, duplex);
337 hclgevf_update_link_status(hdev, link_status);
341 &hdev->state);
348 hdev->hw.mac.supported =
351 hdev->hw.mac.advertising =
362 set_bit(reset_type, &hdev->reset_pending);
363 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
364 hclgevf_reset_task_schedule(hdev);
371 hclgevf_update_port_base_vlan_info(hdev, state,
375 hclgevf_parse_promisc_info(hdev, le16_to_cpu(msg_q[1]));
378 dev_err(&hdev->pdev->dev,
384 hclge_mbx_head_ptr_move_arq(hdev->arq);
385 atomic_dec(&hdev->arq.count);
386 msg_q = hdev->arq.msg_q[hdev->arq.head];
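
Source lines 299–386 drain the arq in task context; each message type updates a different slice of VF state: link status and speed/duplex, link modes (supported/advertising), asserted resets, port-based VLAN state, and promiscuous-mode changes. A condensed sketch of the drain loop with two representative cases (the HCLGE_MBX_* message-code names and the msg_q[] word layout are assumptions):

	tail = hdev->arq.tail;
	while (tail != hdev->arq.head) {
		msg_q = hdev->arq.msg_q[hdev->arq.head];

		switch (le16_to_cpu(msg_q[0])) {
		case HCLGE_MBX_LINK_STAT_CHANGE:
			/* link_status, speed and duplex unpacked from msg_q[1..] */
			hclgevf_update_speed_duplex(hdev, speed, duplex);
			hclgevf_update_link_status(hdev, link_status);
			break;
		case HCLGE_MBX_ASSERTING_RESET:
			/* PF announces which reset is coming; queue it for
			 * the reset task instead of resetting inline
			 */
			set_bit(reset_type, &hdev->reset_pending);
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
			hclgevf_reset_task_schedule(hdev);
			break;
		default:
			dev_err(&hdev->pdev->dev,
				"fetched unsupported(%u) message from arq\n",
				le16_to_cpu(msg_q[0]));
			break;
		}

		hclge_mbx_head_ptr_move_arq(hdev->arq);
		atomic_dec(&hdev->arq.count);
	}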