Lines matching defs:hdev (each entry is a source line number followed by the matching line, from the habanalabs driver's ioctl handling code).

36 static int device_status_info(struct hl_device *hdev, struct hl_info_args *args)
45 dev_stat.status = hl_device_status(hdev);
51 static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
56 struct asic_fixed_properties *prop = &hdev->asic_prop;
67 hw_ip.device_id = hdev->asic_funcs->get_pci_id(hdev);
112 hw_ip.revision_id = hdev->pdev->revision;
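These hw_ip_info fragments are typical of the whole file: every info handler fills a local struct and copies it out through the user pointer carried in args. A minimal sketch of that recurring shape, with the field list abbreviated (the driver fills many more properties and its validation is longer):

static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_hw_ip_info hw_ip = {0};
	u32 size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct asic_fixed_properties *prop = &hdev->asic_prop;

	if ((!size) || (!out))
		return -EINVAL;

	/* Snapshot cached ASIC properties and PCI config space */
	hw_ip.device_id = hdev->asic_funcs->get_pci_id(hdev);
	hw_ip.dram_size = prop->dram_size;
	hw_ip.revision_id = hdev->pdev->revision;

	/* Copy out no more than the caller's buffer can hold */
	return copy_to_user(out, &hw_ip,
			min((size_t) size, sizeof(hw_ip))) ? -EFAULT : 0;
}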
121 static int hw_events_info(struct hl_device *hdev, bool aggregate,
131 arr = hdev->asic_funcs->get_events_stat(hdev, aggregate, &size);
133 dev_err(hdev->dev, "Events info not supported\n");
159 struct hl_device *hdev = hpriv->hdev;
163 struct asic_fixed_properties *prop = &hdev->asic_prop;
172 atomic64_read(&hdev->dram_used_mem);
181 static int hw_idle(struct hl_device *hdev, struct hl_info_args *args)
190 hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev,
200 static int debug_coresight(struct hl_device *hdev, struct hl_ctx *ctx, struct hl_debug_args *args)
224 dev_err(hdev->dev, "failed to copy input debug data\n");
242 rc = hdev->asic_funcs->debug_coresight(hdev, ctx, params);
244 dev_err(hdev->dev,
251 dev_err(hdev->dev, "copy to user failed in debug ioctl\n");
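The debug_coresight matches outline a copy-in / ASIC-call / copy-out path. A hedged reconstruction of that flow, with the output-buffer handling omitted for brevity:

static int debug_coresight(struct hl_device *hdev, struct hl_ctx *ctx,
				struct hl_debug_args *args)
{
	struct hl_debug_params *params;
	void *input = NULL;
	int rc = 0;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	if (args->input_size) {
		input = kzalloc(args->input_size, GFP_KERNEL);
		if (!input) {
			rc = -ENOMEM;
			goto out;
		}

		if (copy_from_user(input, u64_to_user_ptr(args->input_ptr),
					args->input_size)) {
			dev_err(hdev->dev, "failed to copy input debug data\n");
			rc = -EFAULT;
			goto out;
		}

		params->input = input;
	}

	/* The actual coresight programming is ASIC-specific */
	rc = hdev->asic_funcs->debug_coresight(hdev, ctx, params);
	if (rc)
		dev_err(hdev->dev, "debug coresight operation failed %d\n", rc);

out:
	kfree(input);
	kfree(params);
	return rc;
}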
265 static int device_utilization(struct hl_device *hdev, struct hl_info_args *args)
275 rc = hl_device_utilization(hdev, &device_util.utilization);
283 static int get_clk_rate(struct hl_device *hdev, struct hl_info_args *args)
293 rc = hl_fw_get_clk_rate(hdev, &clk_rate.cur_clk_rate_mhz, &clk_rate.max_clk_rate_mhz);
301 static int get_reset_count(struct hl_device *hdev, struct hl_info_args *args)
310 reset_count.hard_reset_cnt = hdev->reset_info.hard_reset_cnt;
311 reset_count.soft_reset_cnt = hdev->reset_info.compute_reset_cnt;
317 static int time_sync_info(struct hl_device *hdev, struct hl_info_args *args)
326 time_sync.device_time = hdev->asic_funcs->get_device_time(hdev);
336 struct hl_device *hdev = hpriv->hdev;
345 rc = hl_fw_cpucp_pci_counters_get(hdev, &pci_counters);
356 struct hl_device *hdev = hpriv->hdev;
365 mutex_lock(&hdev->clk_throttling.lock);
367 clk_throttle.clk_throttling_reason = hdev->clk_throttling.current_reason;
370 if (!(hdev->clk_throttling.aggregated_reason & BIT(i)))
374 ktime_to_us(hdev->clk_throttling.timestamp[i].start);
376 if (ktime_compare(hdev->clk_throttling.timestamp[i].end, zero_time))
377 end_time = hdev->clk_throttling.timestamp[i].end;
383 hdev->clk_throttling.timestamp[i].start));
386 mutex_unlock(&hdev->clk_throttling.lock);
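The clk_throttling matches above all come from one locked section that walks every throttling reason. Reassembled, with the local declarations and the enclosing handler assumed:

struct hl_info_clk_throttle clk_throttle = {0};
ktime_t end_time, zero_time = ktime_set(0, 0);
int i;

mutex_lock(&hdev->clk_throttling.lock);

clk_throttle.clk_throttling_reason = hdev->clk_throttling.current_reason;

for (i = 0 ; i < HL_CLK_THROTTLE_TYPE_MAX ; i++) {
	if (!(hdev->clk_throttling.aggregated_reason & BIT(i)))
		continue;

	clk_throttle.clk_throttling_timestamp_us[i] =
		ktime_to_us(hdev->clk_throttling.timestamp[i].start);

	/* A still-active throttle has no end timestamp; measure to now */
	if (ktime_compare(hdev->clk_throttling.timestamp[i].end, zero_time))
		end_time = hdev->clk_throttling.timestamp[i].end;
	else
		end_time = ktime_get();

	clk_throttle.clk_throttling_duration_ns[i] =
		ktime_to_ns(ktime_sub(end_time,
				hdev->clk_throttling.timestamp[i].start));
}

mutex_unlock(&hdev->clk_throttling.lock);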
396 struct hl_device *hdev = hpriv->hdev;
400 cntr = &hdev->aggregated_cs_counters;
445 struct hl_device *hdev = hpriv->hdev;
446 struct asic_fixed_properties *prop = &hdev->asic_prop;
471 struct hl_device *hdev = hpriv->hdev;
480 rc = hl_fw_cpucp_total_energy_get(hdev,
491 struct hl_device *hdev = hpriv->hdev;
500 rc = hl_fw_cpucp_pll_info_get(hdev, args->pll_index, freq_info.output);
510 struct hl_device *hdev = hpriv->hdev;
519 rc = hl_fw_cpucp_power_get(hdev, &power_info.power);
529 struct hl_device *hdev = hpriv->hdev;
538 hdev->last_open_session_duration_jif);
539 open_stats_info.open_counter = hdev->open_counter;
540 open_stats_info.is_compute_ctx_active = hdev->is_compute_ctx_active;
541 open_stats_info.compute_ctx_in_release = hdev->compute_ctx_in_release;
549 struct hl_device *hdev = hpriv->hdev;
558 rc = hl_fw_dram_pending_row_get(hdev, &pend_rows_num);
568 struct hl_device *hdev = hpriv->hdev;
577 rc = hl_fw_dram_replaced_row_get(hdev, &info);
587 struct hl_device *hdev = hpriv->hdev;
594 info.timestamp = ktime_to_ns(hdev->last_successful_open_ktime);
602 struct hl_device *hdev = hpriv->hdev;
609 info.seq = hdev->captured_err_info.cs_timeout.seq;
610 info.timestamp = ktime_to_ns(hdev->captured_err_info.cs_timeout.timestamp);
618 struct hl_device *hdev = hpriv->hdev;
625 razwi_info = &hdev->captured_err_info.razwi_info;
635 struct hl_device *hdev = hpriv->hdev;
643 info.timestamp = ktime_to_ns(hdev->captured_err_info.undef_opcode.timestamp);
644 info.engine_id = hdev->captured_err_info.undef_opcode.engine_id;
645 info.cq_addr = hdev->captured_err_info.undef_opcode.cq_addr;
646 info.cq_size = hdev->captured_err_info.undef_opcode.cq_size;
647 info.stream_id = hdev->captured_err_info.undef_opcode.stream_id;
648 info.cb_addr_streams_len = hdev->captured_err_info.undef_opcode.cb_addr_streams_len;
649 memcpy(info.cb_addr_streams, hdev->captured_err_info.undef_opcode.cb_addr_streams,
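The undefined-opcode matches piece together into the same snapshot-and-copy shape as the other error queries; a reconstruction, with the user-pointer extraction assumed from the sibling handlers:

static int undefined_opcode_info(struct hl_fpriv *hpriv,
					struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	struct hl_info_undefined_opcode_event info = {0};
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	/* Snapshot the most recently captured event */
	info.timestamp = ktime_to_ns(hdev->captured_err_info.undef_opcode.timestamp);
	info.engine_id = hdev->captured_err_info.undef_opcode.engine_id;
	info.cq_addr = hdev->captured_err_info.undef_opcode.cq_addr;
	info.cq_size = hdev->captured_err_info.undef_opcode.cq_size;
	info.stream_id = hdev->captured_err_info.undef_opcode.stream_id;
	info.cb_addr_streams_len = hdev->captured_err_info.undef_opcode.cb_addr_streams_len;
	memcpy(info.cb_addr_streams, hdev->captured_err_info.undef_opcode.cb_addr_streams,
			sizeof(info.cb_addr_streams));

	return copy_to_user(out, &info,
			min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}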
659 struct hl_device *hdev = hpriv->hdev;
671 info.page_order_bitmask = hdev->asic_prop.dmmu.supported_pages_mask;
697 rc = hl_fw_get_sec_attest_info(hpriv->hdev, sec_attest_info, args->sec_attest_nonce);
746 rc = hl_fw_get_dev_info_signed(hpriv->hdev,
813 struct hl_device *hdev = hpriv->hdev;
826 hdev->asic_funcs->is_device_idle(hdev, NULL, 0, &eng_data);
829 dev_err(hdev->dev,
848 struct hl_device *hdev = hpriv->hdev;
855 pgf_info = &hdev->captured_err_info.page_fault_info;
867 struct hl_device *hdev = hpriv->hdev;
874 pgf_info = &hdev->captured_err_info.page_fault_info;
890 struct hl_device *hdev = hpriv->hdev;
898 info = &hdev->captured_err_info.hw_err;
912 struct hl_device *hdev = hpriv->hdev;
920 info = &hdev->captured_err_info.fw_err;
934 struct hl_device *hdev = hpriv->hdev;
942 info = &hdev->captured_err_info.engine_err;
953 static int send_fw_generic_request(struct hl_device *hdev, struct hl_info_args *info_args)
971 dev_err(hdev->dev, "buffer size cannot exceed 1MB\n");
975 fw_buff = hl_cpu_accessible_dma_pool_alloc(hdev, size, &dma_handle);
981 dev_dbg(hdev->dev, "Failed to copy from user FW buff\n");
986 rc = hl_fw_send_generic_request(hdev, info_args->fw_sub_opcode, dma_handle, &size);
991 dev_dbg(hdev->dev, "Failed to copy to user FW generic req output\n");
996 hl_cpu_accessible_dma_pool_free(hdev, info_args->return_size, fw_buff);
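send_fw_generic_request is the one handler that round-trips a user buffer through firmware. A sketch of the flow the matched lines imply (bounds check simplified; SZ_1M stands in for the 1MB limit quoted in the error message):

static int send_fw_generic_request(struct hl_device *hdev,
					struct hl_info_args *info_args)
{
	void __user *buff = (void __user *) (uintptr_t) info_args->return_pointer;
	u32 size = info_args->return_size;
	dma_addr_t dma_handle;
	void *fw_buff;
	int rc;

	if (!size || size > SZ_1M) {
		dev_err(hdev->dev, "buffer size cannot exceed 1MB\n");
		return -EINVAL;
	}

	/* The request travels to firmware via CPU-accessible DMA memory */
	fw_buff = hl_cpu_accessible_dma_pool_alloc(hdev, size, &dma_handle);
	if (!fw_buff)
		return -ENOMEM;

	if (copy_from_user(fw_buff, buff, size)) {
		dev_dbg(hdev->dev, "Failed to copy from user FW buff\n");
		rc = -EFAULT;
		goto out;
	}

	rc = hl_fw_send_generic_request(hdev, info_args->fw_sub_opcode,
					dma_handle, &size);
	if (rc)
		goto out;

	/* Firmware may shrink the payload; copy back what it returned */
	if (copy_to_user(buff, fw_buff, size)) {
		dev_dbg(hdev->dev, "Failed to copy to user FW generic req output\n");
		rc = -EFAULT;
	}

out:
	hl_cpu_accessible_dma_pool_free(hdev, info_args->return_size, fw_buff);
	return rc;
}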
1006 struct hl_device *hdev = hpriv->hdev;
1010 dev_dbg(hdev->dev, "Padding bytes must be 0\n");
1020 return hw_ip_info(hdev, args);
1023 return device_status_info(hdev, args);
1026 return get_reset_count(hdev, args);
1029 return hw_events_info(hdev, false, args);
1032 return hw_events_info(hdev, true, args);
1088 if (!hl_device_operational(hdev, &status)) {
1091 hdev->status[status]);
1097 rc = hw_idle(hdev, args);
1101 rc = device_utilization(hdev, args);
1105 rc = get_clk_rate(hdev, args);
1109 return time_sync_info(hdev, args);
1140 return send_fw_generic_request(hdev, args);
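The lines above belong to the dispatch in _hl_info_ioctl: a handful of queries are answered even on a non-operational device, while everything else is gated by hl_device_operational(). A trimmed sketch, keeping only opcodes visible in this listing:

static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
				struct device *dev)
{
	struct hl_info_args *args = data;
	struct hl_device *hdev = hpriv->hdev;
	enum hl_device_status status;
	int rc;

	if (args->pad) {
		dev_dbg(hdev->dev, "Padding bytes must be 0\n");
		return -EINVAL;
	}

	/* These queries are served even on a non-operational device */
	switch (args->op) {
	case HL_INFO_HW_IP_INFO:
		return hw_ip_info(hdev, args);
	case HL_INFO_DEVICE_STATUS:
		return device_status_info(hdev, args);
	default:
		break;
	}

	if (!hl_device_operational(hdev, &status)) {
		dev_dbg_ratelimited(dev,
			"Device is %s. Can't execute INFO IOCTL\n",
			hdev->status[status]);
		return -EBUSY;
	}

	switch (args->op) {
	case HL_INFO_HW_IDLE:
		rc = hw_idle(hdev, args);
		break;
	case HL_INFO_CLK_RATE:
		rc = get_clk_rate(hdev, args);
		break;
	default:
		dev_err(dev, "Invalid request %d\n", args->op);
		rc = -EINVAL;
		break;
	}

	return rc;
}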
1158 return _hl_info_ioctl(hpriv, data, hpriv->hdev->dev);
1174 return _hl_info_ioctl(hpriv, data, hpriv->hdev->dev_ctrl);
1180 struct hl_device *hdev = hpriv->hdev;
1186 if (!hl_device_operational(hdev, &status)) {
1187 dev_dbg_ratelimited(hdev->dev,
1189 hdev->status[status]);
1201 if (!hdev->in_debug) {
1202 dev_err_ratelimited(hdev->dev,
1207 rc = debug_coresight(hdev, hpriv->ctx, args);
1211 rc = hl_device_set_debug_mode(hdev, hpriv->ctx, (bool) args->enable);
1215 dev_err(hdev->dev, "Invalid request %d\n", args->op);
1299 struct hl_device *hdev = hpriv->hdev;
1303 if (!hdev) {
1313 dev_dbg_ratelimited(hdev->dev_ctrl,
1319 return _hl_ioctl(hpriv, cmd, arg, ioctl, hdev->dev_ctrl);
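Finally, hl_ioctl_control guards the control node, whose file descriptor can outlive the device itself. A sketch assuming the driver's hl_ioctls_control descriptor table:

static long hl_ioctl_control(struct file *filep, unsigned int cmd,
				unsigned long arg)
{
	struct hl_fpriv *hpriv = filep->private_data;
	struct hl_device *hdev = hpriv->hdev;
	const struct hl_ioctl_desc *ioctl;
	unsigned int nr = _IOC_NR(cmd);

	/* The control fd may still be open after the device was removed */
	if (!hdev) {
		pr_err_ratelimited("Sending ioctl after device was removed! Please close FD\n");
		return -ENODEV;
	}

	/* Only the INFO ioctl is valid on the control node */
	if (nr == _IOC_NR(HL_IOCTL_INFO)) {
		ioctl = &hl_ioctls_control[nr];
	} else {
		dev_dbg_ratelimited(hdev->dev_ctrl,
			"invalid ioctl: pid=%d, nr=0x%02x\n",
			task_pid_nr(current), nr);
		return -ENOTTY;
	}

	return _hl_ioctl(hpriv, cmd, arg, ioctl, hdev->dev_ctrl);
}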