Directory searched: /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/net/vxge/

Lines Matching refs:hldev

130 	if (vpath->hldev->first_vp_id != vpath->vp_id)
259 * @hldev: HW device handle.
262 u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
271 hldev->config.intr_mode = intr_mode;
277 * @hldev: HW device handle.
286 void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
292 vxge_hw_device_mask_all(hldev);
296 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
300 VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
303 if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
304 val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
305 hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];
308 writeq(val64, &hldev->common_reg->tim_int_status0);
310 writeq(~val64, &hldev->common_reg->tim_int_mask0);
313 val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
314 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];
318 &hldev->common_reg->tim_int_status1);
321 &hldev->common_reg->tim_int_mask1);
325 val64 = readq(&hldev->common_reg->titan_general_int_status);
327 vxge_hw_device_unmask_all(hldev);
332 * @hldev: HW device handle.
340 void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
344 vxge_hw_device_mask_all(hldev);
347 writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
349 &hldev->common_reg->tim_int_mask1);
353 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
357 VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
363 * @hldev: HW device handle.
369 void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
377 &hldev->common_reg->titan_mask_all_int);
382 * @hldev: HW device handle.
388 void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
392 if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
396 &hldev->common_reg->titan_mask_all_int);
401 * @hldev: HW device handle.
407 void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
411 val32 = readl(&hldev->common_reg->titan_general_int_status);
416 * @hldev: HW device handle.
433 enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
442 val64 = readq(&hldev->common_reg->titan_general_int_status);
453 adapter_status = readq(&hldev->common_reg->adapter_status);
457 __vxge_hw_device_handle_error(hldev,
465 hldev->stats.sw_dev_info_stats.total_intr_cnt++;
469 vpath_mask = hldev->vpaths_deployed >>
474 hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;
479 hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
486 hldev->stats.sw_dev_err_stats.vpath_alarms++;
490 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
494 &hldev->virtual_paths[i], skip_alarms);
511 * @hldev: HW device handle.
517 __vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
522 if (hldev->link_state == VXGE_HW_LINK_UP)
525 hldev->link_state = VXGE_HW_LINK_UP;
528 if (hldev->uld_callbacks.link_up)
529 hldev->uld_callbacks.link_up(hldev);
536 * @hldev: HW device handle.
542 __vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
547 if (hldev->link_state == VXGE_HW_LINK_DOWN)
550 hldev->link_state = VXGE_HW_LINK_DOWN;
553 if (hldev->uld_callbacks.link_down)
554 hldev->uld_callbacks.link_down(hldev);
561 * @hldev: HW device
569 struct __vxge_hw_device *hldev,
602 if (hldev->uld_callbacks.crit_err)
603 hldev->uld_callbacks.crit_err(
604 (struct __vxge_hw_device *)hldev,
614 * @hldev: HW device.
621 void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
624 if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
625 (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
626 writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
627 hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
628 &hldev->common_reg->tim_int_status0);
631 if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
632 (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
634 (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
635 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
636 &hldev->common_reg->tim_int_status1);
1743 if (!(vpath->hldev->access_rights &
1908 struct __vxge_hw_device *hldev = NULL;
1920 hldev = vpath->hldev;
1968 __vxge_hw_device_handle_link_down_ind(hldev);
1989 __vxge_hw_device_handle_link_up_ind(hldev);
2154 hldev->stats.sw_dev_err_stats.vpath_alarms++;
2160 __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
2224 (vpath->hldev->first_vp_id * 4) + alarm_msix_id),
2227 if (vpath->hldev->config.intr_mode ==
2234 if (vpath->hldev->config.intr_mode ==
2261 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2264 &hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
2282 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2283 if (hldev->config.intr_mode ==
2287 &hldev->common_reg->
2292 &hldev->common_reg->
2312 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2315 &hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
2331 &vp->vpath->hldev->common_reg->set_msix_mask_all_vect);
2347 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2352 val64 = readq(&hldev->common_reg->tim_int_mask0);
2358 &hldev->common_reg->tim_int_mask0);
2361 val64 = readl(&hldev->common_reg->tim_int_mask1);
2368 &hldev->common_reg->tim_int_mask1);
2385 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2390 val64 = readq(&hldev->common_reg->tim_int_mask0);
2396 &hldev->common_reg->tim_int_mask0);
2404 &hldev->common_reg->tim_int_mask1);
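
The matches above cluster around the hldev interrupt helpers (vxge_hw_device_intr_enable/_disable, _mask_all/_unmask_all, _begin_irq, _clear_tx_rx, _flush_io). As a rough orientation only, the sketch below shows how a driver's INTA interrupt handler might drive them. It is not taken from this source tree: my_vxge_isr is a hypothetical function, passing hldev as dev_id is an assumption, and the exact vxge_hw_device_begin_irq() argument list beyond hldev (a skip_alarms flag, visible truncated at line 433/458, plus an out-parameter for the interrupt reason) is assumed here.

	#include <linux/interrupt.h>
	#include "vxge-traffic.h"	/* struct __vxge_hw_device, vxge_hw_device_* helpers */

	/*
	 * Hypothetical INTA handler sketch: mask, classify, ack Tx/Rx, unmask.
	 * The begin_irq signature used here is an assumption, not confirmed by
	 * the listing above.
	 */
	static irqreturn_t my_vxge_isr(int irq, void *dev_id)
	{
		struct __vxge_hw_device *hldev = dev_id;	/* assumed: hldev registered as dev_id */
		u64 reason = 0;
		enum vxge_hw_status status;

		/* Mask all device interrupts while the cause is examined. */
		vxge_hw_device_mask_all(hldev);

		/* Read titan_general_int_status and decide whether this IRQ is ours. */
		status = vxge_hw_device_begin_irq(hldev, 0 /* do not skip alarms */, &reason);
		if (status == VXGE_HW_OK) {
			/* Acknowledge pending TIM Tx/Rx interrupts, then hand off
			 * to the data path (e.g. schedule NAPI polling). */
			vxge_hw_device_clear_tx_rx(hldev);
		}

		/* Re-open the interrupt lines on the way out. */
		vxge_hw_device_unmask_all(hldev);

		return (status == VXGE_HW_OK) ? IRQ_HANDLED : IRQ_NONE;
	}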