Search scope: /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/net/qlge/

Lines Matching defs:qdev

43 static int ql_update_ring_coalescing(struct ql_adapter *qdev)
49 if (!netif_running(qdev->ndev))
55 cqicb = (struct cqicb *)&qdev->rx_ring[qdev->rss_ring_count];
56 if (le16_to_cpu(cqicb->irq_delay) != qdev->tx_coalesce_usecs ||
58 qdev->tx_max_coalesced_frames) {
59 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
60 rx_ring = &qdev->rx_ring[i];
62 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
64 cpu_to_le16(qdev->tx_max_coalesced_frames);
66 status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
69 netif_err(qdev, ifup, qdev->ndev,
77 cqicb = (struct cqicb *)&qdev->rx_ring[0];
78 if (le16_to_cpu(cqicb->irq_delay) != qdev->rx_coalesce_usecs ||
80 qdev->rx_max_coalesced_frames) {
81 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
82 rx_ring = &qdev->rx_ring[i];
84 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
86 cpu_to_le16(qdev->rx_max_coalesced_frames);
88 status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
91 netif_err(qdev, ifup, qdev->ndev,
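
The matches at lines 43-91 all come from ql_update_ring_coalescing(): the driver reads the completion-queue control block (cqicb) backing each rx_ring, skips the hardware write when the programmed irq_delay/pkt_delay already equal the requested coalescing values, and otherwise rewrites the CQICB of every TX-completion ring (indices rss_ring_count..rx_ring_count) and then of every RSS ring (indices 0..rss_ring_count). Below is a reduced sketch of the TX half of that compare-then-rewrite loop; the CFG_LCQ constant, the cq_id field, and the exact ql_write_cfg() arguments are taken on trust from the fragments rather than verified against the full source.

	/* Sketch of the TX-side coalescing update; struct layout and the
	 * ql_write_cfg() arguments are assumptions based on the matches above. */
	static int sketch_update_tx_coalescing(struct ql_adapter *qdev)
	{
		struct cqicb *cqicb;
		struct rx_ring *rx_ring;
		int i, status = 0;

		if (!netif_running(qdev->ndev))
			return 0;

		/* TX completion rings live after the RSS rings in rx_ring[]. */
		cqicb = (struct cqicb *)&qdev->rx_ring[qdev->rss_ring_count];

		/* Hardware already matches the requested values: nothing to do. */
		if (le16_to_cpu(cqicb->irq_delay) == qdev->tx_coalesce_usecs &&
		    le16_to_cpu(cqicb->pkt_delay) == qdev->tx_max_coalesced_frames)
			return 0;

		for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
			rx_ring = &qdev->rx_ring[i];
			cqicb = (struct cqicb *)rx_ring;
			cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
			cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
			/* Push the rewritten control block to the adapter. */
			status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
					      CFG_LCQ, rx_ring->cq_id);
			if (status) {
				netif_err(qdev, ifup, qdev->ndev,
					  "Failed to load CQICB.\n");
				break;
			}
		}
		return status;
	}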
101 static void ql_update_stats(struct ql_adapter *qdev)
105 u64 *iter = &qdev->nic_stats.tx_pkts;
107 spin_lock(&qdev->stats_lock);
108 if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
109 netif_err(qdev, drv, qdev->ndev,
117 if (ql_read_xgmac_reg64(qdev, i, &data)) {
118 netif_err(qdev, drv, qdev->ndev,
131 if (ql_read_xgmac_reg64(qdev, i, &data)) {
132 netif_err(qdev, drv, qdev->ndev,
145 if (ql_read_xgmac_reg64(qdev, i, &data)) {
146 netif_err(qdev, drv, qdev->ndev,
159 if (ql_read_xgmac_reg64(qdev, i, &data)) {
160 netif_err(qdev, drv, qdev->ndev,
172 if (ql_read_xgmac_reg64(qdev, 0x5b8, &data)) {
173 netif_err(qdev, drv, qdev->ndev,
179 ql_sem_unlock(qdev, qdev->xg_sem_mask);
181 spin_unlock(&qdev->stats_lock);
183 QL_DUMP_STAT(qdev);
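
The matches at lines 101-183 are ql_update_stats(): the whole register walk is bracketed by qdev->stats_lock and the XGMAC hardware semaphore (qdev->xg_sem_mask), each 64-bit counter is pulled with ql_read_xgmac_reg64() into consecutive slots of qdev->nic_stats, and failed reads are reported through netif_err(). A condensed sketch of that locking and read pattern follows; the register range and the nic_stats layout are assumptions, not the driver's actual offsets.

	/* Sketch only: register offsets and the nic_stats layout are assumed. */
	static void sketch_update_stats(struct ql_adapter *qdev)
	{
		u64 *iter = &qdev->nic_stats.tx_pkts;
		u64 data;
		int i;

		spin_lock(&qdev->stats_lock);
		if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
			netif_err(qdev, drv, qdev->ndev,
				  "Couldn't get xgmac sem.\n");
			goto quit;
		}
		/* Each 8-byte XGMAC register fills one counter slot. */
		for (i = 0x200; i < 0x280; i += 8, iter++) {
			if (ql_read_xgmac_reg64(qdev, i, &data)) {
				netif_err(qdev, drv, qdev->ndev,
					  "Error reading status register 0x%.04x.\n", i);
				goto end;
			}
			*iter = data;
		}
	end:
		ql_sem_unlock(qdev, qdev->xg_sem_mask);
	quit:
		spin_unlock(&qdev->stats_lock);
	}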
273 struct ql_adapter *qdev = netdev_priv(ndev);
274 struct nic_stats *s = &qdev->nic_stats;
276 ql_update_stats(qdev);
342 struct ql_adapter *qdev = netdev_priv(ndev);
348 if ((qdev->link_status & STS_LINK_TYPE_MASK) ==
368 struct ql_adapter *qdev = netdev_priv(ndev);
372 (qdev->fw_rev_id & 0x00ff0000) >> 16,
373 (qdev->fw_rev_id & 0x0000ff00) >> 8,
374 (qdev->fw_rev_id & 0x000000ff));
375 strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
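
The matches at lines 368-375 are the ethtool get_drvinfo handler: the 32-bit qdev->fw_rev_id is unpacked byte by byte into a dotted firmware version string, and the PCI slot name becomes bus_info. A hedged sketch in the standard get_drvinfo shape; the driver-name string and buffer widths are assumptions.

	/* Sketch of the drvinfo handler; string buffer sizes are assumptions. */
	static void sketch_get_drvinfo(struct net_device *ndev,
				       struct ethtool_drvinfo *drvinfo)
	{
		struct ql_adapter *qdev = netdev_priv(ndev);

		strncpy(drvinfo->driver, "qlge", 32);
		/* fw_rev_id packs major.minor.subminor into its low three bytes. */
		snprintf(drvinfo->fw_version, 32, "v%d.%d.%d",
			 (qdev->fw_rev_id & 0x00ff0000) >> 16,
			 (qdev->fw_rev_id & 0x0000ff00) >> 8,
			 (qdev->fw_rev_id & 0x000000ff));
		strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
	}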
384 struct ql_adapter *qdev = netdev_priv(ndev);
388 wol->wolopts = qdev->wol;
393 struct ql_adapter *qdev = netdev_priv(ndev);
398 qdev->wol = wol->wolopts;
400 netif_info(qdev, drv, qdev->ndev, "Set wol option 0x%x\n", qdev->wol);
401 if (!qdev->wol) {
403 status = ql_mb_wol_mode(qdev, wol);
404 netif_err(qdev, drv, qdev->ndev, "WOL %s (wol code 0x%x)\n",
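
The wake-on-LAN matches at lines 384-404 show the usual split: get_wol simply reports the cached qdev->wol, while set_wol stores the new wolopts and, when WOL is being turned off, tells the firmware through the ql_mb_wol_mode() mailbox call and logs the outcome. A compact sketch of the setter; the mailbox call's second argument type and the success/failure message are assumptions.

	/* Sketch of the set_wol path; the ql_mb_wol_mode() argument type
	 * is an assumption, not the driver's real signature. */
	static int sketch_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
	{
		struct ql_adapter *qdev = netdev_priv(ndev);
		int status = 0;

		qdev->wol = wol->wolopts;
		netif_info(qdev, drv, qdev->ndev, "Set wol option 0x%x\n", qdev->wol);
		if (!qdev->wol) {
			/* WOL fully disabled: hand the (empty) mode to firmware. */
			u32 wol_mode = 0;

			status = ql_mb_wol_mode(qdev, wol_mode);
			netif_err(qdev, drv, qdev->ndev, "WOL %s (wol code 0x%x)\n",
				  status == 0 ? "cleared successfully" : "clear failed",
				  wol_mode);
		}
		return status;
	}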
414 struct ql_adapter *qdev = netdev_priv(ndev);
419 status = ql_mb_get_led_cfg(qdev);
422 led_reg = qdev->led_config;
429 ql_mb_set_led_cfg(qdev, QL_LED_BLINK);
432 status = ql_mb_set_led_cfg(qdev, led_reg);
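
The matches at lines 414-432 belong to the LED identify (ethtool phys_id) handler: it saves the current LED configuration with ql_mb_get_led_cfg(), drives the blink pattern with ql_mb_set_led_cfg(qdev, QL_LED_BLINK), and finally restores the saved qdev->led_config. A save/blink/restore sketch; the duration handling and loop bound are assumptions.

	/* Sketch of the LED-identify flow; the blink-duration loop is assumed. */
	static int sketch_phys_id(struct net_device *ndev, u32 seconds)
	{
		struct ql_adapter *qdev = netdev_priv(ndev);
		u32 led_reg, i;
		int status;

		/* Save the current LED configuration so it can be restored. */
		status = ql_mb_get_led_cfg(qdev);
		if (status)
			return status;
		led_reg = qdev->led_config;

		/* Blink for a bounded time, then put the LEDs back. */
		if (!seconds || seconds > 300)
			seconds = 300;
		for (i = 0; i < seconds * 10; i++)
			ql_mb_set_led_cfg(qdev, QL_LED_BLINK);

		return ql_mb_set_led_cfg(qdev, led_reg);
	}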
439 static int ql_start_loopback(struct ql_adapter *qdev)
441 if (netif_carrier_ok(qdev->ndev)) {
442 set_bit(QL_LB_LINK_UP, &qdev->flags);
443 netif_carrier_off(qdev->ndev);
445 clear_bit(QL_LB_LINK_UP, &qdev->flags);
446 qdev->link_config |= CFG_LOOPBACK_PCS;
447 return ql_mb_set_port_cfg(qdev);
450 static void ql_stop_loopback(struct ql_adapter *qdev)
452 qdev->link_config &= ~CFG_LOOPBACK_PCS;
453 ql_mb_set_port_cfg(qdev);
454 if (test_bit(QL_LB_LINK_UP, &qdev->flags)) {
455 netif_carrier_on(qdev->ndev);
456 clear_bit(QL_LB_LINK_UP, &qdev->flags);
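
ql_start_loopback() and ql_stop_loopback() (lines 439-456) wrap the PCS loopback bit in the port configuration: on entry the driver remembers whether the link was up (QL_LB_LINK_UP), forces the carrier off so the stack stops sending real traffic, and sets CFG_LOOPBACK_PCS via the port-config mailbox; on exit it clears the bit and restores the carrier state. A paired sketch under those assumptions:

	/* Sketch of the loopback enter/leave pair from the fragments above. */
	static int sketch_start_loopback(struct ql_adapter *qdev)
	{
		if (netif_carrier_ok(qdev->ndev)) {
			/* Remember the link was up so stop can restore it. */
			set_bit(QL_LB_LINK_UP, &qdev->flags);
			netif_carrier_off(qdev->ndev);
		} else {
			clear_bit(QL_LB_LINK_UP, &qdev->flags);
		}
		qdev->link_config |= CFG_LOOPBACK_PCS;
		return ql_mb_set_port_cfg(qdev);
	}

	static void sketch_stop_loopback(struct ql_adapter *qdev)
	{
		qdev->link_config &= ~CFG_LOOPBACK_PCS;
		ql_mb_set_port_cfg(qdev);
		if (test_bit(QL_LB_LINK_UP, &qdev->flags)) {
			netif_carrier_on(qdev->ndev);
			clear_bit(QL_LB_LINK_UP, &qdev->flags);
		}
	}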
470 void ql_check_lb_frame(struct ql_adapter *qdev,
478 atomic_dec(&qdev->lb_count);
483 static int ql_run_loopback_test(struct ql_adapter *qdev)
491 skb = netdev_alloc_skb(qdev->ndev, size);
498 rc = ql_lb_send(skb, qdev->ndev);
501 atomic_inc(&qdev->lb_count);
505 ql_clean_lb_rx_ring(&qdev->rx_ring[0], 128);
506 return atomic_read(&qdev->lb_count) ? -EIO : 0;
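
The test body (lines 470-506) sends a burst of driver-built frames, bumping qdev->lb_count for each one queued with ql_lb_send(); ql_check_lb_frame() decrements the counter for every frame that comes back intact on rx_ring[0], so a non-zero count after ql_clean_lb_rx_ring() means frames were lost or corrupted. A condensed sketch follows; the burst length, frame size, and the ql_create_lb_frame()/ql_lb_send()/ql_clean_lb_rx_ring() signatures are assumptions based on the matched lines.

	/* Sketch of the loopback send/verify loop; sizes and helper
	 * signatures are assumptions. */
	static int sketch_run_loopback_test(struct ql_adapter *qdev)
	{
		unsigned int size = 64;		/* assumed test frame size */
		struct sk_buff *skb;
		int i;

		for (i = 0; i < 64; i++) {
			skb = netdev_alloc_skb(qdev->ndev, size);
			if (!skb)
				return -ENOMEM;
			skb_put(skb, size);
			ql_create_lb_frame(skb, size);	/* fill with a known pattern */
			if (ql_lb_send(skb, qdev->ndev) != NETDEV_TX_OK)
				return -EPIPE;
			/* One more frame in flight to be matched on receive. */
			atomic_inc(&qdev->lb_count);
		}
		/* Drain rx_ring[0]; ql_check_lb_frame() decrements lb_count
		 * for every frame that comes back intact. */
		ql_clean_lb_rx_ring(&qdev->rx_ring[0], 128);
		return atomic_read(&qdev->lb_count) ? -EIO : 0;
	}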
509 static int ql_loopback_test(struct ql_adapter *qdev, u64 *data)
511 *data = ql_start_loopback(qdev);
514 *data = ql_run_loopback_test(qdev);
516 ql_stop_loopback(qdev);
523 struct ql_adapter *qdev = netdev_priv(ndev);
526 set_bit(QL_SELFTEST, &qdev->flags);
529 if (ql_loopback_test(qdev, &data[0]))
536 clear_bit(QL_SELFTEST, &qdev->flags);
542 netif_err(qdev, drv, qdev->ndev,
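
ql_loopback_test() and the ethtool self_test entry (lines 509-542) glue the pieces together: start loopback, run the send/verify loop, stop loopback, and record the result in data[0]; the QL_SELFTEST flag brackets the run, and the handler refuses with a netif_err() when the interface is down. A minimal sketch of that dispatcher shape; the offline/online split follows the standard ethtool convention and is assumed here.

	/* Sketch of the self-test dispatcher; details beyond the matched
	 * lines are assumptions. */
	static void sketch_self_test(struct net_device *ndev,
				     struct ethtool_test *eth_test, u64 *data)
	{
		struct ql_adapter *qdev = netdev_priv(ndev);

		if (!netif_running(ndev)) {
			netif_err(qdev, drv, qdev->ndev,
				  "is down, Loopback test will fail.\n");
			eth_test->flags |= ETH_TEST_FL_FAILED;
			return;
		}

		set_bit(QL_SELFTEST, &qdev->flags);
		if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
			/* Offline: run the disruptive PCS loopback test. */
			if (ql_loopback_test(qdev, &data[0]))
				eth_test->flags |= ETH_TEST_FL_FAILED;
		} else {
			data[0] = 0;	/* nothing to run online */
		}
		clear_bit(QL_SELFTEST, &qdev->flags);
	}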
556 struct ql_adapter *qdev = netdev_priv(ndev);
558 ql_gen_reg_dump(qdev, p);
563 struct ql_adapter *qdev = netdev_priv(dev);
565 c->rx_coalesce_usecs = qdev->rx_coalesce_usecs;
566 c->tx_coalesce_usecs = qdev->tx_coalesce_usecs;
578 c->rx_max_coalesced_frames = qdev->rx_max_coalesced_frames;
579 c->tx_max_coalesced_frames = qdev->tx_max_coalesced_frames;
586 struct ql_adapter *qdev = netdev_priv(ndev);
589 if (c->rx_coalesce_usecs > qdev->rx_ring_size / 2)
594 if (c->tx_coalesce_usecs > qdev->tx_ring_size / 2)
600 if (qdev->rx_coalesce_usecs == c->rx_coalesce_usecs &&
601 qdev->tx_coalesce_usecs == c->tx_coalesce_usecs &&
602 qdev->rx_max_coalesced_frames == c->rx_max_coalesced_frames &&
603 qdev->tx_max_coalesced_frames == c->tx_max_coalesced_frames)
606 qdev->rx_coalesce_usecs = c->rx_coalesce_usecs;
607 qdev->tx_coalesce_usecs = c->tx_coalesce_usecs;
608 qdev->rx_max_coalesced_frames = c->rx_max_coalesced_frames;
609 qdev->tx_max_coalesced_frames = c->tx_max_coalesced_frames;
611 return ql_update_ring_coalescing(qdev);
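
ql_set_coalesce() (lines 586-611) validates the requested values against the ring sizes, returns early when nothing actually changes, caches the new values in qdev, and only then calls ql_update_ring_coalescing() to push them to hardware. A sketch of that validate/compare/apply order; the exact validation limits are assumptions.

	/* Sketch of the set_coalesce flow; the validation bounds are assumed. */
	static int sketch_set_coalesce(struct net_device *ndev,
				       struct ethtool_coalesce *c)
	{
		struct ql_adapter *qdev = netdev_priv(ndev);

		/* Reject delays that exceed what the rings can absorb. */
		if (c->rx_coalesce_usecs > qdev->rx_ring_size / 2 ||
		    c->tx_coalesce_usecs > qdev->tx_ring_size / 2)
			return -EINVAL;

		/* Avoid a hardware write when nothing changed. */
		if (qdev->rx_coalesce_usecs == c->rx_coalesce_usecs &&
		    qdev->tx_coalesce_usecs == c->tx_coalesce_usecs &&
		    qdev->rx_max_coalesced_frames == c->rx_max_coalesced_frames &&
		    qdev->tx_max_coalesced_frames == c->tx_max_coalesced_frames)
			return 0;

		qdev->rx_coalesce_usecs = c->rx_coalesce_usecs;
		qdev->tx_coalesce_usecs = c->tx_coalesce_usecs;
		qdev->rx_max_coalesced_frames = c->rx_max_coalesced_frames;
		qdev->tx_max_coalesced_frames = c->tx_max_coalesced_frames;

		/* Reprogram the completion-queue control blocks. */
		return ql_update_ring_coalescing(qdev);
	}

From user space this path is exercised with something like "ethtool -C <iface> rx-usecs 64 rx-frames 16".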
617 struct ql_adapter *qdev = netdev_priv(netdev);
619 ql_mb_get_port_cfg(qdev);
620 if (qdev->link_config & CFG_PAUSE_STD) {
629 struct ql_adapter *qdev = netdev_priv(netdev);
633 qdev->link_config |= CFG_PAUSE_STD;
635 qdev->link_config &= ~CFG_PAUSE_STD;
639 status = ql_mb_set_port_cfg(qdev);
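
The pause-parameter pair (lines 617-639) maps ethtool's rx/tx pause flags onto the single CFG_PAUSE_STD bit in qdev->link_config: the getter refreshes the port configuration with ql_mb_get_port_cfg() first, and the setter flips the bit and writes it back with ql_mb_set_port_cfg(). A sketch of the setter; rejecting asymmetric pause is an assumption consistent with a single standard-pause bit.

	/* Sketch of the pauseparam setter; treating asymmetric pause as
	 * unsupported is an assumption. */
	static int sketch_set_pauseparam(struct net_device *netdev,
					 struct ethtool_pauseparam *pause)
	{
		struct ql_adapter *qdev = netdev_priv(netdev);

		if (pause->rx_pause && pause->tx_pause)
			qdev->link_config |= CFG_PAUSE_STD;
		else if (!pause->rx_pause && !pause->tx_pause)
			qdev->link_config &= ~CFG_PAUSE_STD;
		else
			return -EINVAL;	/* only symmetric pause is representable */

		/* Hand the updated link configuration to firmware. */
		return ql_mb_set_port_cfg(qdev);
	}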
647 struct ql_adapter *qdev = netdev_priv(netdev);
648 return qdev->rx_csum;
653 struct ql_adapter *qdev = netdev_priv(netdev);
654 qdev->rx_csum = data;
673 struct ql_adapter *qdev = netdev_priv(ndev);
674 return qdev->msg_enable;
679 struct ql_adapter *qdev = netdev_priv(ndev);
680 qdev->msg_enable = value;