Search scope: /netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/drivers/net/

Lines matching refs:ring (the matches below come from the Sun Cassini Gigabit Ethernet driver, cassini.c)

58  * RX DATA: the rx completion ring has all the info, but the rx desc
59 * ring has all of the data. RX can conceivably come in under multiple
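
The comment at lines 58-59 describes the split that the rest of these matches revolve around: the RX completion ring carries the per-packet metadata, while the RX descriptor ring owns the buffers the data actually lands in. Below is a minimal standalone sketch of that split; the struct names and fields are invented for illustration and are not the driver's real completion/descriptor layouts.

    #include <stdio.h>

    /* Hypothetical, simplified layouts: the completion entry only points
       into the descriptor ring; the descriptor entry owns the data buffer. */
    struct rx_desc {                 /* one slot in the RX descriptor ring  */
        unsigned long long dma_addr; /* where the NIC wrote the packet data */
    };
    struct rx_comp {                 /* one slot in the RX completion ring  */
        unsigned desc_index;         /* which rx_desc holds the data        */
        unsigned len;                /* bytes received                      */
    };

    int main(void)
    {
        struct rx_desc descs[4] = { { 0x1000 }, { 0x2000 }, { 0x3000 }, { 0x4000 } };
        struct rx_comp comp = { .desc_index = 2, .len = 60 };

        /* the completion ring has "all the info", the descriptor ring has
           "all of the data": follow the index to reach the buffer.         */
        printf("packet of %u bytes at dma 0x%llx\n",
               comp.len, descs[comp.desc_index].dma_addr);
        return 0;
    }
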
288 static void cas_disable_irq(struct cas *cp, const int ring)
291 if (ring == 0) {
298 switch (ring) {
310 cp->regs + REG_PLUS_INTRN_MASK(ring));
315 REG_PLUS_INTRN_MASK(ring));
353 static void cas_enable_irq(struct cas *cp, const int ring)
355 if (ring == 0) { /* all but TX_DONE */
361 switch (ring) {
373 REG_PLUS_INTRN_MASK(ring));
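
The cas_disable_irq()/cas_enable_irq() matches (lines 288-373) show per-ring interrupt masking through REG_PLUS_INTRN_MASK(ring), with ring 0 treated as a special case. A compilable model of that pattern follows; the register array, the mask bits, and the reasoning about why ring 0 keeps TX_DONE masked are stand-ins inferred from the listing, not the driver's actual register layout.

    #include <stdint.h>

    #define N_RINGS 4

    /* Stand-ins for the per-ring mask registers and bit values. */
    static uint32_t intr_mask_reg[N_RINGS];

    #define INTR_MASK_ALL      0xffffffffu   /* mask every interrupt source   */
    #define INTR_MASK_NONE     0x00000000u   /* unmask everything             */
    #define INTR_TX_DONE_BIT   0x00000001u   /* hypothetical TX_DONE mask bit */

    static void ring_disable_irq(int ring)
    {
        intr_mask_reg[ring] = INTR_MASK_ALL; /* silence this ring entirely    */
    }

    static void ring_enable_irq(int ring)
    {
        /* ring 0 matches the listing's "all but TX_DONE" note: TX_DONE stays
           masked here because the listing's cas_tx() reads REG_TX_COMPN(ring)
           directly; the other rings unmask all of their sources.             */
        intr_mask_reg[ring] = (ring == 0) ? INTR_TX_DONE_BIT : INTR_MASK_NONE;
    }

    int main(void)
    {
        for (int r = 0; r < N_RINGS; r++)
            ring_disable_irq(r);
        ring_enable_irq(0);
        return 0;
    }
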
1317 /* NOTE: we use the ENC RX DESC ring for spares. the rx_page[0,1]
1338 /* this needs to be changed if we actually use the ENC RX DESC ring */
1339 static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
1359 /* only clean ring 0 as ring 1 is used for spare buffers */
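
Lines 1317-1359 note that the second RX descriptor ring (the unused "ENC RX DESC" ring) serves only as a pool of spare pages and that only ring 0 is ever cleaned. A toy version of the swap idea behind cas_page_swap() follows; the real driver checks page reference counts, which a simple in_use flag stands in for here, and every name in the sketch is invented.

    #include <stddef.h>
    #include <stdio.h>

    #define RING0_SIZE 4
    #define SPARE_SIZE 4

    struct page_buf { int in_use; };

    static struct page_buf pages[RING0_SIZE + SPARE_SIZE];
    static struct page_buf *rx_ring0[RING0_SIZE];  /* live receive buffers         */
    static struct page_buf *spares[SPARE_SIZE];    /* "ring 1" kept only as a pool */

    /* If an upper layer still holds the page in this slot, swap in a spare
       so the descriptor slot can be re-posted immediately.                  */
    static struct page_buf *swap_in_spare(int index)
    {
        if (!rx_ring0[index]->in_use)              /* page already free: keep it */
            return rx_ring0[index];

        for (size_t i = 0; i < SPARE_SIZE; i++) {
            if (spares[i] && !spares[i]->in_use) {
                struct page_buf *fresh = spares[i];
                spares[i] = rx_ring0[index];       /* park the busy page as a spare */
                rx_ring0[index] = fresh;
                break;
            }
        }
        return rx_ring0[index];                    /* no spare free: reuse old page */
    }

    int main(void)
    {
        for (int i = 0; i < RING0_SIZE; i++) rx_ring0[i] = &pages[i];
        for (int i = 0; i < SPARE_SIZE; i++) spares[i]  = &pages[RING0_SIZE + i];
        rx_ring0[1]->in_use = 1;                   /* pretend the stack still owns it */
        printf("slot 1 now backed by %p\n", (void *)swap_in_spare(1));
        return 0;
    }
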
1747 static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
1754 spin_lock(&cp->tx_lock[ring]);
1755 txds = cp->init_txds[ring];
1756 skbs = cp->tx_skbs[ring];
1757 entry = cp->tx_old[ring];
1759 count = TX_BUFF_COUNT(ring, entry, limit);
1768 entry = TX_DESC_NEXT(ring, entry);
1774 + cp->tx_tiny_use[ring][entry].nbufs + 1;
1780 cp->dev->name, ring, entry);
1783 cp->tx_tiny_use[ring][entry].nbufs = 0;
1793 entry = TX_DESC_NEXT(ring, entry);
1796 if (cp->tx_tiny_use[ring][entry].used) {
1797 cp->tx_tiny_use[ring][entry].used = 0;
1798 entry = TX_DESC_NEXT(ring, entry);
1802 spin_lock(&cp->stat_lock[ring]);
1803 cp->net_stats[ring].tx_packets++;
1804 cp->net_stats[ring].tx_bytes += skb->len;
1805 spin_unlock(&cp->stat_lock[ring]);
1808 cp->tx_old[ring] = entry;
1815 (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)))
1817 spin_unlock(&cp->tx_lock[ring]);
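
The cas_tx_ringN() matches (lines 1747-1817) show the TX reclaim walk: advance tx_old[ring] toward the completion index reported by the hardware, free each transmitted skb, and charge per-ring counters under stat_lock. A standalone, array-backed model of that walk, with no DMA or skb handling and illustrative names:

    #include <stdio.h>

    #define TX_RING_SIZE 8                   /* power of two, like the real rings */
    #define TX_NEXT(e)   (((e) + 1) & (TX_RING_SIZE - 1))

    static void *tx_skbs[TX_RING_SIZE];      /* model of cp->tx_skbs[ring] */
    static int   tx_old;                     /* model of cp->tx_old[ring]  */
    static long  tx_packets;                 /* model of per-ring stats    */

    /* Reclaim every slot the hardware reports as completed ("limit" is the
       value read back from the per-ring completion register).              */
    static void tx_reclaim(int limit)
    {
        int entry = tx_old;
        while (entry != limit) {
            if (tx_skbs[entry]) {
                /* real driver: unmap DMA, free the skb, count tx_bytes */
                tx_skbs[entry] = NULL;
                tx_packets++;
            }
            entry = TX_NEXT(entry);
        }
        tx_old = entry;                      /* producer may reuse these slots now */
    }

    int main(void)
    {
        static int dummy[3];
        tx_skbs[0] = &dummy[0]; tx_skbs[1] = &dummy[1]; tx_skbs[2] = &dummy[2];
        tx_reclaim(3);                       /* pretend hardware finished slots 0..2 */
        printf("reclaimed %ld packets, tx_old=%d\n", tx_packets, tx_old);
        return 0;
    }
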
1823 int limit, ring;
1831 for (ring = 0; ring < N_TX_RINGS; ring++) {
1838 limit = readl(cp->regs + REG_TX_COMPN(ring));
1840 if (cp->tx_old[ring] != limit)
1841 cas_tx_ringN(cp, ring, limit);
2080 /* put rx descriptor back on ring. if a buffer is in use by a higher
2083 static void cas_post_page(struct cas *cp, const int ring, const int index)
2088 entry = cp->rx_old[ring];
2090 new = cas_page_swap(cp, ring, index);
2091 cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr);
2092 cp->init_rxds[ring][entry].index =
2094 CAS_BASE(RX_INDEX_RING, ring));
2096 entry = RX_DESC_ENTRY(ring, entry + 1);
2097 cp->rx_old[ring] = entry;
2102 if (ring == 0)
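
cas_post_page() (lines 2083-2102) puts one descriptor back on the ring: swap in a fresh page, rewrite the descriptor's buffer address and page index, advance rx_old[ring], and kick the hardware only for ring 0. A simplified, self-contained version of that slot refill; the descriptor layout and kick register are stand-ins:

    #include <stdint.h>

    #define RX_RING_SIZE 8
    #define RX_NEXT(e)   (((e) + 1) & (RX_RING_SIZE - 1))

    struct rx_desc { uint64_t buffer; uint32_t index; };

    static struct rx_desc rx_ring[RX_RING_SIZE];
    static int rx_old;                       /* model of cp->rx_old[ring]     */
    static uint32_t rx_kick_reg;             /* model of the RX kick register */

    /* Put one descriptor back on the ring with a (possibly swapped) page. */
    static void post_page(uint64_t fresh_dma, uint32_t page_index, int is_ring0)
    {
        int entry = rx_old;

        rx_ring[entry].buffer = fresh_dma;   /* new page the NIC may fill       */
        rx_ring[entry].index  = page_index;  /* lets completion find the page   */

        entry = RX_NEXT(entry);
        rx_old = entry;

        if (is_ring0)
            rx_kick_reg = (uint32_t)entry;   /* tell the NIC new slots exist    */
    }

    int main(void)
    {
        post_page(0x10000, 0, 1);
        return (int)rx_kick_reg;
    }
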
2111 static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
2115 cas_page_t **page = cp->rx_pages[ring];
2117 entry = cp->rx_old[ring];
2121 cp->dev->name, ring, entry);
2125 last = RX_DESC_ENTRY(ring, num ? entry + num - 4: entry - 4);
2135 cp->cas_flags |= CAS_FLAG_RXD_POST(ring);
2139 cp->rx_old[ring] = entry;
2140 cp->rx_last[ring] = num ? num - released : 0;
2146 cp->init_rxds[ring][entry].buffer =
2157 entry = RX_DESC_ENTRY(ring, entry + 1);
2159 cp->rx_old[ring] = entry;
2164 if (ring == 0)
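
cas_post_rxds_ringN() (lines 2111-2164) refills descriptors in batches: it walks forward from rx_old[ring], stops early and records CAS_FLAG_RXD_POST(ring) plus rx_last[ring] if it runs out of pages so a later pass can finish the job, and again only ring 0 kicks the hardware. A compact model of that batch refill with an out-of-memory early exit; the allocator and the pending flag are placeholders:

    #include <stdint.h>
    #include <stdlib.h>

    #define RX_RING_SIZE 8
    #define RX_NEXT(e)   (((e) + 1) & (RX_RING_SIZE - 1))

    struct rx_desc { uint64_t buffer; };

    static struct rx_desc rx_ring[RX_RING_SIZE];
    static int rx_old, rx_last;              /* models of cp->rx_old / cp->rx_last */
    static int rxd_post_pending;             /* model of CAS_FLAG_RXD_POST(ring)   */

    /* Stand-in allocator: returns 0 when no fresh page is available. */
    static uint64_t alloc_page_dma(void) { return (rand() & 1) ? 0x20000 : 0; }

    /* Refill up to "num" descriptors; remember the shortfall if we run dry. */
    static int post_rxds(int num)
    {
        int entry = rx_old, released = 0;

        while (released < num) {
            uint64_t dma = alloc_page_dma();
            if (!dma) {
                rxd_post_pending = 1;        /* finish from a later pass/timer */
                rx_old  = entry;
                rx_last = num - released;
                return 1;
            }
            rx_ring[entry].buffer = dma;
            entry = RX_NEXT(entry);
            released++;
        }
        rx_old  = entry;
        rx_last = 0;
        return 0;
    }

    int main(void) { return post_rxds(4); }
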
2173 /* process a completion ring. packets are set up in three basic ways:
2183 * force serialization on the single descriptor ring.
2185 static int cas_rx_ringN(struct cas *cp, int ring, int budget)
2187 struct cas_rx_comp *rxcs = cp->init_rxcs[ring];
2193 cp->dev->name, ring,
2195 cp->rx_new[ring]);
2197 entry = cp->rx_new[ring];
2223 spin_lock(&cp->stat_lock[ring]);
2224 cp->net_stats[ring].rx_errors++;
2226 cp->net_stats[ring].rx_length_errors++;
2228 cp->net_stats[ring].rx_crc_errors++;
2229 spin_unlock(&cp->stat_lock[ring]);
2233 spin_lock(&cp->stat_lock[ring]);
2234 ++cp->net_stats[ring].rx_dropped;
2235 spin_unlock(&cp->stat_lock[ring]);
2255 spin_lock(&cp->stat_lock[ring]);
2256 cp->net_stats[ring].rx_packets++;
2257 cp->net_stats[ring].rx_bytes += len;
2258 spin_unlock(&cp->stat_lock[ring]);
2287 entry = RX_COMP_ENTRY(ring, entry + 1 +
2294 cp->rx_new[ring] = entry;
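
The comment at line 2173 introduces cas_rx_ringN(): it consumes completion entries starting at rx_new[ring], subject to the budget argument, and bumps per-ring packet/error/drop counters under stat_lock. A budget-limited consumer loop in miniature; the completion layout, the "ready" test, and the budget handling are all simplified stand-ins:

    #include <stdio.h>

    #define RX_COMP_RING_SIZE 8
    #define RXC_NEXT(e)       (((e) + 1) & (RX_COMP_RING_SIZE - 1))

    struct rx_comp { int ready; int len; int error; };

    static struct rx_comp comp_ring[RX_COMP_RING_SIZE];
    static int rx_new;                       /* model of cp->rx_new[ring] */
    static long rx_packets, rx_errors;

    /* Consume completion entries until the budget runs out or the ring is empty. */
    static int rx_poll(int budget)
    {
        int entry = rx_new, npackets = 0;

        while (npackets < budget && comp_ring[entry].ready) {
            if (comp_ring[entry].error)
                rx_errors++;                 /* crc/length errors in the real driver   */
            else
                rx_packets++;                /* real driver hands an skb to the stack  */
            comp_ring[entry].ready = 0;
            entry = RXC_NEXT(entry);
            npackets++;
        }
        rx_new = entry;                      /* resume here on the next poll */
        return npackets;
    }

    int main(void)
    {
        comp_ring[0].ready = comp_ring[1].ready = 1;
        printf("processed %d\n", rx_poll(16));
        return 0;
    }
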
2303 /* put completion entries back on the ring */
2305 struct cas *cp, int ring)
2307 struct cas_rx_comp *rxc = cp->init_rxcs[ring];
2310 last = cp->rx_cur[ring];
2311 entry = cp->rx_new[ring];
2314 dev->name, ring, readl(cp->regs + REG_RX_COMP_HEAD),
2320 last = RX_COMP_ENTRY(ring, last + 1);
2322 cp->rx_cur[ring] = last;
2324 if (ring == 0)
2327 writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring));
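
cas_post_rxcs_ringN() (lines 2303-2327) hands processed completion slots back to the hardware: walk rx_cur[ring] up to rx_new[ring], re-arm each slot, then write the final index to the completion tail register (REG_PLUS_RX_COMPN_TAIL for the non-zero rings). A minimal tail-pointer update model with invented names:

    #include <stdint.h>

    #define RX_COMP_RING_SIZE 8
    #define RXC_NEXT(e)       (((e) + 1) & (RX_COMP_RING_SIZE - 1))

    struct rx_comp { int ready; };

    static struct rx_comp comp_ring[RX_COMP_RING_SIZE];
    static int rx_cur, rx_new;               /* consumed vs. processed indices        */
    static uint32_t comp_tail_reg;           /* model of the completion tail register */

    /* Hand processed completion slots back to the hardware. */
    static void post_rxcs(void)
    {
        int last = rx_cur;
        while (last != rx_new) {
            comp_ring[last].ready = 0;       /* slot may be rewritten by the NIC */
            last = RXC_NEXT(last);
        }
        rx_cur = last;
        comp_tail_reg = (uint32_t)last;      /* tell the NIC how far we have read */
    }

    int main(void) { rx_new = 3; post_rxcs(); return (int)comp_tail_reg; }
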
2332 /* cassini can use all four PCI interrupts for the completion ring.
2338 const int ring)
2341 cas_post_rxcs_ringN(dev, cp, ring);
2349 int ring;
2350 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));
2356 ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
2363 cas_rx_ringN(cp, ring, 0);
2369 cas_handle_irqN(dev, cp, status, ring);
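
The comment at line 2332 says Cassini can spread the completion rings across all four PCI interrupts, and the handler around lines 2349-2369 picks ring 2 for pci_irq_INTC and ring 3 otherwise, then services just that ring. One oddity worth checking against the full source: in this excerpt the status read at line 2350 uses ring before the assignment at line 2356. A tiny dispatch sketch follows; the IRQ numbers are placeholders:

    /* Hypothetical dispatch: the extra PCI interrupts map one-to-one onto
       the upper completion rings (names and numbers are placeholders).    */
    enum { IRQ_INTC = 30, IRQ_INTD = 31 };

    static int irq_to_ring(int irq)
    {
        return (irq == IRQ_INTC) ? 2 : 3;
    }

    int main(void)
    {
        return irq_to_ring(IRQ_INTD);        /* -> ring 3 */
    }
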
2396 /* ring 2 handles a few more events than 3 and 4 */
2500 * ring N_RX_COMP_RING times with a request of
2631 static inline int cas_intme(int ring, int entry)
2634 if (!(entry & ((TX_DESC_RINGN_SIZE(ring) >> 1) - 1)))
2640 static void cas_write_txd(struct cas *cp, int ring, int entry,
2643 struct cas_tx_desc *txd = cp->init_txds[ring] + entry;
2646 if (cas_intme(ring, entry))
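
cas_intme() (lines 2631-2634) is the TX interrupt-moderation test: !(entry & ((TX_DESC_RINGN_SIZE(ring) >> 1) - 1)) is true only when entry is a multiple of half the (power-of-two) ring size, so at most two descriptors per trip around the ring request a TX_DONE interrupt, and cas_write_txd() sets the interrupt bit only for those. The arithmetic can be checked standalone:

    #include <stdio.h>

    #define TX_RING_SIZE 64          /* must be a power of two for the mask trick */

    /* Request an interrupt only when "entry" is a multiple of half the ring:
       twice per trip around the ring instead of once per packet.             */
    static int want_intme(int entry)
    {
        return !(entry & ((TX_RING_SIZE >> 1) - 1));
    }

    int main(void)
    {
        for (int e = 0; e < TX_RING_SIZE; e++)
            if (want_intme(e))
                printf("descriptor %d requests TX_DONE\n", e);  /* prints 0 and 32 */
        return 0;
    }
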
2654 static inline void *tx_tiny_buf(struct cas *cp, const int ring,
2657 return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry;
2660 static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
2663 cp->tx_tiny_use[ring][tentry].nbufs++;
2664 cp->tx_tiny_use[ring][entry].used = 1;
2665 return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry;
2668 static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
2678 spin_lock_irqsave(&cp->tx_lock[ring], flags);
2681 if (TX_BUFFS_AVAIL(cp, ring) <=
2684 spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
2700 entry = cp->tx_new[ring];
2701 cp->tx_skbs[ring][entry] = skb;
2713 cas_write_txd(cp, ring, entry, mapping, len - tabort,
2715 entry = TX_DESC_NEXT(ring, entry);
2718 tx_tiny_buf(cp, ring, entry), tabort);
2719 mapping = tx_tiny_map(cp, ring, entry, tentry);
2720 cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
2723 cas_write_txd(cp, ring, entry, mapping, len, ctrl |
2726 entry = TX_DESC_NEXT(ring, entry);
2741 cas_write_txd(cp, ring, entry, mapping, len - tabort,
2743 entry = TX_DESC_NEXT(ring, entry);
2746 memcpy(tx_tiny_buf(cp, ring, entry),
2750 mapping = tx_tiny_map(cp, ring, entry, tentry);
2754 cas_write_txd(cp, ring, entry, mapping, len, ctrl,
2756 entry = TX_DESC_NEXT(ring, entry);
2759 cp->tx_new[ring] = entry;
2760 if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))
2766 dev->name, ring, entry, skb->len,
2767 TX_BUFFS_AVAIL(cp, ring));
2768 writel(entry, cp->regs + REG_TX_KICKN(ring));
2769 spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
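
cas_xmit_tx_ringN() (lines 2668-2769) is the enqueue side: take tx_lock[ring], bail out when fewer than CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1) descriptors remain, write one descriptor per buffer starting at tx_new[ring] (bouncing the last tabort bytes of a problematic fragment through a tx_tiny buffer, apparently a workaround for a hardware limit on short fragments), then write the new producer index to REG_TX_KICKN(ring). A reduced model of that path with no DMA, no tiny-buffer split, and an invented low-water mark:

    #include <stdio.h>

    #define TX_RING_SIZE  8
    #define TX_NEXT(e)    (((e) + 1) & (TX_RING_SIZE - 1))
    #define TX_LOW_WATER  3                  /* stand-in for CAS_TABORT*(MAX_SKB_FRAGS+1) */

    struct tx_desc { unsigned long buf; int len; };

    static struct tx_desc tx_ring[TX_RING_SIZE];
    static int tx_new, tx_old;               /* producer / consumer indices */
    static unsigned int tx_kick_reg;         /* model of REG_TX_KICKN(ring) */

    static int tx_bufs_avail(void)
    {
        return (tx_old - tx_new - 1) & (TX_RING_SIZE - 1);
    }

    /* Enqueue one single-buffer packet, or report that the queue must stop. */
    static int xmit_one(unsigned long buf, int len)
    {
        if (tx_bufs_avail() <= TX_LOW_WATER)
            return 1;                        /* caller stops the netdev queue */

        tx_ring[tx_new].buf = buf;
        tx_ring[tx_new].len = len;
        tx_new = TX_NEXT(tx_new);
        tx_kick_reg = (unsigned int)tx_new;  /* hand the new producer index to hw */
        return 0;
    }

    int main(void)
    {
        int stopped = 0;
        for (int i = 0; i < TX_RING_SIZE && !stopped; i++)
            stopped = xmit_one(0x1000 + i, 64);
        printf("tx_new=%d stopped=%d\n", tx_new, stopped);
        return 0;
    }
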
2780 static int ring;
2785 if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
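
The transmit entry point (lines 2780-2785) keeps a static counter and selects the next TX ring with ring++ & N_TX_RINGS_MASK, spreading packets round-robin across the rings. A two-line illustration; N_TX_RINGS = 4 is an assumption here, the mask form only requires a power of two:

    #include <stdio.h>

    #define N_TX_RINGS       4               /* assumed value */
    #define N_TX_RINGS_MASK  (N_TX_RINGS - 1)

    int main(void)
    {
        static int ring;                     /* persists across "transmissions" */
        for (int pkt = 0; pkt < 10; pkt++)
            printf("packet %d -> tx ring %d\n", pkt, ring++ & N_TX_RINGS_MASK);
        return 0;
    }
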
2813 /* write out tx ring info and tx desc bases */
3789 static void cas_clean_txd(struct cas *cp, int ring)
3791 struct cas_tx_desc *txd = cp->init_txds[ring];
3792 struct sk_buff *skb, **skbs = cp->tx_skbs[ring];
3796 size = TX_DESC_RINGN_SIZE(ring);
3825 if (cp->tx_tiny_use[ring][ent].used)
3833 memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring]));
3837 static inline void cas_free_rx_desc(struct cas *cp, int ring)
3839 cas_page_t **page = cp->rx_pages[ring];
3842 size = RX_DESC_RINGN_SIZE(ring);
3877 static inline int cas_alloc_rx_desc(struct cas *cp, int ring)
3879 cas_page_t **page = cp->rx_pages[ring];
3882 size = RX_DESC_RINGN_SIZE(ring);
3917 /* The link went down, we reset the ring, but keep
4308 /* saved bits that are unique to ring 0 */