Searched refs:cur_tx (Results 1 - 25 of 48) sorted by relevance


/netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/drivers/net/arcnet/
arcnet.c
425 lp->cur_tx = lp->next_tx = -1;
606 ASTATUS(), lp->cur_tx, lp->next_tx, skb->len,skb->protocol);
688 BUGMSG(D_DURING, "go_tx: status=%Xh, intmask=%Xh, next_tx=%d, cur_tx=%d\n",
689 ASTATUS(), lp->intmask, lp->next_tx, lp->cur_tx);
691 if (lp->cur_tx != -1 || lp->next_tx == -1)
696 lp->cur_tx = lp->next_tx;
700 ACOMMAND(TXcmd | (lp->cur_tx << 3));
727 ACOMMAND(NOTXcmd | (lp->cur_tx << 3));
744 if (lp->cur_tx == -1)
851 if (lp->cur_tx !
[all...]
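
The arcnet matches above show a two-stage handoff rather than a descriptor ring: next_tx holds a buffer staged for transmit, cur_tx the buffer the hardware is currently sending, and -1 means "none" (line 425 initializes both that way, and go_tx at lines 688-700 performs the handoff). A minimal standalone sketch of that handoff; go_tx is the name from the listing, the txstate struct and everything else is illustrative:

#include <stdio.h>

/* Hypothetical stand-in for the driver's private state. */
struct txstate { int cur_tx, next_tx; };

/* Start a hardware transmit if a buffer is staged and none is in flight,
 * mirroring the guard at line 691 of the listing. */
static void go_tx(struct txstate *s)
{
    if (s->cur_tx != -1 || s->next_tx == -1)
        return;                 /* busy, or nothing staged */
    s->cur_tx = s->next_tx;     /* hand the staged buffer to the hardware */
    s->next_tx = -1;
    printf("TX command for buffer %d\n", s->cur_tx);
}

int main(void)
{
    struct txstate s = { .cur_tx = -1, .next_tx = -1 };
    s.next_tx = 2;  /* stage buffer 2 */
    go_tx(&s);      /* starts sending buffer 2 */
    s.cur_tx = -1;  /* a TX-complete interrupt would clear cur_tx */
    return 0;
}
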
arc-rawmode.c
172 lp->next_tx, lp->cur_tx, bufnum);
/netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/drivers/net/
atarilance.c
223 int cur_rx, cur_tx; /* The next free ring entry */ member in struct:lance_private
698 lp->cur_rx = lp->cur_tx = 0;
756 DPRINTK( 2, ( "Ring data: dirty_tx %d cur_tx %d%s cur_rx %d\n",
757 lp->dirty_tx, lp->cur_tx,
828 entry = lp->cur_tx & TX_RING_MOD_MASK;
842 lp->cur_tx++;
843 while( lp->cur_tx >= TX_RING_SIZE && lp->dirty_tx >= TX_RING_SIZE ) {
844 lp->cur_tx -= TX_RING_SIZE;
899 while( dirty_tx < lp->cur_tx) {
934 if (lp->cur_tx
[all...]
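
atarilance combines free-running counters with a power-of-two mask (entry = cur_tx & TX_RING_MOD_MASK at line 828) and a rebase loop (lines 843-844) that subtracts TX_RING_SIZE from the counters once they both pass it, so they stay small without losing their difference. A standalone sketch under those assumptions; ring size and names are illustrative:

#include <assert.h>

#define TX_RING_SIZE     8                    /* must be a power of two */
#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)

struct ring { unsigned int cur_tx, dirty_tx; };

static unsigned int next_entry(struct ring *r)
{
    unsigned int entry = r->cur_tx & TX_RING_MOD_MASK; /* slot index */
    r->cur_tx++;
    /* Rebase both counters once both pass TX_RING_SIZE; the difference
     * (the number of in-flight packets) is unchanged. */
    while (r->cur_tx >= TX_RING_SIZE && r->dirty_tx >= TX_RING_SIZE) {
        r->cur_tx   -= TX_RING_SIZE;
        r->dirty_tx -= TX_RING_SIZE;
    }
    return entry;
}

int main(void)
{
    struct ring r = { 0, 0 };
    for (int i = 0; i < 20; i++) {
        unsigned int e = next_entry(&r);
        assert(e < TX_RING_SIZE);
        r.dirty_tx = r.cur_tx;   /* pretend completion reaps immediately */
    }
    return 0;
}
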
pci-skeleton.c
465 atomic_t cur_tx; member in struct:netdrv_private
1207 atomic_set (&tp->cur_tx, 0);
1266 atomic_set (&tp->cur_tx, 0);
1310 dev->name, atomic_read (&tp->cur_tx),
1340 entry = atomic_read (&tp->cur_tx) % NUM_TX_DESC;
1354 atomic_inc (&tp->cur_tx);
1355 if ((atomic_read (&tp->cur_tx) - atomic_read (&tp->dirty_tx)) >= NUM_TX_DESC)
1369 int cur_tx, dirty_tx, tx_left; local
1377 cur_tx = atomic_read (&tp->cur_tx);
[all...]
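
pci-skeleton keeps cur_tx and dirty_tx as free-running atomic counters: the slot is cur_tx % NUM_TX_DESC (line 1340) and the ring is full once the difference reaches NUM_TX_DESC (line 1355). A sketch using C11 atomics in place of the kernel's atomic_t; the counter names mirror the listing, everything else is illustrative:

#include <stdatomic.h>
#include <stdbool.h>

#define NUM_TX_DESC 4

static atomic_uint cur_tx;    /* producer: next descriptor to fill */
static atomic_uint dirty_tx;  /* consumer: next descriptor to reclaim */

/* Only the difference of the counters matters, so unsigned wraparound
 * is harmless: occupancy is always cur_tx - dirty_tx. */
static bool tx_ring_full(void)
{
    return atomic_load(&cur_tx) - atomic_load(&dirty_tx) >= NUM_TX_DESC;
}

static unsigned int tx_queue_one(void)
{
    unsigned int entry = atomic_load(&cur_tx) % NUM_TX_DESC;
    /* ... fill descriptor `entry`, hand it to the hardware ... */
    atomic_fetch_add(&cur_tx, 1);
    return entry;
}

int main(void)
{
    while (!tx_ring_full())
        tx_queue_one();                 /* fills all NUM_TX_DESC slots */
    atomic_fetch_add(&dirty_tx, 1);     /* a completion frees one slot */
    return tx_ring_full();              /* 0: one slot free again */
}
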
sundance.c
380 unsigned int cur_tx, dirty_tx; member in struct:netdev_private
934 printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
935 np->cur_tx, np->cur_tx % TX_RING_SIZE,
950 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
964 np->cur_rx = np->cur_tx = 0;
1007 &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
1010 for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
1036 entry = np->cur_tx % TX_RING_SIZE;
1047 /* Increment cur_tx befor
[all...]
fealnx.c
405 struct fealnx_desc *cur_tx; member in struct:netdev_private
1164 iowrite32(np->tx_ring_dma + ((char*)np->cur_tx - (char*)np->tx_ring),
1285 np->cur_tx = &np->tx_ring[0];
1390 np->cur_tx = &np->tx_ring[0];
1498 long tx_status = np->cur_tx->status;
1499 long tx_control = np->cur_tx->control;
1504 next = np->cur_tx->next_desc_logical;
1541 pci_unmap_single(np->pci_dev, np->cur_tx->buffer,
1542 np->cur_tx->skbuff->len, PCI_DMA_TODEVICE);
1543 dev_kfree_skb_irq(np->cur_tx
[all...]
ariadne.c
99 int cur_tx, cur_rx; /* The next free ring entry */ member in struct:ariadne_private
339 priv->cur_rx = priv->cur_tx = 0;
442 while (dirty_tx < priv->cur_tx) {
479 if (priv->cur_tx - dirty_tx >= TX_RING_SIZE) {
481 "full=%d.\n", dirty_tx, priv->cur_tx, priv->tx_full);
487 dirty_tx > priv->cur_tx - TX_RING_SIZE + 2) {
554 entry = priv->cur_tx % TX_RING_SIZE;
568 priv->cur_tx++;
569 if ((priv->cur_tx >= TX_RING_SIZE) && (priv->dirty_tx >= TX_RING_SIZE)) {
572 priv->cur_tx
[all...]
yellowfin.c
327 unsigned int cur_tx, dirty_tx; member in struct:yellowfin_private
694 dev->name, yp->cur_tx, yp->dirty_tx,
716 if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
730 yp->cur_rx = yp->cur_tx = 0;
816 "ownership" bit last, and only then increment cur_tx. */
819 entry = yp->cur_tx % TX_RING_SIZE;
849 yp->cur_tx++;
857 yp->cur_tx++;
859 unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
874 if (yp->cur_tx
[all...]
eepro100.c
422 unsigned int cur_tx, dirty_tx; /* The ring entries to be free()ed. */ member in struct:speedo_private
971 sp->cur_tx = 0;
1095 (struct descriptor *)&sp->tx_ring[sp->cur_tx++ % TX_RING_SIZE];
1099 cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
1210 dev->name, sp->cur_tx, sp->dirty_tx);
1214 i == sp->cur_tx % TX_RING_SIZE ? '=' : ' ',
1287 while ((int)(sp->cur_tx - sp->dirty_tx) > 0) {
1348 sp->dirty_tx, sp->cur_tx,
1398 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
1407 entry = sp->cur_tx
[all...]
epic100.c
266 unsigned int cur_tx, dirty_tx; member in struct:epic_private
789 dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
885 printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n",
886 dev->name, ep->dirty_tx, ep->cur_tx);
910 ep->dirty_tx = ep->cur_tx = 0;
965 free_count = ep->cur_tx - ep->dirty_tx;
966 entry = ep->cur_tx % TX_RING_SIZE;
987 ep->cur_tx++;
1029 unsigned int dirty_tx, cur_tx; local
1035 cur_tx
[all...]
lance.c
248 int cur_rx, cur_tx; /* The next free ring entry */ member in struct:lance_private
860 lp->cur_rx = lp->cur_tx = 0;
924 printk (" Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
925 lp->dirty_tx, lp->cur_tx, netif_queue_stopped(dev) ? " (full)" : "",
964 entry = lp->cur_tx & TX_RING_MOD_MASK;
999 lp->cur_tx++;
1007 if ((lp->cur_tx - lp->dirty_tx) >= TX_RING_SIZE)
1046 while (dirty_tx < lp->cur_tx) {
1087 if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
1089 dirty_tx, lp->cur_tx,
[all...]
hamachi.c
500 unsigned int cur_tx, dirty_tx; member in struct:hamachi_private
1015 for (; hmp->cur_tx - hmp->dirty_tx > 0; hmp->dirty_tx++) {
1126 hmp->cur_rx = hmp->cur_tx = 0;
1178 hmp->cur_rx = hmp->cur_tx = 0;
1270 printk(KERN_WARNING "%s: Hamachi transmit queue full at slot %d.\n",dev->name, hmp->cur_tx);
1284 entry = hmp->cur_tx % TX_RING_SIZE;
1336 hmp->cur_tx++;
1355 if ((hmp->cur_tx - hmp->dirty_tx) < (TX_RING_SIZE - 4))
1365 dev->name, hmp->cur_tx, entry);
1410 for (; hmp->cur_tx
[all...]
3c515.c
309 unsigned int cur_rx, cur_tx; /* The next free ring entry */ member in struct:corkscrew_private
829 vp->cur_tx = vp->dirty_tx = 0;
970 vp->cur_tx);
1003 int entry = vp->cur_tx % TX_RING_SIZE;
1010 if (vp->cur_tx != 0)
1011 prev_entry = &vp->tx_ring[(vp->cur_tx - 1) % TX_RING_SIZE];
1016 dev->name, vp->cur_tx);
1040 vp->cur_tx++;
1041 if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1)
1167 while (lp->cur_tx
[all...]
dl2k.c
540 np->cur_rx = np->cur_tx = 0;
604 entry = np->cur_tx % TX_RING_SIZE;
633 np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE;
634 if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
690 if (np->cur_tx != np->old_tx)
709 while (entry != np->cur_tx) {
737 ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
1281 ("tx_full=%x cur_tx=%lx old_tx=%lx cur_rx=%lx old_rx=%lx\n",
1282 netif_queue_stopped(dev), np->cur_tx, n
[all...]
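
dl2k differs from most of the drivers above: cur_tx is kept wrapped into [0, TX_RING_SIZE) on every increment (line 633), so the in-flight count has to be recovered modulo the ring size, as in the (cur_tx - old_tx + TX_RING_SIZE) % TX_RING_SIZE expression in the matches. A small sketch of that arithmetic; the values are illustrative:

#include <stdio.h>

#define TX_RING_SIZE 16

static unsigned int cur_tx, old_tx;

static unsigned int tx_in_flight(void)
{
    /* Add TX_RING_SIZE before the modulo so the result is correct even
     * when cur_tx has wrapped around past old_tx. */
    return (cur_tx - old_tx + TX_RING_SIZE) % TX_RING_SIZE;
}

int main(void)
{
    cur_tx = 3;  old_tx = 14;            /* producer wrapped around */
    printf("%u\n", tx_in_flight());      /* prints 5 */
    return 0;
}

With wrapped indices, cur_tx == old_tx is ambiguous between empty and full, so drivers in this style typically cap usable occupancy below TX_RING_SIZE or keep an extra flag.
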
sonic.c
293 int entry = lp->cur_tx;
297 /* At this point, cur_tx is the index of a TD that is one of:
343 lp->cur_tx = entry;
686 lp->cur_tx = lp->next_tx = 0;
via-rhine.c
391 unsigned int cur_tx, dirty_tx; member in struct:rhine_private
966 rp->dirty_tx = rp->cur_tx = 0;
1224 entry = rp->cur_tx % TX_RING_SIZE;
1267 rp->cur_tx++;
1276 if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
1285 dev->name, rp->cur_tx-1, entry);
1371 while (rp->dirty_tx != rp->cur_tx) {
1418 if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
starfire.c
612 unsigned int cur_tx, dirty_tx, reap_tx; member in struct:netdev_private
1173 np->cur_rx = np->cur_tx = np->reap_tx = 0;
1225 if ((np->cur_tx - np->dirty_tx) + skb_num_frags(skb) * 2 > TX_RING_SIZE) {
1237 entry = np->cur_tx % TX_RING_SIZE;
1272 dev->name, np->cur_tx, np->dirty_tx,
1276 np->cur_tx += np->tx_info[entry].used_slots;
1280 np->cur_tx += np->tx_info[entry].used_slots;
1284 if (np->cur_tx % (TX_RING_SIZE / 2) == 0)
1297 if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE)
1378 (np->cur_tx
[all...]
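
In starfire a packet can occupy a variable number of ring slots: the admission check at line 1225 reserves skb_num_frags(skb) * 2 slots before anything is written, and cur_tx later advances by tx_info[entry].used_slots. A sketch of that admit-then-advance accounting under that reading of the listing; all names and values are illustrative:

#include <stdbool.h>
#include <stdio.h>

#define TX_RING_SIZE 32

struct ring { unsigned int cur_tx, dirty_tx; };

/* Worst-case reservation, checked before any descriptor is written. */
static bool tx_has_room(const struct ring *r, unsigned int num_frags)
{
    return (r->cur_tx - r->dirty_tx) + num_frags * 2 <= TX_RING_SIZE;
}

static void tx_queue(struct ring *r, unsigned int used_slots)
{
    /* ... fill used_slots descriptors from cur_tx % TX_RING_SIZE ... */
    r->cur_tx += used_slots;   /* advance by the slots actually consumed */
}

int main(void)
{
    struct ring r = { 0, 0 };
    if (tx_has_room(&r, 3))    /* 3 fragments -> reserve up to 6 slots */
        tx_queue(&r, 4);       /* only 4 turned out to be needed */
    printf("cur_tx=%u\n", r.cur_tx);
    return 0;
}
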
/netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/drivers/net/tulip/
tulip_core.c
320 tp->cur_rx = tp->cur_tx = 0;
353 tp->tx_buffers[tp->cur_tx].skb = NULL;
354 tp->tx_buffers[tp->cur_tx].mapping = mapping;
357 tp->tx_ring[tp->cur_tx].length = cpu_to_le32(0x08000000 | 192);
358 tp->tx_ring[tp->cur_tx].buffer1 = cpu_to_le32(mapping);
359 tp->tx_ring[tp->cur_tx].status = cpu_to_le32(DescOwned);
361 tp->cur_tx++;
647 entry = tp->cur_tx % TX_RING_SIZE;
655 if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
657 } else if (tp->cur_tx
[all...]
xircom_tulip_cb.c
303 unsigned int cur_rx, cur_tx; /* The next free ring entry */ member in struct:xircom_private
862 tp->cur_rx = tp->cur_tx = 0;
913 entry = tp->cur_tx % TX_RING_SIZE;
924 if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
926 } else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
928 } else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
940 tp->cur_tx++;
1081 for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
1118 if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
1120 dev->name, dirty_tx, tp->cur_tx, t
[all...]
interrupt.c
570 for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
622 if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
624 dev->name, dirty_tx, tp->cur_tx);
629 if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
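The tulip interrupt.c matches show the canonical completion ("reap") loop: advance dirty_tx toward cur_tx, stop at the first descriptor the NIC still owns, and sanity-check that dirty_tx never trails by more than a full ring (line 622). A standalone sketch with DescOwned and the ring layout reduced to placeholders:

#include <stdio.h>

#define TX_RING_SIZE 16
#define DescOwned    0x80000000u

struct tx_desc { unsigned int status; };

static struct tx_desc tx_ring[TX_RING_SIZE];
static unsigned int cur_tx, dirty_tx;

static void tx_reap(void)
{
    for (; cur_tx - dirty_tx > 0; dirty_tx++) {
        unsigned int entry = dirty_tx % TX_RING_SIZE;
        if (tx_ring[entry].status & DescOwned)
            break;              /* hardware not done with this one yet */
        /* ... unmap the buffer and free the skb for `entry` ... */
    }
    /* Out-of-sync check, as in the listing: dirty_tx must never trail
     * cur_tx by more than a full ring. */
    if (cur_tx - dirty_tx > TX_RING_SIZE)
        printf("BUG: out-of-sync dirty pointer, %u vs. %u\n",
               dirty_tx, cur_tx);
}

int main(void)
{
    cur_tx = 3;                       /* three packets queued ...       */
    tx_ring[2].status = DescOwned;    /* ... the third still in flight  */
    tx_reap();                        /* reaps entries 0 and 1          */
    return (int)dirty_tx - 2;         /* 0 on success                   */
}
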
winbond-840.c
311 unsigned int cur_tx, dirty_tx; member in struct:netdev_private
832 np->tx_q_bytes = np->dirty_tx = np->cur_tx = 0;
944 np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
1005 entry = np->cur_tx % TX_RING_SIZE;
1025 * increasing np->cur_tx and setting DescOwned:
1026 * - if np->cur_tx is increased first the interrupt
1031 * since the np->cur_tx was not yet increased.
1034 np->cur_tx++;
1041 if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN ||
1053 dev->name, np->cur_tx, entr
[all...]
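
The winbond-840 comment at lines 1025-1031 (like the yellowfin one at its line 816) describes a race between advancing cur_tx and setting the DescOwned bit. A common resolution, sketched here, is to publish the descriptor before advancing the index: fill the payload fields, make those writes visible, set the ownership bit, and only then bump cur_tx. C11 fences stand in for the kernel's wmb(); all names are illustrative:

#include <stdatomic.h>
#include <stdint.h>

#define TX_RING_SIZE 16
#define DescOwned    0x80000000u

struct tx_desc { uint32_t buffer, length; _Atomic uint32_t status; };

static struct tx_desc tx_ring[TX_RING_SIZE];
static _Atomic unsigned int cur_tx;

static void tx_publish(uint32_t buf_dma, uint32_t len)
{
    unsigned int entry = atomic_load(&cur_tx) % TX_RING_SIZE;

    tx_ring[entry].buffer = buf_dma;           /* 1. descriptor payload */
    tx_ring[entry].length = len;
    atomic_thread_fence(memory_order_release); /* 2. payload before OWN */
    atomic_store(&tx_ring[entry].status, DescOwned | len);
    atomic_fetch_add(&cur_tx, 1);              /* 3. only now advance   */
}

int main(void)
{
    tx_publish(0x1000, 64);
    return !(atomic_load(&tx_ring[0].status) & DescOwned);
}
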
/netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/arch/ppc/8260_io/
enet.c
97 * cur_rx and cur_tx point to the currently available buffer.
99 * controller. The cur_tx and dirty_tx are equal under both completely
113 cbd_t *cur_rx, *cur_tx; /* The next free ring entry */ member in struct:scc_enet_private
174 bdp = cep->cur_tx;
230 cep->cur_tx = (cbd_t *)bdp;
248 printk(" Ring data dump: cur_tx %p%s cur_rx %p.\n",
249 cep->cur_tx, cep->tx_full ? " (full)" : "",
311 if ((bdp==cep->cur_tx) && (cep->tx_full == 0))
358 * currently available BD (cur_tx) and determine if any
359 * buffers between the dirty_tx and cur_tx hav
[all...]
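
The excerpted enet.c comment (repeated verbatim in the 8xx enet.c and fec.c matches below) notes that cur_tx and dirty_tx are equal under both completely empty and completely full conditions, which is why these drivers carry an explicit tx_full flag (visible at lines 249 and 311 above). One sketch of the disambiguation, using array indices in place of the drivers' BD pointers, covers all three files; names are illustrative:

#include <stdbool.h>
#include <stdio.h>

#define TX_RING_SIZE 8

struct ring {
    unsigned int cur_tx, dirty_tx;  /* indices standing in for BD pointers */
    bool tx_full;
};

static bool tx_enqueue(struct ring *r)
{
    if (r->tx_full)
        return false;
    r->cur_tx = (r->cur_tx + 1) % TX_RING_SIZE;
    if (r->cur_tx == r->dirty_tx)   /* caught up to the reap pointer */
        r->tx_full = true;
    return true;
}

static void tx_reap_one(struct ring *r)
{
    if (r->cur_tx == r->dirty_tx && !r->tx_full)
        return;                     /* empty: pointers equal, not full */
    r->dirty_tx = (r->dirty_tx + 1) % TX_RING_SIZE;
    r->tx_full = false;
}

int main(void)
{
    struct ring r = { 0, 0, false };
    int queued = 0;
    while (tx_enqueue(&r))
        queued++;
    printf("queued %d, full=%d\n", queued, r.tx_full);  /* 8, full=1 */
    tx_reap_one(&r);                                    /* one slot free */
    return 0;
}
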
/netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/arch/ppc/8xx_io/
enet.c
119 * cur_rx and cur_tx point to the currently available buffer.
121 * controller. The cur_tx and dirty_tx are equal under both completely
135 cbd_t *cur_rx, *cur_tx; /* The next free ring entry */ member in struct:scc_enet_private
202 bdp = cep->cur_tx;
264 cep->cur_tx = (cbd_t *)bdp;
282 printk(" Ring data dump: cur_tx %p%s cur_rx %p.\n",
283 cep->cur_tx, cep->tx_full ? " (full)" : "",
343 if ((bdp==cep->cur_tx) && (cep->tx_full == 0))
390 * currently available BD (cur_tx) and determine if any
391 * buffers between the dirty_tx and cur_tx hav
[all...]
fec.c
141 * cur_rx and cur_tx point to the currently available buffer.
143 * controller. The cur_tx and dirty_tx are equal under both completely
157 cbd_t *cur_rx, *cur_tx; /* The next free ring entry */ member in struct:fec_enet_private
358 bdp = fep->cur_tx;
420 fep->cur_tx = (cbd_t *)bdp;
439 printk("Ring data dump: cur_tx %lx%s, dirty_tx %lx cur_rx: %lx\n",
440 (unsigned long)fep->cur_tx, fep->tx_full ? " (full)" : "",
549 if (bdp == fep->cur_tx && fep->tx_full == 0) break;
1780 fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
/netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/drivers/net/fs_enet/
fs_enet-main.c
502 fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
600 bdp = fep->cur_tx;
641 fep->cur_tx++;
643 fep->cur_tx = fep->tx_bd_base;
701 wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
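
fs_enet-main.c uses the pointer form of the same idea: cur_tx walks an array of buffer descriptors and is reset to tx_bd_base on wrap (lines 641-643 above). A minimal sketch of that wrap, with cbd_t reduced to a stub and the wrap test simplified:

#include <stddef.h>

#define TX_RING_SIZE 8

typedef struct { unsigned short sc; /* status/control */ } cbd_t;

static cbd_t tx_bd_base[TX_RING_SIZE];
static cbd_t *cur_tx = tx_bd_base;

static cbd_t *tx_next_bd(void)
{
    cbd_t *bdp = cur_tx;
    cur_tx++;
    if (cur_tx >= tx_bd_base + TX_RING_SIZE)  /* wrap at end of ring */
        cur_tx = tx_bd_base;
    return bdp;
}

int main(void)
{
    for (int i = 0; i < 2 * TX_RING_SIZE; i++)
        tx_next_bd();
    return cur_tx != tx_bd_base;  /* 0: wrapped back exactly twice */
}
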

Completed in 331 milliseconds
