Searched refs:TX_RING_SIZE (Results 1 - 25 of 51) sorted by path

Pages: 1 2 3

/linux-master/drivers/net/ethernet/dlink/
H A Ddl2k.h35 #define TX_RING_SIZE 256 macro
36 #define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used.*/
38 #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc)
369 struct sk_buff *tx_skbuff[TX_RING_SIZE];
H A Ddl2k.c210 else if (tx_coalesce > TX_RING_SIZE-1)
211 tx_coalesce = TX_RING_SIZE - 1;
450 for (i = 0; i < TX_RING_SIZE; i++) {
471 for (i = 0; i < TX_RING_SIZE; i++)
488 for (i = 0; i < TX_RING_SIZE; i++) {
491 ((i + 1) % TX_RING_SIZE) *
717 entry = np->cur_tx % TX_RING_SIZE;
753 np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE;
754 if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
[all...]
H A Dsundance.c65 #define TX_RING_SIZE 32 macro
66 #define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used. */
69 #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc)
128 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
366 struct sk_buff* tx_skbuff[TX_RING_SIZE];
977 for (i=0; i<TX_RING_SIZE; i++) {
990 np->cur_tx, np->cur_tx % TX_RING_SIZE,
991 np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
1055 for (i = 0; i < TX_RING_SIZE; i++) {
1064 unsigned head = np->cur_task % TX_RING_SIZE;
[all...]
/linux-master/drivers/net/ethernet/pasemi/
H A Dpasemi_mac.h19 #define TX_RING_SIZE 4096 macro
20 #define CS_RING_SIZE (TX_RING_SIZE*2)
94 #define TX_DESC(tx, num) ((tx)->chan.ring_virt[(num) & (TX_RING_SIZE-1)])
95 #define TX_DESC_INFO(tx, num) ((tx)->ring_info[(num) & (TX_RING_SIZE-1)])
/linux-master/drivers/net/ethernet/sun/
H A Dsunbmac.h251 #define TX_RING_SIZE 256 macro
255 #define NEXT_TX(num) (((num) + 1) & (TX_RING_SIZE - 1))
257 #define PREV_TX(num) (((num) - 1) & (TX_RING_SIZE - 1))
261 (bp)->tx_old + (TX_RING_SIZE - 1) - (bp)->tx_new : \
300 struct sk_buff *tx_skbs[TX_RING_SIZE];
H A Dsungem.h883 #define TX_RING_SIZE 128 macro
886 #if TX_RING_SIZE == 32
888 #elif TX_RING_SIZE == 64
890 #elif TX_RING_SIZE == 128
892 #elif TX_RING_SIZE == 256
894 #elif TX_RING_SIZE == 512
896 #elif TX_RING_SIZE == 1024
898 #elif TX_RING_SIZE == 2048
900 #elif TX_RING_SIZE == 4096
902 #elif TX_RING_SIZE
[all...]
H A Dsunqe.h291 #define TX_RING_SIZE 16 macro
301 (qp)->tx_old + (TX_RING_SIZE - 1) - (qp)->tx_new : \
326 u8 tx_buf[TX_RING_SIZE][PKT_BUF_SZ];
/linux-master/drivers/net/ethernet/3com/
H A D3c515.c48 #define TX_RING_SIZE 16 macro
299 struct boom_tx_desc tx_ring[TX_RING_SIZE];
302 struct sk_buff *tx_skbuff[TX_RING_SIZE];
832 for (i = 0; i < TX_RING_SIZE; i++)
970 for (i = 0; i < TX_RING_SIZE; i++) {
1000 int entry = vp->cur_tx % TX_RING_SIZE;
1008 prev_entry = &vp->tx_ring[(vp->cur_tx - 1) % TX_RING_SIZE];
1038 if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1)
1163 int entry = dirty_tx % TX_RING_SIZE;
1174 if (lp->tx_full && (lp->cur_tx - dirty_tx <= TX_RING_SIZE
[all...]
H A D3c59x.c38 #define TX_RING_SIZE 16 macro
603 struct sk_buff* tx_skbuff[TX_RING_SIZE];
1212 + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
1478 sizeof(struct boom_tx_desc) * TX_RING_SIZE,
1685 for (i = 0; i < TX_RING_SIZE; i++)
1915 iowrite32(vp->tx_ring_dma + (vp->dirty_tx % TX_RING_SIZE) * sizeof(struct boom_tx_desc),
1917 if (vp->cur_tx - vp->dirty_tx < TX_RING_SIZE) {
2116 int entry = vp->cur_tx % TX_RING_SIZE;
2118 struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
2137 if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) {
[all...]
/linux-master/drivers/net/ethernet/adaptec/
H A Dstarfire.c121 #define TX_RING_SIZE 32 macro
208 RX/TX_RING_SIZE pay close attention to page sizes and the ring-empty warning
531 struct tx_ring_info tx_info[TX_RING_SIZE];
888 tx_ring_size = ((sizeof(starfire_tx_desc) * TX_RING_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
1173 for (i = 0; i < TX_RING_SIZE; i++)
1190 if ((np->cur_tx - np->dirty_tx) + skb_num_frags(skb) * 2 > TX_RING_SIZE) {
1203 entry = np->cur_tx % TX_RING_SIZE;
1211 if (entry >= TX_RING_SIZE - skb_num_frags(skb)) {
1250 np->tx_info[entry].used_slots = TX_RING_SIZE - entry;
1258 /* scavenge the tx descriptors twice per TX_RING_SIZE */
[all...]
/linux-master/drivers/net/ethernet/alteon/
H A Dacenic.h449 #define TX_RING_SIZE (MAX_TX_RING_ENTRIES * sizeof(struct tx_desc)) macro
/linux-master/drivers/net/ethernet/amd/
H A D7990.c104 for (t = 0; t < TX_RING_SIZE; t++) { \
H A D7990.h39 #define TX_RING_SIZE (1 << LANCE_LOG_TX_BUFFERS) macro
41 #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
87 volatile struct lance_tx_desc btx_ring[TX_RING_SIZE];
90 volatile char tx_buf[TX_RING_SIZE][TX_BUFF_SIZE];
H A Da2065.c72 #define TX_RING_SIZE (1 << LANCE_LOG_TX_BUFFERS) macro
75 #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
97 struct lance_tx_desc btx_ring[TX_RING_SIZE];
100 char tx_buf[TX_RING_SIZE][TX_BUFF_SIZE];
H A Dariadne.c80 #define TX_RING_SIZE 5 macro
88 volatile struct TDRE *tx_ring[TX_RING_SIZE];
90 volatile u_short *tx_buff[TX_RING_SIZE];
100 struct TDRE tx_ring[TX_RING_SIZE];
102 u_short tx_buff[TX_RING_SIZE][PKT_BUF_SIZE / sizeof(u_short)];
129 for (i = 0; i < TX_RING_SIZE; i++) {
309 int entry = dirty_tx % TX_RING_SIZE;
345 if (priv->cur_tx - dirty_tx >= TX_RING_SIZE) {
349 dirty_tx += TX_RING_SIZE;
354 dirty_tx > priv->cur_tx - TX_RING_SIZE
[all...]
H A Datarilance.c111 #define TX_RING_SIZE (1 << TX_LOG_RING_SIZE) macro
113 #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
157 struct lance_tx_head tx_head[TX_RING_SIZE];
704 for( i = 0; i < TX_RING_SIZE; i++ ) {
755 for( i = 0 ; i < TX_RING_SIZE; i++ )
829 while( lp->cur_tx >= TX_RING_SIZE && lp->dirty_tx >= TX_RING_SIZE ) {
830 lp->cur_tx -= TX_RING_SIZE;
831 lp->dirty_tx -= TX_RING_SIZE;
920 if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
[all...]
H A Ddeclance.c156 #define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS)) macro
157 #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
223 struct lance_tx_desc btx_ring[TX_RING_SIZE];
268 char *tx_buf_ptr_cpu[TX_RING_SIZE];
272 uint tx_buf_ptr_lnc[TX_RING_SIZE];
495 for (i = 0; i < TX_RING_SIZE; i++) {
1095 for (i = 0; i < TX_RING_SIZE; i++) {
1140 for (i = 0; i < TX_RING_SIZE; i++) {
1171 for (i = 0; i < TX_RING_SIZE; i++) {
H A Dlance.c194 #define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS)) macro
195 #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
239 struct lance_tx_head tx_ring[TX_RING_SIZE];
243 struct sk_buff* tx_skbuff[TX_RING_SIZE];
564 lp->tx_bounce_buffs = kmalloc_array(TX_RING_SIZE, PKT_BUF_SZ,
855 for (i = 0; i < TX_RING_SIZE; i++) {
892 for (i = 0; i < TX_RING_SIZE; i++) {
941 for (i = 0; i < TX_RING_SIZE; i++)
1016 if ((lp->cur_tx - lp->dirty_tx) >= TX_RING_SIZE)
1099 if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
[all...]
H A Dpcnet32.c167 #define TX_RING_SIZE (1 << (PCNET32_LOG_TX_BUFFERS)) macro
1856 lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */
H A Dsun3lance.c94 #define TX_RING_SIZE (1 << TX_LOG_RING_SIZE) macro
96 #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
142 struct lance_tx_head tx_head[TX_RING_SIZE];
145 char tx_data[TX_RING_SIZE][PKT_BUF_SZ];
459 for( i = 0; i < TX_RING_SIZE; i++ ) {
547 for( i = 0 ; i < TX_RING_SIZE; i++ )
680 // for(i = 0; i < TX_RING_SIZE; i++)
H A Dsunlance.c176 #define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS)) macro
177 #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
223 struct lance_tx_desc btx_ring[TX_RING_SIZE];
225 u8 tx_buf [TX_RING_SIZE][TX_BUFF_SIZE];
340 for (i = 0; i < TX_RING_SIZE; i++) {
396 for (i = 0; i < TX_RING_SIZE; i++) {
/linux-master/drivers/net/ethernet/dec/tulip/
H A Dinterrupt.c533 int maxtx = TX_RING_SIZE;
534 int maxoi = TX_RING_SIZE;
597 int entry = dirty_tx % TX_RING_SIZE;
652 if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
656 dirty_tx += TX_RING_SIZE;
660 if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
H A Dtulip.h326 #define TX_RING_SIZE 32 macro
409 struct ring_info tx_buffers[TX_RING_SIZE];
H A Dtulip_core.c587 for (i = 0; i < TX_RING_SIZE; i++)
643 for (i = 0; i < TX_RING_SIZE; i++) {
664 entry = tp->cur_tx % TX_RING_SIZE;
672 if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
674 } else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
676 } else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
682 if (entry == TX_RING_SIZE-1)
707 int entry = dirty_tx % TX_RING_SIZE;
805 for (i = 0; i < TX_RING_SIZE; i++) {
1129 if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE
[all...]
H A Dwinbond-840.c288 dma_addr_t tx_addr[TX_RING_SIZE];
293 struct sk_buff* tx_skbuff[TX_RING_SIZE];
816 for (i = 0; i < TX_RING_SIZE; i++) {
843 for (i = 0; i < TX_RING_SIZE; i++) {
927 for (i = 0; i < TX_RING_SIZE; i++)
966 sizeof(struct w840_tx_desc) * TX_RING_SIZE,
978 sizeof(struct w840_tx_desc) * TX_RING_SIZE,
992 entry = np->cur_tx % TX_RING_SIZE;
1007 if(entry == TX_RING_SIZE-1)
1049 int entry = np->dirty_tx % TX_RING_SIZE;
[all...]

Completed in 376 milliseconds

Pages: 1 2 3