Searched refs:TX_RING_SIZE (Results 1 - 25 of 55) sorted by path

/netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/arch/ppc/8260_io/
enet.c 86 #define TX_RING_SIZE 8 /* Must be power of two */
105 struct sk_buff* tx_skbuff[TX_RING_SIZE];
253 for (i = 0 ; i < TX_RING_SIZE; i++, bdp++)
686 dp_offset = cpm_dpalloc(sizeof(cbd_t) * TX_RING_SIZE, 8);
749 for (i=0; i<TX_RING_SIZE; i++) {
fcc_enet.c 121 #define TX_RING_SIZE 16 /* Must be power of two */
364 struct sk_buff* tx_skbuff[TX_RING_SIZE];
503 for (i = 0 ; i < TX_RING_SIZE; i++, bdp++)
566 if (cep->tx_free == TX_RING_SIZE)
1792 cep->tx_bd_base = kmalloc(sizeof(cbd_t) * TX_RING_SIZE,
1923 for (i=0; i<TX_RING_SIZE; i++) {
2223 fep->tx_free = TX_RING_SIZE;
2228 for (i=0; i<TX_RING_SIZE; i++) {
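
Note: the fcc_enet.c hits above show one common accounting style: an explicit tx_free counter starts at TX_RING_SIZE (lines 2223 and 566), drops as descriptors are handed to the hardware, and climbs back as completions are reclaimed; tx_free == TX_RING_SIZE means the ring is idle. Below is a minimal compilable sketch of that bookkeeping; the struct and function names are invented for illustration, not the driver's own.

#define TX_RING_SIZE 16

struct tx_accounting {
    unsigned int tx_free;   /* descriptors not currently owned by hardware */
};

static void tx_init(struct tx_accounting *a)
{
    a->tx_free = TX_RING_SIZE;          /* ring starts idle */
}

static int tx_submit(struct tx_accounting *a)
{
    if (a->tx_free == 0)
        return -1;                      /* ring full: caller stops the queue */
    a->tx_free--;                       /* one more descriptor in flight */
    return 0;
}

static void tx_reclaim(struct tx_accounting *a)
{
    a->tx_free++;   /* back at TX_RING_SIZE means every buffer completed */
}
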
/netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/arch/ppc/8xx_io/
enet.c 100 #define TX_RING_SIZE 64 /* Must be power of two */
107 #define TX_RING_SIZE 8 /* Must be power of two */ macro
127 struct sk_buff* tx_skbuff[TX_RING_SIZE];
286 for (i = 0 ; i < TX_RING_SIZE; i++, bdp++)
752 dp_offset = cpm_dpalloc(sizeof(cbd_t) * TX_RING_SIZE, 8);
824 for (i=0; i<TX_RING_SIZE; i++) {
fec.c 89 #define TX_RING_SIZE 16 /* Must be power of two */
96 #define TX_RING_SIZE 8 /* Must be power of two */ macro
149 struct sk_buff* tx_skbuff[TX_RING_SIZE];
445 printk(" tx: %u buffers\n", TX_RING_SIZE);
446 for (i = 0 ; i < TX_RING_SIZE; i++) {
1573 if (((RX_RING_SIZE + TX_RING_SIZE) * sizeof(cbd_t)) > PAGE_SIZE) {
1812 for (i=0; i<TX_RING_SIZE; i++) {
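
Note: the check at fec.c line 1573 (and the equivalent "* 8" form at line 145 of drivers/net/fec.c further down) guards the driver's assumption that both descriptor rings fit in a single page, since they are carved out of one page-sized allocation. A standalone sketch of the same arithmetic follows; the 8-byte cbd_t layout is an assumption chosen to match the hard-coded 8 in the second fec.c, not a copy of the kernel's definition.

#include <stdio.h>

#define PAGE_SIZE    4096
#define RX_RING_SIZE 64
#define TX_RING_SIZE 16

/* assumed 8-byte buffer descriptor: status, length, buffer address */
typedef struct { unsigned short status, length; unsigned int buf; } cbd_t;

int main(void)
{
    if ((RX_RING_SIZE + TX_RING_SIZE) * sizeof(cbd_t) > PAGE_SIZE) {
        fprintf(stderr, "descriptor rings exceed one page\n");
        return 1;
    }
    printf("rings use %zu of %d bytes\n",
           (RX_RING_SIZE + TX_RING_SIZE) * sizeof(cbd_t), PAGE_SIZE);
    return 0;
}
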
/netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/drivers/net/
3c515.c 56 #define TX_RING_SIZE 16
305 struct boom_tx_desc tx_ring[TX_RING_SIZE];
308 struct sk_buff *tx_skbuff[TX_RING_SIZE];
832 for (i = 0; i < TX_RING_SIZE; i++)
973 for (i = 0; i < TX_RING_SIZE; i++) {
1003 int entry = vp->cur_tx % TX_RING_SIZE;
1011 prev_entry = &vp->tx_ring[(vp->cur_tx - 1) % TX_RING_SIZE];
1041 if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1)
1168 int entry = dirty_tx % TX_RING_SIZE;
1179 if (lp->tx_full && (lp->cur_tx - dirty_tx <= TX_RING_SIZE
[all...]
3c59x.c 30 #define TX_RING_SIZE 16
592 struct sk_buff* tx_skbuff[TX_RING_SIZE];
1118 + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
1404 + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
1624 for (i = 0; i < TX_RING_SIZE; i++)
1860 iowrite32(vp->tx_ring_dma + (vp->dirty_tx % TX_RING_SIZE) * sizeof(struct boom_tx_desc),
1862 if (vp->cur_tx - vp->dirty_tx < TX_RING_SIZE)
2051 int entry = vp->cur_tx % TX_RING_SIZE;
2052 struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
2061 if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) {
[all...]
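
Note: the 3c515.c and 3c59x.c hits above show the free-running counter scheme: cur_tx counts descriptors ever queued, dirty_tx counts descriptors ever reclaimed, so their difference is the in-flight count and the ring is full when it reaches TX_RING_SIZE (the checks at 3c59x.c lines 1862 and 2061). A hedged sketch of that test, with invented structure and function names rather than the drivers' own:

#include <stdbool.h>

#define TX_RING_SIZE 16

struct tx_counters {
    unsigned int cur_tx;    /* descriptors ever queued    */
    unsigned int dirty_tx;  /* descriptors ever reclaimed */
};

static bool tx_ring_full(const struct tx_counters *c)
{
    /* unsigned subtraction stays correct even after the counters
       wrap, as long as fewer than UINT_MAX entries are in flight */
    return c->cur_tx - c->dirty_tx >= TX_RING_SIZE;
}

static int tx_queue(struct tx_counters *c)
{
    if (tx_ring_full(c))
        return -1;                       /* caller stops the net queue */
    unsigned int entry = c->cur_tx % TX_RING_SIZE;
    /* ... fill tx_ring[entry] and kick the NIC here ... */
    c->cur_tx++;
    return (int)entry;
}
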
7990.c 106 for (t=0; t < TX_RING_SIZE; t++) { \
7990.h 38 #define TX_RING_SIZE (1<<LANCE_LOG_TX_BUFFERS)
40 #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
87 volatile struct lance_tx_desc btx_ring[TX_RING_SIZE];
90 volatile char tx_buf [TX_RING_SIZE][TX_BUFF_SIZE];
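
Note: the 7990.h hits above (echoed by a2065.c, atarilance.c, declance.c and lance.c below) show why so many of these drivers insist the ring size "Must be power of two": deriving TX_RING_SIZE from a log2 constant guarantees TX_RING_MOD_MASK = TX_RING_SIZE - 1 is an all-ones bit mask, so wrapping an index is a single AND rather than a division. A minimal compilable sketch of the idiom; the constant's value and the names are illustrative, not taken from any one driver:

#include <stdio.h>

#define LANCE_LOG_TX_BUFFERS 4                     /* illustrative value */
#define TX_RING_SIZE (1 << LANCE_LOG_TX_BUFFERS)   /* 16, a power of two */
#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)        /* 0x0f */

int main(void)
{
    unsigned int cur_tx = 0;

    for (int pkt = 0; pkt < 20; pkt++) {
        /* masking with TX_RING_SIZE - 1 equals cur_tx % TX_RING_SIZE
           only because TX_RING_SIZE is a power of two */
        unsigned int entry = cur_tx & TX_RING_MOD_MASK;
        printf("packet %d -> descriptor %u\n", pkt, entry);
        cur_tx++;   /* counter runs free; only the low bits index the ring */
    }
    return 0;
}
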
82596.c 283 #define TX_RING_SIZE 64
331 struct tx_cmd tx_cmds[TX_RING_SIZE];
332 struct i596_tbd tbds[TX_RING_SIZE];
370 static int max_cmd_backlog = TX_RING_SIZE-1;
1087 if (++lp->next_tx_cmd == TX_RING_SIZE)
a2065.c 70 #define TX_RING_SIZE (1<<LANCE_LOG_TX_BUFFERS)
73 #define TX_RING_MOD_MASK (TX_RING_SIZE-1)
99 struct lance_tx_desc btx_ring[TX_RING_SIZE];
102 char tx_buf [TX_RING_SIZE][TX_BUFF_SIZE];
acenic.h 447 #define TX_RING_SIZE (MAX_TX_RING_ENTRIES * sizeof(struct tx_desc))
ariadne.c 84 #define TX_RING_SIZE 5
95 volatile struct TDRE *tx_ring[TX_RING_SIZE];
97 volatile u_short *tx_buff[TX_RING_SIZE];
111 struct TDRE tx_ring[TX_RING_SIZE];
113 u_short tx_buff[TX_RING_SIZE][PKT_BUF_SIZE/sizeof(u_short)];
303 lance->RDP = swapw(((u_short)-TX_RING_SIZE));
343 for (i = 0; i < TX_RING_SIZE; i++) {
443 int entry = dirty_tx % TX_RING_SIZE;
479 if (priv->cur_tx - dirty_tx >= TX_RING_SIZE) {
482 dirty_tx += TX_RING_SIZE;
[all...]
atarilance.c 112 #define TX_RING_SIZE (1 << TX_LOG_RING_SIZE)
114 #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
158 struct lance_tx_head tx_head[TX_RING_SIZE];
714 for( i = 0; i < TX_RING_SIZE; i++ ) {
765 for( i = 0 ; i < TX_RING_SIZE; i++ )
843 while( lp->cur_tx >= TX_RING_SIZE && lp->dirty_tx >= TX_RING_SIZE ) {
844 lp->cur_tx -= TX_RING_SIZE;
845 lp->dirty_tx -= TX_RING_SIZE;
934 if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
[all...]
declance.c 156 #define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS))
157 #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
223 struct lance_tx_desc btx_ring[TX_RING_SIZE];
269 char *tx_buf_ptr_cpu[TX_RING_SIZE];
273 uint tx_buf_ptr_lnc[TX_RING_SIZE];
487 for (i = 0; i < TX_RING_SIZE; i++) {
1092 for (i = 0; i < TX_RING_SIZE; i++) {
1137 for (i = 0; i < TX_RING_SIZE; i++) {
1168 for (i = 0; i < TX_RING_SIZE; i++) {
dl2k.c 198 else if (tx_coalesce > TX_RING_SIZE-1)
199 tx_coalesce = TX_RING_SIZE - 1;
545 for (i = 0; i < TX_RING_SIZE; i++) {
549 ((i+1)%TX_RING_SIZE) *
604 entry = np->cur_tx % TX_RING_SIZE;
633 np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE;
634 if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
699 int entry = np->old_tx % TX_RING_SIZE;
724 entry = (entry + 1) % TX_RING_SIZE;
[all...]
dl2k.h 38 #define TX_RING_SIZE 256
39 #define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used.*/
41 #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc)
649 struct sk_buff *tx_skbuff[TX_RING_SIZE];
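
Note: dl2k takes a third approach, visible in the hits above: indices are kept wrapped by a modulo on every increment (dl2k.c line 633), occupancy is computed as (cur_tx - old_tx + TX_RING_SIZE) % TX_RING_SIZE (line 634), and TX_QUEUE_LEN sacrifices one entry so a full ring never looks empty. A sketch of that arithmetic, written as free functions rather than the driver's code:

#define TX_RING_SIZE 256
#define TX_QUEUE_LEN (TX_RING_SIZE - 1)

static unsigned int tx_in_flight(unsigned int cur_tx, unsigned int old_tx)
{
    /* adding TX_RING_SIZE before the modulo keeps the result
       non-negative after cur_tx has wrapped past old_tx */
    return (cur_tx - old_tx + TX_RING_SIZE) % TX_RING_SIZE;
}

static int tx_has_room(unsigned int cur_tx, unsigned int old_tx)
{
    /* one slot stays unused, so full (TX_QUEUE_LEN in flight) is
       distinguishable from empty (0 in flight) */
    return tx_in_flight(cur_tx, old_tx) < TX_QUEUE_LEN;
}
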
eepro100.c 71 #define TX_RING_SIZE 64
78 <= TX_RING_SIZE-TX_MULTICAST_RESERV. */
79 #define TX_QUEUE_LIMIT (TX_RING_SIZE-TX_MULTICAST_RESERV)
415 struct sk_buff *tx_skbuff[TX_RING_SIZE];
626 size = TX_RING_SIZE * sizeof(struct TxFD) + sizeof(struct speedo_stats);
805 sp->lstats = (struct speedo_stats *)(sp->tx_ring + TX_RING_SIZE);
806 sp->lstats_dma = TX_RING_ELEM_DMA(sp, TX_RING_SIZE);
1095 (struct descriptor *)&sp->tx_ring[sp->cur_tx++ % TX_RING_SIZE];
1099 cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
1107 iowrite32(TX_RING_ELEM_DMA(sp, sp->dirty_tx % TX_RING_SIZE),
[all...]
epic100.c 53 #define TX_RING_SIZE 256
56 #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct epic_tx_desc)
255 struct sk_buff* tx_skbuff[TX_RING_SIZE];
813 outl(ep->tx_ring_dma + (ep->dirty_tx%TX_RING_SIZE)*
940 for (i = 0; i < TX_RING_SIZE; i++) {
966 entry = ep->cur_tx % TX_RING_SIZE;
1038 int entry = dirty_tx % TX_RING_SIZE;
1060 if (cur_tx - dirty_tx > TX_RING_SIZE) {
1064 dirty_tx += TX_RING_SIZE;
1334 for (i = 0; i < TX_RING_SIZE;
[all...]
fealnx.c 56 // #define TX_RING_SIZE 16
58 #define TX_RING_SIZE 6 macro
60 #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct fealnx_desc)
1221 for (i = 0; i < TX_RING_SIZE; i++)
1288 np->free_tx_count = TX_RING_SIZE;
1290 for (i = 0; i < TX_RING_SIZE; i++) {
1393 np->free_tx_count = TX_RING_SIZE;
1395 for (i = 0; i < TX_RING_SIZE; i++) {
1411 np->tx_ring[TX_RING_SIZE - 1].next_desc = np->tx_ring_dma;
1412 np->tx_ring[TX_RING_SIZE
[all...]
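
Note: the fealnx hit at line 1411 (and dl2k.c line 549 above) shows how the descriptor array becomes a hardware ring: each descriptor's next pointer holds the bus address of its successor, and the (i + 1) % TX_RING_SIZE wrap points the last entry back at the first. A compilable sketch with an assumed minimal descriptor layout; the real structs carry status, control and buffer fields omitted here.

#include <stdint.h>

#define TX_RING_SIZE 6   /* matches fealnx.c line 58 */

struct tx_desc {
    uint32_t next_desc;          /* bus address of the next descriptor */
    /* status, control and buffer fields omitted in this sketch */
};

static void chain_tx_ring(struct tx_desc ring[TX_RING_SIZE], uint32_t ring_dma)
{
    for (int i = 0; i < TX_RING_SIZE; i++)
        ring[i].next_desc = ring_dma +
            (uint32_t)(((i + 1) % TX_RING_SIZE) * sizeof(struct tx_desc));
    /* the modulo sends entry TX_RING_SIZE - 1 back to ring_dma,
       closing the circle the hardware walks */
}
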
fec.c 142 #define TX_RING_SIZE 16 /* Must be power of two */
145 #if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE)
194 unsigned char *tx_bounce[TX_RING_SIZE];
195 struct sk_buff* tx_skbuff[TX_RING_SIZE];
422 printk(" tx: %u buffers\n", TX_RING_SIZE);
423 for (i = 0 ; i < TX_RING_SIZE; i++) {
2368 for (i=0, j=FEC_ENET_TX_FRPPG; i<TX_RING_SIZE; i++) {
2517 for (i=0; i<TX_RING_SIZE; i++) {
gt64240eth.c 580 nextOut = (nextOut + 1) % TX_RING_SIZE) {
912 sizeof(gt64240_td_t) * TX_RING_SIZE,
931 sizeof(gt64240_td_t) * TX_RING_SIZE,
950 sizeof(gt64240_td_t) * TX_RING_SIZE,
1000 for (i = 0; i < TX_RING_SIZE; i++) {
1284 nextOut = (nextOut + 1) % TX_RING_SIZE) {
1369 if (gp->tx_count >= TX_RING_SIZE) {
1413 gp->tx_next_in = (nextIn + 1) % TX_RING_SIZE;
1442 if (++gp->tx_count >= TX_RING_SIZE) {
gt64240eth.h 83 //-#define TX_RING_SIZE 16
84 #define TX_RING_SIZE 64 /* TESTING !!! */ macro
377 struct sk_buff *tx_skbuff[TX_RING_SIZE];
hamachi.c 119 #define TX_RING_SIZE 64
121 #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct hamachi_desc)
235 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
491 struct sk_buff* tx_skbuff[TX_RING_SIZE];
1016 int entry = hmp->dirty_tx % TX_RING_SIZE;
1031 if (entry >= TX_RING_SIZE-1)
1032 hmp->tx_ring[TX_RING_SIZE-1].status_n_length |=
1080 for (i = 0; i < TX_RING_SIZE; i++)
1102 for (i = 0; i < TX_RING_SIZE; i++){
1105 if (i >= TX_RING_SIZE
[all...]
lance.c 193 #define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS))
194 #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
238 struct lance_tx_head tx_ring[TX_RING_SIZE];
242 struct sk_buff* tx_skbuff[TX_RING_SIZE];
548 lp->tx_bounce_buffs = kmalloc(PKT_BUF_SZ*TX_RING_SIZE,
844 for (i = 0; i < TX_RING_SIZE; i++) {
882 for (i = 0; i < TX_RING_SIZE; i++) {
931 for (i = 0; i < TX_RING_SIZE; i++)
1007 if ((lp->cur_tx - lp->dirty_tx) >= TX_RING_SIZE)
1087 if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
[all...]
lasi_82596.c 330 #define TX_RING_SIZE 32
369 struct tx_cmd tx_cmds[TX_RING_SIZE] __attribute__((aligned(32)));
370 struct i596_tbd tbds[TX_RING_SIZE] __attribute__((aligned(32)));
416 static int max_cmd_backlog = TX_RING_SIZE-1;
1069 if (++lp->next_tx_cmd == TX_RING_SIZE)

Completed in 226 milliseconds
