Searched refs:tx_ring (Results 1 - 25 of 86) sorted by path

/netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/arch/powerpc/sysdev/
fsl_soc.c:683 fs_enet_data.tx_ring = 32;
973 fs_enet_data.tx_ring = 16;
1002 fs_enet_data.tx_ring = 8;
/netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/arch/ppc/platforms/
mpc8272ads_setup.c:81 .tx_ring = 32,
99 .tx_ring = 32,
mpc866ads_setup.c:53 .tx_ring = 16,
66 .tx_ring = 8,
mpc885ads_setup.c:70 .tx_ring = 16,
83 .tx_ring = 16,
96 .tx_ring = 8,
/netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/arch/ppc/syslib/
mpc85xx_devices.c:93 .tx_ring = 32,
111 .tx_ring = 32,
129 .tx_ring = 32,
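
Note: every hit above is board/platform setup code rather than a driver; the platform data handed to the on-chip Ethernet driver (fs_enet in fsl_soc.c) simply tells it how many transmit and receive descriptors to create. A minimal sketch of that pattern follows, with a hypothetical platform-data struct and made-up per-board values (the struct and field names are stand-ins, not the exact kernel definitions):

/* Illustrative only: board code fills in per-device ring sizes and the
 * driver later allocates that many descriptors. */
#include <stdio.h>

struct enet_platform_data {
    int rx_ring;    /* number of receive descriptors  */
    int tx_ring;    /* number of transmit descriptors */
};

/* Per-board tables, analogous to the mpc8272ads/mpc866ads/mpc885ads entries. */
static const struct enet_platform_data board_fcc = { .rx_ring = 32, .tx_ring = 32 };
static const struct enet_platform_data board_scc = { .rx_ring = 32, .tx_ring = 8 };

int main(void)
{
    printf("FCC tx_ring=%d, SCC tx_ring=%d\n", board_fcc.tx_ring, board_scc.tx_ring);
    return 0;
}
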
/netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/drivers/infiniband/hw/amso1100/
c2.c:113 static int c2_tx_ring_alloc(struct c2_ring *tx_ring, void *vaddr, argument
121 tx_ring->start = kmalloc(sizeof(*elem) * tx_ring->count, GFP_KERNEL);
122 if (!tx_ring->start)
125 elem = tx_ring->start;
128 for (i = 0; i < tx_ring->count; i++, elem++, tx_desc++, txp_desc++) {
143 if (i == tx_ring->count - 1) {
144 elem->next = tx_ring->start;
153 tx_ring->to_use = tx_ring
327 struct c2_ring *tx_ring = &c2_port->tx_ring; local
384 struct c2_ring *tx_ring = &c2_port->tx_ring; local
762 struct c2_ring *tx_ring = &c2_port->tx_ring; local
[all...]
c2.h:365 struct c2_ring tx_ring; member in struct:c2_port
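
Note: the c2.c hits show the classic "array as circular list" setup: c2_tx_ring_alloc() kmallocs count elements, chains each element to the next, points the last element back to the start, and then initializes the ring cursors (line 153 is cut off in the listing). A self-contained sketch of that idea follows, with calloc() in place of kmalloc(), a simplified element layout, and to_use/to_clean producer and consumer cursors that are an assumption based on the truncated line:

/* Userspace sketch of the circular-ring setup seen in c2_tx_ring_alloc():
 * allocate count elements, link each one to the next, wrap the last back
 * to the first, and start the cursors at the head. */
#include <stdlib.h>

struct ring_elem {
    struct ring_elem *next;
    void *data;
};

struct ring {
    struct ring_elem *start;
    struct ring_elem *to_use;      /* next element the producer will fill */
    struct ring_elem *to_clean;    /* next element the consumer will reap */
    int count;
};

int ring_alloc(struct ring *r, int count)
{
    struct ring_elem *elem;
    int i;

    r->start = calloc(count, sizeof(*elem));
    if (!r->start)
        return -1;

    r->count = count;
    for (i = 0, elem = r->start; i < count; i++, elem++)
        elem->next = (i == count - 1) ? r->start : elem + 1;   /* wrap at the end */

    /* Empty ring: both cursors begin at the first element. */
    r->to_use = r->to_clean = r->start;
    return 0;
}
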
/netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/drivers/infiniband/ulp/ipoib/
ipoib.h:185 struct ipoib_tx_buf *tx_ring; member in struct:ipoib_cm_tx
269 struct ipoib_tx_buf *tx_ring; member in struct:ipoib_dev_priv
ipoib_cm.c:517 * We put the skb into the tx_ring _before_ we call post_send()
523 tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
570 tx_req = &tx->tx_ring[wr_id];
883 p->tx_ring = kzalloc(ipoib_sendq_size * sizeof *p->tx_ring,
885 if (!p->tx_ring) {
972 if (p->tx_ring) {
974 tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
981 kfree(p->tx_ring);
ipoib_ib.c:256 tx_req = &priv->tx_ring[wr_id];
378 * We put the skb into the tx_ring _before_ we call post_send()
384 tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
603 tx_req = &priv->tx_ring[priv->tx_tail &
ipoib_main.c:890 priv->tx_ring = kzalloc(ipoib_sendq_size * sizeof *priv->tx_ring,
892 if (!priv->tx_ring) {
906 kfree(priv->tx_ring);
931 kfree(priv->tx_ring);
934 priv->tx_ring = NULL;
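
Note: the ipoib hits show the other common shape. tx_ring is a flat kzalloc'd array of ipoib_sendq_size entries, indexed with tx_head & (ipoib_sendq_size - 1) when posting and tx_tail & (ipoib_sendq_size - 1) when reaping completions; the masking only works because the ring size is a power of two, and the comment at ipoib_cm.c:517 notes that the slot is filled before post_send() is called. A small userspace sketch of that head/tail scheme follows (names are illustrative; the queue-full check the real driver performs before stopping the netdev queue is omitted):

/* Power-of-two ring indexing in the ipoib style: head counts submissions,
 * tail counts completions, and "& (size - 1)" maps the free-running
 * counters onto ring slots. */
#include <assert.h>
#include <stdlib.h>

struct tx_buf {
    void *skb;              /* stand-in for the queued packet */
};

struct tx_queue {
    struct tx_buf *ring;
    unsigned int size;      /* must be a power of two */
    unsigned int head;      /* incremented when a packet is posted   */
    unsigned int tail;      /* incremented when a completion arrives */
};

int txq_init(struct tx_queue *q, unsigned int size)
{
    assert(size && (size & (size - 1)) == 0);
    q->ring = calloc(size, sizeof(*q->ring));
    if (!q->ring)
        return -1;
    q->size = size;
    q->head = q->tail = 0;
    return 0;
}

/* Claim the slot and record the packet before handing it to the hardware,
 * mirroring "we put the skb into the tx_ring _before_ we call post_send()". */
struct tx_buf *txq_post(struct tx_queue *q, void *skb)
{
    struct tx_buf *req = &q->ring[q->head & (q->size - 1)];

    req->skb = skb;
    q->head++;
    return req;
}

/* Completions are reaped from the tail end, in submission order. */
struct tx_buf *txq_complete(struct tx_queue *q)
{
    struct tx_buf *req = &q->ring[q->tail & (q->size - 1)];

    q->tail++;
    return req;
}
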
/netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/drivers/infiniband/ulp/srp/
ib_srp.c:247 srp_free_iu(target->srp_host, target->tx_ring[i]);
943 return target->tx_ring[target->tx_head & SRP_SQ_SIZE];
1068 target->tx_ring[i] = srp_alloc_iu(target->srp_host,
1071 if (!target->tx_ring[i])
1084 srp_free_iu(target->srp_host, target->tx_ring[i]);
1085 target->tx_ring[i] = NULL;
ib_srp.h:150 struct srp_iu *tx_ring[SRP_SQ_SIZE + 1]; member in struct:srp_target_port
/netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/drivers/net/
3c515.c:278 alignment contraint on tx_ring[] and rx_ring[]. */
305 struct boom_tx_desc tx_ring[TX_RING_SIZE]; member in struct:corkscrew_private
972 &vp->tx_ring[0]);
975 &vp->tx_ring[i],
976 vp->tx_ring[i].length, vp->tx_ring[i].status);
1011 prev_entry = &vp->tx_ring[(vp->cur_tx - 1) % TX_RING_SIZE];
1019 vp->tx_ring[entry].next = 0;
1020 vp->tx_ring[entry].addr = isa_virt_to_bus(skb->data);
1021 vp->tx_ring[entr
[all...]
3c527.c:173 struct mc32_ring_desc tx_ring[TX_RING_LEN]; /* Host Transmit ring */ member in struct:mc32_local
815 * our tx_ring array to reduce slow shared-mem reads. Finally, we
832 lp->tx_ring[i].p=p;
833 lp->tx_ring[i].skb=NULL;
864 if (lp->tx_ring[i].skb)
866 dev_kfree_skb(lp->tx_ring[i].skb);
867 lp->tx_ring[i].skb = NULL;
1023 p=lp->tx_ring[head].p;
1028 np=lp->tx_ring[head].p;
1031 lp->tx_ring[hea
[all...]
3c59x.c:529 alignment contraint on tx_ring[] and rx_ring[]. */
587 struct boom_tx_desc* tx_ring; member in struct:vortex_private
1124 vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE);
2052 struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
2071 vp->tx_ring[entry].next = 0;
2074 vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
2076 vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);
2079 vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
2081 vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG);
2085 vp->tx_ring[entr
[all...]
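
Note: the older 3Com drivers (3c515, 3c59x) keep a fixed array of hardware "download" descriptors and append to it in place: the slot for a new packet is cur_tx % TX_RING_SIZE, it is filled with next = 0 plus the bus address and length of the data, and the previous descriptor is then patched to point at it so the NIC keeps following the chain. A simplified sketch of that append step follows (plain pointers cast to 32-bit values stand in for isa_virt_to_bus()/pci_map_single(); field names are illustrative):

/* Append a packet to a 3c515/3c59x-style descriptor chain.  Each descriptor
 * carries a link to the next descriptor, the (bus) address of the packet
 * data, and a length word; a zero link terminates the chain. */
#include <stdint.h>

#define TX_RING_SIZE 16

struct tx_desc {
    uint32_t next;      /* bus address of the following descriptor, 0 = end */
    uint32_t addr;      /* bus address of the packet buffer */
    uint32_t length;    /* packet length plus control bits */
    uint32_t status;
};

struct tx_state {
    struct tx_desc ring[TX_RING_SIZE];
    unsigned int cur_tx;    /* free-running count of queued packets */
};

void queue_packet(struct tx_state *tx, const void *buf, uint32_t len)
{
    unsigned int entry = tx->cur_tx % TX_RING_SIZE;
    struct tx_desc *desc = &tx->ring[entry];
    struct tx_desc *prev = &tx->ring[(tx->cur_tx - 1) % TX_RING_SIZE];

    desc->next = 0;                              /* this packet ends the chain */
    desc->addr = (uint32_t)(uintptr_t)buf;       /* real driver: a DMA mapping  */
    desc->length = len;
    desc->status = 0;

    /* Patch the previous descriptor so the NIC walks on to this one. */
    prev->next = (uint32_t)(uintptr_t)desc;

    tx->cur_tx++;
}
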
8139cp.c:357 struct cp_desc *tx_ring; member in struct:cp_private
701 struct cp_desc *txd = cp->tx_ring + tx_tail;
786 struct cp_desc *txd = &cp->tx_ring[entry];
862 txd = &cp->tx_ring[entry];
874 txd = &cp->tx_ring[first_entry];
1107 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1108 cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);
1124 cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];
1147 desc = cp->tx_ring + i;
1158 memset(cp->tx_ring,
[all...]
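
Note: 8139cp does two things worth calling out: rx_ring and tx_ring live in a single coherent-DMA allocation, with tx_ring pointing just past the last RX descriptor (cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE]), and instead of per-descriptor next pointers the last TX descriptor carries a RingEnd flag so the chip wraps back to index 0. A sketch of that layout follows; calloc() stands in for the coherent-DMA allocation, and the ring sizes and end-of-ring bit value are illustrative:

/* 8139cp-style layout: one contiguous block holds CP_RX_RING_SIZE RX
 * descriptors followed by CP_TX_RING_SIZE TX descriptors, and the final
 * TX descriptor is flagged so the hardware wraps to the first one. */
#include <stdint.h>
#include <stdlib.h>

#define CP_RX_RING_SIZE 64
#define CP_TX_RING_SIZE 64
#define RING_END        (1u << 30)  /* end-of-ring bit; value illustrative */

struct cp_desc {
    uint32_t opts1;     /* length, ownership and ring-control bits */
    uint32_t opts2;
    uint64_t addr;      /* bus address of the packet buffer */
};

struct cp_rings {
    struct cp_desc *rx_ring;
    struct cp_desc *tx_ring;
};

int cp_alloc_rings(struct cp_rings *cp)
{
    /* Single allocation: RX descriptors first, TX descriptors right after. */
    cp->rx_ring = calloc(CP_RX_RING_SIZE + CP_TX_RING_SIZE, sizeof(struct cp_desc));
    if (!cp->rx_ring)
        return -1;

    cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];

    /* Mark the final TX descriptor so the chip wraps to index 0. */
    cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = RING_END;
    return 0;
}
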
acenic.c:757 if (ap->tx_ring != NULL && !ACE_IS_TIGON_I(ap)) {
759 pci_free_consistent(ap->pdev, size, ap->tx_ring,
762 ap->tx_ring = NULL;
817 ap->tx_ring = pci_alloc_consistent(ap->pdev, size,
820 if (ap->tx_ring == NULL)
1296 ap->tx_ring = (struct tx_desc *) regs->Window;
1299 writel(0, (void __iomem *)ap->tx_ring + i * 4);
1303 memset(ap->tx_ring, 0,
2322 = (struct tx_desc __iomem *) &ap->tx_ring[i];
2327 memset(ap->tx_ring
[all...]
acenic.h:628 struct tx_desc *tx_ring; member in struct:ace_private
amd8111e.c:320 if((lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
360 lp->tx_ring[i].buff_phy_addr = 0;
361 lp->tx_ring[i].tx_flags = 0;
362 lp->tx_ring[i].buff_count = 0;
376 sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,lp->tx_ring,
659 if(lp->tx_ring){
662 lp->tx_ring, lp->tx_ring_dma_addr);
664 lp->tx_ring = NULL;
693 status = le16_to_cpu(lp->tx_ring[tx_index].tx_flags);
698 lp->tx_ring[tx_inde
[all...]
amd8111e.h:750 struct amd8111e_tx_dr* tx_ring; member in struct:amd8111e_priv
ariadne.c:95 volatile struct TDRE *tx_ring[TX_RING_SIZE]; member in struct:ariadne_private
111 struct TDRE tx_ring[TX_RING_SIZE]; member in struct:lancedata
289 lance->RDP = swloww(ARIADNE_RAM+offsetof(struct lancedata, tx_ring));
291 lance->RDP = swhighw(ARIADNE_RAM+offsetof(struct lancedata, tx_ring));
344 volatile struct TDRE *t = &lancedata->tx_ring[i];
350 priv->tx_ring[i] = &lancedata->tx_ring[i];
444 int status = lowb(priv->tx_ring[entry]->TMD1);
449 priv->tx_ring[entry]->TMD1 &= 0xff00;
453 int err_status = priv->tx_ring[entr
[all...]
atarilance.c:152 struct ringdesc tx_ring; member in struct:lance_init_block
619 MEM->init.tx_ring.adr_lo = offsetof( struct lance_memory, tx_head );
620 MEM->init.tx_ring.adr_hi = 0;
621 MEM->init.tx_ring.len = TX_RING_LEN_BITS;
b44.c:995 bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
996 bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);
1000 entry * sizeof(bp->tx_ring[0]),
1109 memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
1149 if (bp->tx_ring) {
1154 kfree(bp->tx_ring);
1157 bp->tx_ring, bp->tx_ring_dma);
1158 bp->tx_ring = NULL;
1209 bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
1210 if (!bp->tx_ring) {
1214 struct dma_desc *tx_ring; local
[all...]
b44.h:418 struct dma_desc *rx_ring, *tx_ring; member in struct:b44
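
Note: acenic, amd8111e and b44 all follow the same allocation lifecycle for tx_ring: obtain the descriptor array from the coherent-DMA allocator (keeping both the CPU pointer and the bus address), memset() it when the interface is brought up, and on teardown free it and set the pointer to NULL so a repeated teardown is harmless. A compact sketch of that lifecycle follows, with malloc() standing in for pci_alloc_consistent()/dma_alloc_coherent() and a faked bus address:

/* Allocate / reset / free lifecycle in the acenic/amd8111e/b44 style:
 * keep the CPU pointer and the DMA (bus) address together, clear the
 * ring at open time, and NULL the pointer after freeing. */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct dma_desc {
    uint32_t ctrl;
    uint32_t addr;
};

struct nic {
    struct dma_desc *tx_ring;       /* CPU-visible descriptor ring         */
    uint64_t tx_ring_dma;           /* bus address programmed into the NIC */
    unsigned int tx_ring_entries;
};

int nic_alloc_tx_ring(struct nic *bp, unsigned int entries)
{
    size_t size = entries * sizeof(struct dma_desc);

    bp->tx_ring = malloc(size);     /* real driver: pci_alloc_consistent() */
    if (!bp->tx_ring)
        return -1;
    bp->tx_ring_dma = (uint64_t)(uintptr_t)bp->tx_ring;  /* fake bus address */
    bp->tx_ring_entries = entries;

    /* Drivers clear the ring at open/init time before arming the NIC. */
    memset(bp->tx_ring, 0, size);
    return 0;
}

void nic_free_tx_ring(struct nic *bp)
{
    if (bp->tx_ring) {
        free(bp->tx_ring);          /* real driver: pci_free_consistent() */
        bp->tx_ring = NULL;         /* makes repeated teardown a no-op    */
    }
}
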

Completed in 233 milliseconds
