Matches found under /netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/drivers/net/wireless/

Lines Matching defs:rxq

3333 				      struct ipw_rx_queue *rxq)
3338 spin_lock_irqsave(&rxq->lock, flags);
3340 INIT_LIST_HEAD(&rxq->rx_free);
3341 INIT_LIST_HEAD(&rxq->rx_used);
3347 if (rxq->pool[i].skb != NULL) {
3348 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
3350 dev_kfree_skb(rxq->pool[i].skb);
3351 rxq->pool[i].skb = NULL;
3353 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3358 rxq->read = rxq->write = 0;
3359 rxq->processed = RX_QUEUE_SIZE - 1;
3360 rxq->free_count = 0;
3361 spin_unlock_irqrestore(&rxq->lock, flags);
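
Read together, the matches at 3333-3361 outline the queue-reset path. A consolidated sketch follows; the loop bound RX_FREE_BUFFERS + RX_QUEUE_SIZE and the IPW_RX_BUF_SIZE / PCI_DMA_FROMDEVICE arguments to pci_unmap_single() are assumptions filled in from the surrounding matches, not shown in the listing:

static void ipw_rx_queue_reset(struct ipw_priv *priv,
			       struct ipw_rx_queue *rxq)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&rxq->lock, flags);

	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	/* Return every pool entry to rx_used, unmapping and freeing any
	 * SKB still attached to it.  (Loop bound is an assumption.) */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		if (rxq->pool[i].skb != NULL) {
			pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
					 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(rxq->pool[i].skb);
			rxq->pool[i].skb = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}

	/* All buffers used/processed, none yet restocked for the firmware */
	rxq->read = rxq->write = 0;
	rxq->processed = RX_QUEUE_SIZE - 1;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);
}
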
3428 if (!priv->rxq)
3429 priv->rxq = ipw_rx_queue_alloc(priv);
3431 ipw_rx_queue_reset(priv, priv->rxq);
3432 if (!priv->rxq) {
3548 ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
3559 if (priv->rxq) {
3560 ipw_rx_queue_free(priv, priv->rxq);
3561 priv->rxq = NULL;
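
The matches at 3428-3432 and 3548 come from the hardware bring-up path and suggest an allocate-on-first-use pattern. The helper name ipw_rx_queue_setup below is hypothetical, and the error handling is assumed; only the alloc/reset branch and the READ INDEX write appear in the listing:

/* Hypothetical consolidation of the bring-up matches above */
static int ipw_rx_queue_setup(struct ipw_priv *priv)
{
	/* Allocate the Rx queue once; on later restarts only reset it */
	if (!priv->rxq)
		priv->rxq = ipw_rx_queue_alloc(priv);
	else
		ipw_rx_queue_reset(priv, priv->rxq);
	if (!priv->rxq)
		return -ENOMEM;	/* exact error handling not in the matches */

	/* Tell the firmware where the driver has read up to */
	ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
	return 0;
}
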
4984 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
4985 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
4986 * to replenish the ipw->rxq->rx_free.
4988 * ipw->rxq is replenished and the READ INDEX is updated (updating the
4991 * detached from the ipw->rxq. The driver 'processed' index is updated.
4992 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
4993 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
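
The comment fragments above (4984-4993) describe the buffer economy: pre-allocated SKBs wait on rx_free, spent buffers collect on rx_used, and a work item refills rx_free once free_count drops to RX_LOW_WATERMARK. The stand-alone model below (plain C, no kernel APIs; all names are local to the example and the queue size and watermark values are made up) demonstrates just that accounting:

#include <stdio.h>

#define RX_QUEUE_SIZE    32
#define RX_LOW_WATERMARK 8

static int free_count = RX_QUEUE_SIZE;	/* buffers on rx_free */
static int used_count;			/* buffers on rx_used */

/* Hand one buffer from rx_free to the hardware ring (restock step) */
static void restock_one(void)
{
	free_count--;
	if (free_count <= RX_LOW_WATERMARK)
		printf("free_count=%d: schedule replenish work\n", free_count);
}

/* Firmware consumed a buffer; the driver parks it on rx_used */
static void consume_one(void)
{
	used_count++;
}

/* Replenish work: re-arm every used buffer back onto rx_free */
static void replenish(void)
{
	free_count += used_count;
	used_count = 0;
}

int main(void)
{
	int i;

	for (i = 0; i < RX_QUEUE_SIZE; i++) {
		restock_one();
		consume_one();
	}
	replenish();
	printf("after replenish: free=%d used=%d\n", free_count, used_count);
	return 0;
}
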
5029 struct ipw_rx_queue *rxq = priv->rxq;
5035 spin_lock_irqsave(&rxq->lock, flags);
5036 write = rxq->write;
5037 while ((rxq->write != rxq->processed) && (rxq->free_count)) {
5038 element = rxq->rx_free.next;
5042 ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
5044 rxq->queue[rxq->write] = rxb;
5045 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
5046 rxq->free_count--;
5048 spin_unlock_irqrestore(&rxq->lock, flags);
5052 if (rxq->free_count <= RX_LOW_WATERMARK)
5056 if (write != rxq->write)
5057 ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
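
The matches at 5029-5057 belong to the restock routine, which moves buffers from rx_free into the firmware-visible RFD table. Consolidated below; the list_entry()/list_del() glue and the queue_work() target are assumptions, as the listing shows only the matching lines:

static void ipw_rx_queue_restock(struct ipw_priv *priv)
{
	struct ipw_rx_queue *rxq = priv->rxq;
	struct ipw_rx_mem_buffer *rxb;
	struct list_head *element;
	unsigned long flags;
	int write;

	spin_lock_irqsave(&rxq->lock, flags);
	write = rxq->write;
	while ((rxq->write != rxq->processed) && (rxq->free_count)) {
		/* Pop the next pre-allocated buffer off rx_free */
		element = rxq->rx_free.next;
		rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
		list_del(element);

		/* Publish its DMA address in the firmware's RFD table */
		ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
			    rxb->dma_addr);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);

	/* Pool running low: schedule the replenish work item
	 * (the exact work item is an assumption) */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		queue_work(priv->workqueue, &priv->rx_replenish);

	/* Only poke the hardware if we actually advanced the index */
	if (write != rxq->write)
		ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
}

Note how the loop stops when write catches up with processed rather than read, so the driver never re-arms a slot it has not yet drained.
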
5069 struct ipw_rx_queue *rxq = priv->rxq;
5074 spin_lock_irqsave(&rxq->lock, flags);
5075 while (!list_empty(&rxq->rx_used)) {
5076 element = rxq->rx_used.next;
5093 list_add_tail(&rxb->list, &rxq->rx_free);
5094 rxq->free_count++;
5096 spin_unlock_irqrestore(&rxq->lock, flags);
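
The matches at 5069-5096 are from the replenish path that re-arms used buffers with fresh SKBs. A sketch follows; the SKB allocation and pci_map_single() step between lines 5076 and 5093, and the trailing restock call, are assumptions:

static void ipw_rx_queue_replenish(struct ipw_priv *priv)
{
	struct ipw_rx_queue *rxq = priv->rxq;
	struct ipw_rx_mem_buffer *rxb;
	struct list_head *element;
	unsigned long flags;

	spin_lock_irqsave(&rxq->lock, flags);
	while (!list_empty(&rxq->rx_used)) {
		element = rxq->rx_used.next;
		rxb = list_entry(element, struct ipw_rx_mem_buffer, list);

		/* Attach a fresh SKB and DMA-map it (assumed step; the
		 * listing only shows the list manipulation) */
		rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
		if (!rxb->skb)
			break;	/* retry later; restock reschedules us */
		list_del(element);
		rxb->dma_addr = pci_map_single(priv->pci_dev, rxb->skb->data,
					       IPW_RX_BUF_SIZE,
					       PCI_DMA_FROMDEVICE);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);

	/* Assumed follow-up: push the newly armed buffers to the ring */
	ipw_rx_queue_restock(priv);
}
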
5115 static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
5119 if (!rxq)
5123 if (rxq->pool[i].skb != NULL) {
5124 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
5126 dev_kfree_skb(rxq->pool[i].skb);
5130 kfree(rxq);
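
The teardown counterpart at 5115-5130 mirrors the reset loop: any SKB still mapped is unmapped and freed, then the queue structure itself is released. The loop bound and unmap arguments are again assumptions:

static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
{
	int i;

	if (!rxq)
		return;

	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		if (rxq->pool[i].skb != NULL) {
			pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
					 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(rxq->pool[i].skb);
		}
	}
	kfree(rxq);
}
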
5135 struct ipw_rx_queue *rxq;
5138 rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
5139 if (unlikely(!rxq)) {
5143 spin_lock_init(&rxq->lock);
5144 INIT_LIST_HEAD(&rxq->rx_free);
5145 INIT_LIST_HEAD(&rxq->rx_used);
5149 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
5153 rxq->read = rxq->write = 0;
5154 rxq->processed = RX_QUEUE_SIZE - 1;
5155 rxq->free_count = 0;
5157 return rxq;
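
The allocator at 5135-5157 builds the same initial state the reset path restores: every pool entry on rx_used, nothing on rx_free. Consolidated (the loop bound is assumed; any error message at 5139 is not shown in the listing):

static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
{
	struct ipw_rx_queue *rxq;
	int i;

	rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
	if (unlikely(!rxq))
		return NULL;

	spin_lock_init(&rxq->lock);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	/* Every buffer starts out "used": nothing mapped, nothing armed */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);

	/* Processed trails write by the full ring, so the queue looks
	 * fully consumed until restock arms fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->processed = RX_QUEUE_SIZE - 1;
	rxq->free_count = 0;
	return rxq;
}
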
8194 i = (priv->rxq->processed + 1) % RX_QUEUE_SIZE;
8197 rxb = priv->rxq->queue[i];
8202 priv->rxq->queue[i] = NULL;
8363 list_add_tail(&rxb->list, &priv->rxq->rx_used);
8369 priv->rxq->processed = (i ? i : RX_QUEUE_SIZE) - 1;
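
The matches at 8194-8369 show the consumer side: the Rx handler walks from processed + 1 toward the firmware's read index, detaches each buffer, and finally backtracks processed by one. A sketch of just that index handling; the helper name ipw_rx_walk is hypothetical, the read-index fetch is assumed, and DMA sync, unmap, and packet dispatch are elided:

static void ipw_rx_walk(struct ipw_priv *priv)
{
	u32 r = ipw_read32(priv, IPW_RX_READ_INDEX);	/* assumed fetch */
	u32 i = (priv->rxq->processed + 1) % RX_QUEUE_SIZE;

	while (i != r) {
		struct ipw_rx_mem_buffer *rxb = priv->rxq->queue[i];

		priv->rxq->queue[i] = NULL;
		/* ... unmap, parse, and deliver rxb->skb here ... */

		/* Spent buffer goes back on rx_used for the replenisher */
		list_add_tail(&rxb->list, &priv->rxq->rx_used);
		i = (i + 1) % RX_QUEUE_SIZE;
	}

	/* Back up one: 'processed' names the last slot actually handled,
	 * so an empty walk leaves it unchanged */
	priv->rxq->processed = (i ? i : RX_QUEUE_SIZE) - 1;
}
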
11707 if (priv->rxq) {
11708 ipw_rx_queue_free(priv, priv->rxq);
11709 priv->rxq = NULL;