• Home
  • History
  • Annotate
  • Raw
  • Download
  • only in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/net/wireless/ipw2x00/

Lines Matching defs:rxq

3413 				      struct ipw_rx_queue *rxq)
3418 spin_lock_irqsave(&rxq->lock, flags);
3420 INIT_LIST_HEAD(&rxq->rx_free);
3421 INIT_LIST_HEAD(&rxq->rx_used);
3427 if (rxq->pool[i].skb != NULL) {
3428 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
3430 dev_kfree_skb(rxq->pool[i].skb);
3431 rxq->pool[i].skb = NULL;
3433 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3438 rxq->read = rxq->write = 0;
3439 rxq->free_count = 0;
3440 spin_unlock_irqrestore(&rxq->lock, flags);
3507 if (!priv->rxq)
3508 priv->rxq = ipw_rx_queue_alloc(priv);
3510 ipw_rx_queue_reset(priv, priv->rxq);
3511 if (!priv->rxq) {
3627 ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
3638 if (priv->rxq) {
3639 ipw_rx_queue_free(priv, priv->rxq);
3640 priv->rxq = NULL;
5108 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
5109 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
5110 * to replenish the ipw->rxq->rx_free.
5112 * ipw->rxq is replenished and the READ INDEX is updated (updating the
5115 * detached from the ipw->rxq. The driver 'processed' index is updated.
5116 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
5117 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
5153 struct ipw_rx_queue *rxq = priv->rxq;
5159 spin_lock_irqsave(&rxq->lock, flags);
5160 write = rxq->write;
5161 while ((ipw_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
5162 element = rxq->rx_free.next;
5166 ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
5168 rxq->queue[rxq->write] = rxb;
5169 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
5170 rxq->free_count--;
5172 spin_unlock_irqrestore(&rxq->lock, flags);
5176 if (rxq->free_count <= RX_LOW_WATERMARK)
5180 if (write != rxq->write)
5181 ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
5193 struct ipw_rx_queue *rxq = priv->rxq;
5198 spin_lock_irqsave(&rxq->lock, flags);
5199 while (!list_empty(&rxq->rx_used)) {
5200 element = rxq->rx_used.next;
5217 list_add_tail(&rxb->list, &rxq->rx_free);
5218 rxq->free_count++;
5220 spin_unlock_irqrestore(&rxq->lock, flags);
5239 static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
5243 if (!rxq)
5247 if (rxq->pool[i].skb != NULL) {
5248 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
5250 dev_kfree_skb(rxq->pool[i].skb);
5254 kfree(rxq);
5259 struct ipw_rx_queue *rxq;
5262 rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
5263 if (unlikely(!rxq)) {
5267 spin_lock_init(&rxq->lock);
5268 INIT_LIST_HEAD(&rxq->rx_free);
5269 INIT_LIST_HEAD(&rxq->rx_used);
5273 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
5277 rxq->read = rxq->write = 0;
5278 rxq->free_count = 0;
5280 return rxq;
8363 i = priv->rxq->read;
8365 if (ipw_rx_queue_space (priv->rxq) > (RX_QUEUE_SIZE / 2))
8369 rxb = priv->rxq->queue[i];
8374 priv->rxq->queue[i] = NULL;
8532 list_add_tail(&rxb->list, &priv->rxq->rx_used);
8539 priv->rxq->read = i;
8545 priv->rxq->read = i;
11880 if (priv->rxq) {
11881 ipw_rx_queue_free(priv, priv->rxq);
11882 priv->rxq = NULL;