Lines Matching refs:rxb

222 struct iwl_rx_mem_buffer *rxb)
229 bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
230 bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
234 bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
238 (u32)rxb->vid, rxq->id, rxq->write);
248 struct iwl_rx_mem_buffer *rxb;
264 rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
266 list_del(&rxb->list);
267 rxb->invalid = false;
269 WARN_ON(rxb->page_dma & trans_pcie->supported_dma_mask);
271 iwl_pcie_restock_bd(trans, rxq, rxb);
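
The lines at 222-238 show the two buffer-descriptor formats used when restocking: a structured descriptor with separate addr/rbid fields (229-230) and, in the other multi-queue case, a single little-endian 64-bit word holding the DMA address OR-ed with the buffer's vid (234). That packing only works if the low bits of page_dma are free, which is what the WARN_ON at 269 appears to check. A minimal userspace sketch of the packed form, assuming a 12-bit vid field (the width is an illustration, not a value taken from the driver headers):

    /* Illustrative model of the packed RX buffer descriptor, not the
     * driver's real layout: DMA address in the high bits, vid in the
     * low bits, which the driver then stores little-endian for the HW. */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define RBD_VID_MASK 0xfffULL   /* assumed 12-bit vid field */

    static uint64_t pack_rbd(uint64_t page_dma, uint16_t vid)
    {
            /* mirrors the WARN_ON at 269: the vid bits must be clear */
            assert((page_dma & RBD_VID_MASK) == 0);
            return page_dma | vid;   /* the driver wraps this in cpu_to_le64() */
    }

    int main(void)
    {
            uint64_t bd = pack_rbd(0x12345000ULL, 42);

            printf("addr=%#llx vid=%llu\n",
                   (unsigned long long)(bd & ~RBD_VID_MASK),
                   (unsigned long long)(bd & RBD_VID_MASK));
            return 0;
    }
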
294 struct iwl_rx_mem_buffer *rxb;
310 /* The overwritten rxb must be a used one */
311 rxb = rxq->queue[rxq->write];
312 BUG_ON(rxb && rxb->page);
315 rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
317 list_del(&rxb->list);
318 rxb->invalid = false;
321 bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
322 rxq->queue[rxq->write] = rxb;
433 struct iwl_rx_mem_buffer *rxb;
457 rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
459 list_del(&rxb->list);
462 BUG_ON(rxb->page);
463 rxb->page = page;
464 rxb->offset = offset;
466 rxb->page_dma =
467 dma_map_page(trans->dev, page, rxb->offset,
470 if (dma_mapping_error(trans->dev, rxb->page_dma)) {
471 rxb->page = NULL;
473 list_add(&rxb->list, &rxq->rx_used);
481 list_add_tail(&rxb->list, &rxq->rx_free);
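
Lines 457-481 show the map-or-back-off discipline when an rxb taken from rx_used receives a fresh page: the page is DMA-mapped, and on mapping failure the page is detached and the rxb is returned to rx_used rather than moving on to rx_free. A rough kernel-style outline of that flow, with locking omitted and the buffer-size expression assumed rather than copied from the driver:

    /* Sketch only: map the freshly allocated page, back off on failure.
     * rxb, rxq, trans and trans_pcie come from the surrounding driver
     * code; the size expression and error path are assumptions. */
    rxb->page = page;
    rxb->offset = offset;
    rxb->page_dma = dma_map_page(trans->dev, page, rxb->offset,
                                 PAGE_SIZE << trans_pcie->rx_page_order,
                                 DMA_FROM_DEVICE);
    if (dma_mapping_error(trans->dev, rxb->page_dma)) {
            rxb->page = NULL;
            /* hand the rxb back to rx_used and drop the page */
            list_add(&rxb->list, &rxq->rx_used);
            __free_pages(page, trans_pcie->rx_page_order);
            return;
    }
    /* success: the buffer can now be restocked to the hardware */
    list_add_tail(&rxb->list, &rxq->rx_free);
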
538 struct iwl_rx_mem_buffer *rxb;
547 /* Get the first rxb from the rbd list */
548 rxb = list_first_entry(&local_empty,
550 BUG_ON(rxb->page);
553 page = iwl_pcie_rx_alloc_page(trans, &rxb->offset,
557 rxb->page = page;
560 rxb->page_dma = dma_map_page(trans->dev, page,
561 rxb->offset,
564 if (dma_mapping_error(trans->dev, rxb->page_dma)) {
565 rxb->page = NULL;
571 list_move(&rxb->list, &local_allocated);
636 struct iwl_rx_mem_buffer *rxb =
640 list_move(&rxb->list, &rxq->rx_free);
1149 struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
1152 list_add(&rxb->list, &rba->rbd_empty);
1154 list_add(&rxb->list, &def_rxq->rx_used);
1155 trans_pcie->global_table[i] = rxb;
1156 rxb->vid = (u16)(i + 1);
1157 rxb->invalid = true;
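
Lines 1149-1157 set up the global buffer table: each rxb in the pool is published in global_table and given vid = index + 1, so vid 0 can never name a valid buffer, and every entry starts out flagged invalid until it is actually handed to the hardware. A small userspace model of that setup (pool size and types are assumptions for the sketch):

    /* Userspace model of the vid assignment at 1149-1157: every rxb in
     * the pool gets vid = index + 1 and starts out flagged invalid.
     * POOL_SIZE and the types are sketch assumptions. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define POOL_SIZE 8

    struct rx_mem_buffer {
            uint16_t vid;
            bool invalid;
    };

    static struct rx_mem_buffer rx_pool[POOL_SIZE];
    static struct rx_mem_buffer *global_table[POOL_SIZE];

    int main(void)
    {
            for (int i = 0; i < POOL_SIZE; i++) {
                    struct rx_mem_buffer *rxb = &rx_pool[i];

                    global_table[i] = rxb;
                    rxb->vid = (uint16_t)(i + 1); /* vid 0 stays unused  */
                    rxb->invalid = true;          /* not yet given to HW */
            }
            printf("first vid=%u last vid=%u\n",
                   global_table[0]->vid, global_table[POOL_SIZE - 1]->vid);
            return 0;
    }
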
1261 struct iwl_rx_mem_buffer *rxb,
1269 list_add_tail(&rxb->list, &rxq->rx_used);
1294 struct iwl_rx_mem_buffer *rxb,
1304 if (WARN_ON(!rxb))
1307 dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);
1314 ._offset = rxb->offset + offset,
1316 ._page = rxb->page,
1402 IWL_WARN(trans, "Claim null rxb?\n");
1412 __free_pages(rxb->page, trans_pcie->rx_page_order);
1413 rxb->page = NULL;
1419 if (rxb->page != NULL) {
1420 rxb->page_dma =
1421 dma_map_page(trans->dev, rxb->page, rxb->offset,
1424 if (dma_mapping_error(trans->dev, rxb->page_dma)) {
1430 __free_pages(rxb->page, trans_pcie->rx_page_order);
1431 rxb->page = NULL;
1432 iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
1434 list_add_tail(&rxb->list, &rxq->rx_free);
1438 iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
1446 struct iwl_rx_mem_buffer *rxb;
1453 rxb = rxq->queue[i];
1455 return rxb;
1477 rxb = trans_pcie->global_table[vid - 1];
1478 if (rxb->invalid)
1481 IWL_DEBUG_RX(trans, "Got virtual RB ID %u\n", (u32)rxb->vid);
1483 rxb->invalid = true;
1485 return rxb;
1488 WARN(1, "Invalid rxb from HW %u\n", (u32)vid);
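
Lines 1477-1488 are the reverse lookup: the vid reported by the hardware indexes global_table at vid - 1, out-of-range vids and entries still flagged invalid are rejected with a warning, and a successfully claimed entry is flagged invalid again so a repeated vid is caught. A self-contained model of that validation, with the table bounds and the setup in main() assumed for the sketch:

    /* Userspace model of the lookup at 1477-1488; POOL_SIZE and the
     * pre-populated table are sketch assumptions. */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define POOL_SIZE 8

    struct rx_mem_buffer {
            uint16_t vid;
            bool invalid;
    };

    static struct rx_mem_buffer rx_pool[POOL_SIZE];
    static struct rx_mem_buffer *global_table[POOL_SIZE];

    static struct rx_mem_buffer *get_rxb(uint16_t vid)
    {
            struct rx_mem_buffer *rxb;

            if (vid < 1 || vid > POOL_SIZE)  /* reject out-of-range vids   */
                    return NULL;
            rxb = global_table[vid - 1];
            if (rxb->invalid)                /* never restocked, or reused */
                    return NULL;
            rxb->invalid = true;             /* claimed: a repeat now fails */
            return rxb;
    }

    int main(void)
    {
            for (int i = 0; i < POOL_SIZE; i++) {
                    global_table[i] = &rx_pool[i];
                    rx_pool[i].vid = (uint16_t)(i + 1);
                    rx_pool[i].invalid = false;  /* pretend all were restocked */
            }
            printf("vid 3: %s\n", get_rxb(3) ? "ok" : "rejected");
            printf("vid 3 again: %s\n", get_rxb(3) ? "ok" : "rejected");
            printf("vid 0: %s\n", get_rxb(0) ? "ok" : "rejected");
            return 0;
    }
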
1524 struct iwl_rx_mem_buffer *rxb;
1542 rxb = iwl_pcie_get_rxb(trans, rxq, i, &join);
1543 if (!rxb)
1560 list_add_tail(&rxb->list, &rxq->rx_free);
1563 iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i);