Lines Matching refs:rxr

1518 struct rx_ring *rxr = adapter->rx_rings;
1540 em_rxeof(rxr, count, &rx_done);
1611 struct rx_ring *rxr = adapter->rx_rings;
1614 bool more = em_rxeof(rxr, adapter->rx_process_limit, NULL);
1675 struct rx_ring *rxr = arg;
1676 struct adapter *adapter = rxr->adapter;
1679 ++rxr->rx_irq;
1682 more = em_rxeof(rxr, adapter->rx_process_limit, NULL);
1684 taskqueue_enqueue(rxr->tq, &rxr->rx_task);
1687 E1000_WRITE_REG(&adapter->hw, E1000_IMS, rxr->ims);
1731 struct rx_ring *rxr = context;
1732 struct adapter *adapter = rxr->adapter;
1735 more = em_rxeof(rxr, adapter->rx_process_limit, NULL);
1737 taskqueue_enqueue(rxr->tq, &rxr->rx_task);
1740 E1000_WRITE_REG(&adapter->hw, E1000_IMS, rxr->ims);
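
The fragments at 1675-1740 together show the driver's deferred-interrupt pattern: the MSI-X handler counts the interrupt, cleans up to rx_process_limit descriptors, and either defers leftover work to a per-ring taskqueue or re-arms only its own IMS bit. A sketch reconstructed around the quoted lines (the declarations and if/else structure are inferred, not verbatim driver source):

static void
em_msix_rx(void *arg)
{
	struct rx_ring	*rxr = arg;
	struct adapter	*adapter = rxr->adapter;
	bool		more;

	++rxr->rx_irq;
	more = em_rxeof(rxr, adapter->rx_process_limit, NULL);
	if (more)
		/* Work left over: hand off to the per-ring taskqueue. */
		taskqueue_enqueue(rxr->tq, &rxr->rx_task);
	else
		/* Done for now: re-arm only this queue's interrupt bit. */
		E1000_WRITE_REG(&adapter->hw, E1000_IMS, rxr->ims);
}

/* em_handle_rx (1731-1740) runs the same loop from taskqueue context,
 * so a saturated queue keeps polling from a thread instead of taking
 * an interrupt per packet. */
static void
em_handle_rx(void *context, int pending)
{
	struct rx_ring	*rxr = context;
	struct adapter	*adapter = rxr->adapter;
	bool		more;

	more = em_rxeof(rxr, adapter->rx_process_limit, NULL);
	if (more)
		taskqueue_enqueue(rxr->tq, &rxr->rx_task);
	else
		E1000_WRITE_REG(&adapter->hw, E1000_IMS, rxr->ims);
}
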
2332 struct rx_ring *rxr = adapter->rx_rings;
2347 for (int i = 0; i < adapter->num_queues; i++, rxr++)
2348 trigger |= rxr->ims;
2349 rxr = adapter->rx_rings;
2637 struct rx_ring *rxr = adapter->rx_rings;
2646 for (int i = 0; i < adapter->num_queues; i++, rxr++, vector++) {
2651 rxr->res = bus_alloc_resource_any(dev,
2653 if (rxr->res == NULL) {
2659 if ((error = bus_setup_intr(dev, rxr->res,
2661 rxr, &rxr->tag)) != 0) {
2666 bus_describe_intr(dev, rxr->res, rxr->tag, "rx%d", i);
2668 rxr->msix = vector;
2673 bus_bind_intr(dev, rxr->res, cpu_id);
2675 TASK_INIT(&rxr->rx_task, 0, em_handle_rx, rxr);
2676 rxr->tq = taskqueue_create_fast("em_rxq", M_NOWAIT,
2677 taskqueue_thread_enqueue, &rxr->tq);
2678 taskqueue_start_threads(&rxr->tq, 1, PI_NET, "%s rxq (cpuid %d)",
2686 rxr->ims = 1 << (20 + i);
2687 adapter->ims |= rxr->ims;
2688 adapter->ivars |= (8 | rxr->msix) << (i * 4);
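
Lines 2637-2688 are the per-queue MSI-X bring-up. Condensed below; the resource type, flags, rid arithmetic, and cpu_id selection are not in the listing and are assumptions:

for (int i = 0; i < adapter->num_queues; i++, rxr++, vector++) {
	rid = vector + 1;		/* assumed: MSI-X rids are 1-based */
	rxr->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE);
	if (rxr->res == NULL)
		return (ENXIO);
	if ((error = bus_setup_intr(dev, rxr->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_rx,
	    rxr, &rxr->tag)) != 0)
		return (error);
	bus_describe_intr(dev, rxr->res, rxr->tag, "rx%d", i);
	rxr->msix = vector;
	bus_bind_intr(dev, rxr->res, cpu_id);	/* pin the IRQ to one CPU */
	TASK_INIT(&rxr->rx_task, 0, em_handle_rx, rxr);
	rxr->tq = taskqueue_create_fast("em_rxq", M_NOWAIT,
	    taskqueue_thread_enqueue, &rxr->tq);
	taskqueue_start_threads(&rxr->tq, 1, PI_NET, "%s rxq (cpuid %d)",
	    device_get_nameunit(dev), cpu_id);
	/* RX queue i owns cause bit 20+i and one nibble of the IVAR. */
	rxr->ims = 1 << (20 + i);
	adapter->ims |= rxr->ims;
	adapter->ivars |= (8 | rxr->msix) << (i * 4);
}
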
2772 struct rx_ring *rxr;
2793 rxr = &adapter->rx_rings[i];
2795 if (rxr == NULL)
2797 rid = rxr->msix +1;
2798 if (rxr->tag != NULL) {
2799 bus_teardown_intr(dev, rxr->res, rxr->tag);
2800 rxr->tag = NULL;
2802 if (rxr->res != NULL)
2804 rid, rxr->res);
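
Lines 2772-2804 mirror that setup in reverse. A sketch of the teardown loop (SYS_RES_IRQ and the release call are assumed; the listing elides them):

for (int i = 0; i < adapter->num_queues; i++) {
	rxr = &adapter->rx_rings[i];
	if (rxr == NULL)
		break;
	rid = rxr->msix + 1;		/* matches the rid chosen at setup */
	if (rxr->tag != NULL) {
		bus_teardown_intr(dev, rxr->res, rxr->tag);
		rxr->tag = NULL;
	}
	if (rxr->res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, rid, rxr->res);
}
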
3430 struct rx_ring *rxr = NULL;
3500 rxr = &adapter->rx_rings[i];
3501 rxr->adapter = adapter;
3502 rxr->me = i;
3505 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
3507 mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);
3510 &rxr->rxdma, BUS_DMA_NOWAIT)) {
3516 rxr->rx_base = (union e1000_rx_desc_extended *)rxr->rxdma.dma_vaddr;
3517 bzero((void *)rxr->rx_base, rsize);
3520 if (em_allocate_receive_buffers(rxr)) {
3531 for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
3532 em_dma_free(adapter, &rxr->rxdma);
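
Lines 3430-3532 cover per-ring allocation. A condensed sketch; the em_dma_malloc name matches the call shape at 3510, and the error label is inferred from the unwind loop at 3531-3532:

for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
	rxr = &adapter->rx_rings[i];
	rxr->adapter = adapter;
	rxr->me = i;
	/* Each ring gets its own mutex, named after the device. */
	snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
	    device_get_nameunit(dev), i);
	mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);
	/* DMA-contiguous descriptor area, then the software buffers. */
	if (em_dma_malloc(adapter, rsize, &rxr->rxdma, BUS_DMA_NOWAIT)) {
		error = ENOMEM;
		goto err_rx_desc;
	}
	rxr->rx_base = (union e1000_rx_desc_extended *)rxr->rxdma.dma_vaddr;
	bzero((void *)rxr->rx_base, rsize);
	if (em_allocate_receive_buffers(rxr)) {
		error = ENOMEM;
		goto err_rx_desc;
	}
}
return (0);

err_rx_desc:
/* rxconf counts how many rings got a descriptor area; free just those. */
for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
	em_dma_free(adapter, &rxr->rxdma);
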
4258 em_refresh_mbufs(struct rx_ring *rxr, int limit)
4260 struct adapter *adapter = rxr->adapter;
4267 i = j = rxr->next_to_refresh;
4277 rxbuf = &rxr->rx_buffers[i];
4297 error = bus_dmamap_load_mbuf_sg(rxr->rxtag, rxbuf->map,
4308 bus_dmamap_sync(rxr->rxtag,
4310 em_setup_rxdesc(&rxr->rx_base[i], rxbuf);
4314 rxr->next_to_refresh = i;
4326 E1000_RDT(rxr->me), rxr->next_to_refresh);
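
em_refresh_mbufs (4258-4326) refills descriptor slots the hardware has consumed, trailing index i one step behind scan index j and finally publishing the new tail. A sketch; m_getjcl, the paddr field, and the cleanup details are inferred where the fragments do not show them:

static void
em_refresh_mbufs(struct rx_ring *rxr, int limit)
{
	struct adapter		*adapter = rxr->adapter;
	struct em_rxbuffer	*rxbuf;
	struct mbuf		*m;
	bus_dma_segment_t	segs[1];
	int			i, j, error, nsegs;

	i = j = rxr->next_to_refresh;
	if (++j == adapter->num_rx_desc)	/* j runs one slot ahead */
		j = 0;
	while (j != limit) {
		rxbuf = &rxr->rx_buffers[i];
		m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz);
		if (m == NULL)
			break;			/* retry on a later pass */
		m->m_len = m->m_pkthdr.len = adapter->rx_mbuf_sz;
		rxbuf->m_head = m;
		error = bus_dmamap_load_mbuf_sg(rxr->rxtag, rxbuf->map,
		    m, segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_free(m);
			rxbuf->m_head = NULL;
			break;
		}
		rxbuf->paddr = segs[0].ds_addr;	/* assumed field */
		bus_dmamap_sync(rxr->rxtag, rxbuf->map, BUS_DMASYNC_PREREAD);
		em_setup_rxdesc(&rxr->rx_base[i], rxbuf);
		i = j;				/* descriptor i is now live */
		if (++j == adapter->num_rx_desc)
			j = 0;
	}
	rxr->next_to_refresh = i;
	/* Publish the new tail so the NIC can use the refreshed slots. */
	E1000_WRITE_REG(&adapter->hw, E1000_RDT(rxr->me),
	    rxr->next_to_refresh);
}
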
4341 em_allocate_receive_buffers(struct rx_ring *rxr)
4343 struct adapter *adapter = rxr->adapter;
4348 rxr->rx_buffers = malloc(sizeof(struct em_rxbuffer) *
4350 if (rxr->rx_buffers == NULL) {
4366 &rxr->rxtag);
4373 rxbuf = rxr->rx_buffers;
4375 rxbuf = &rxr->rx_buffers[i];
4376 error = bus_dmamap_create(rxr->rxtag, 0, &rxbuf->map);
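
em_allocate_receive_buffers (4341-4376) creates the software side: the rx_buffers array, one DMA tag for the whole ring, and one map per slot. A sketch; the tag parameters are illustrative assumptions, only the calls at 4348-4376 come from the listing:

static int
em_allocate_receive_buffers(struct rx_ring *rxr)
{
	struct adapter		*adapter = rxr->adapter;
	device_t		dev = adapter->dev;
	struct em_rxbuffer	*rxbuf;
	int			error, i;

	rxr->rx_buffers = malloc(sizeof(struct em_rxbuffer) *
	    adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (rxr->rx_buffers == NULL) {
		device_printf(dev, "Unable to allocate rx_buffer memory\n");
		return (ENOMEM);
	}
	/* One tag for the ring; limits here are assumed, not quoted. */
	error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    MJUM9BYTES, 1, MJUM9BYTES, 0, NULL, NULL, &rxr->rxtag);
	if (error) {
		device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
		    __func__, error);
		return (error);
	}
	/* ... and one map per descriptor slot. */
	for (i = 0; i < adapter->num_rx_desc; i++) {
		rxbuf = &rxr->rx_buffers[i];
		error = bus_dmamap_create(rxr->rxtag, 0, &rxbuf->map);
		if (error)
			return (error);
	}
	return (0);
}
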
4398 em_setup_receive_ring(struct rx_ring *rxr)
4400 struct adapter *adapter = rxr->adapter;
4411 EM_RX_LOCK(rxr);
4414 bzero((void *)rxr->rx_base, rsize);
4416 slot = netmap_reset(na, NR_RX, rxr->me, 0);
4423 rxbuf = &rxr->rx_buffers[i];
4425 bus_dmamap_sync(rxr->rxtag, rxbuf->map,
4427 bus_dmamap_unload(rxr->rxtag, rxbuf->map);
4435 rxbuf = &rxr->rx_buffers[j];
4438 int si = netmap_idx_n2k(na->rx_rings[rxr->me], j);
4443 netmap_load_map(na, rxr->rxtag, rxbuf->map, addr);
4445 em_setup_rxdesc(&rxr->rx_base[j], rxbuf);
4460 error = bus_dmamap_load_mbuf_sg(rxr->rxtag,
4468 bus_dmamap_sync(rxr->rxtag,
4472 em_setup_rxdesc(&rxr->rx_base[j], rxbuf);
4474 rxr->next_to_check = 0;
4475 rxr->next_to_refresh = 0;
4476 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
4480 EM_RX_UNLOCK(rxr);
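
em_setup_receive_ring (4398-4480) re-initializes one ring under its lock: zero the descriptor area, drop stale mbufs, then point each slot at either a netmap buffer or a fresh cluster. The population loop, condensed; PNMB, the paddr plumbing, and the error handling are assumptions wrapped around the quoted calls:

for (j = 0; j < adapter->num_rx_desc; j++) {
	rxbuf = &rxr->rx_buffers[j];
#ifdef DEV_NETMAP
	if (slot != NULL) {
		/* Map netmap's ring index to the kernel index, then aim
		 * the descriptor at the netmap buffer instead of an mbuf. */
		int si = netmap_idx_n2k(na->rx_rings[rxr->me], j);
		uint64_t paddr;
		void *addr = PNMB(na, slot + si, &paddr);
		netmap_load_map(na, rxr->rxtag, rxbuf->map, addr);
		rxbuf->paddr = paddr;
		em_setup_rxdesc(&rxr->rx_base[j], rxbuf);
		continue;
	}
#endif
	/* Normal path: allocate and DMA-load a cluster, as in
	 * em_refresh_mbufs above (4460-4472). */
	rxbuf->m_head = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
	    adapter->rx_mbuf_sz);
	if (rxbuf->m_head == NULL) {
		error = ENOBUFS;
		break;
	}
	rxbuf->m_head->m_len = rxbuf->m_head->m_pkthdr.len =
	    adapter->rx_mbuf_sz;
	error = bus_dmamap_load_mbuf_sg(rxr->rxtag, rxbuf->map,
	    rxbuf->m_head, segs, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0)
		break;
	rxbuf->paddr = segs[0].ds_addr;
	bus_dmamap_sync(rxr->rxtag, rxbuf->map, BUS_DMASYNC_PREREAD);
	em_setup_rxdesc(&rxr->rx_base[j], rxbuf);
}
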
4492 struct rx_ring *rxr = adapter->rx_rings;
4495 for (q = 0; q < adapter->num_queues; q++, rxr++)
4496 if (em_setup_receive_ring(rxr))
4507 rxr = &adapter->rx_rings[i];
4510 rxbuf = &rxr->rx_buffers[n];
4512 bus_dmamap_sync(rxr->rxtag, rxbuf->map,
4514 bus_dmamap_unload(rxr->rxtag, rxbuf->map);
4519 rxr->next_to_check = 0;
4520 rxr->next_to_refresh = 0;
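
em_setup_receive_structures (4492-4520) iterates the rings and, on any failure, unwinds every ring set up so far. A sketch, with the fail label inferred from the fragments at 4507-4520:

static int
em_setup_receive_structures(struct adapter *adapter)
{
	struct rx_ring		*rxr = adapter->rx_rings;
	struct em_rxbuffer	*rxbuf;
	int			q;

	for (q = 0; q < adapter->num_queues; q++, rxr++)
		if (em_setup_receive_ring(rxr))
			goto fail;
	return (0);
fail:
	/* Unload loaded maps and reset indices on the rings already done. */
	for (int i = 0; i < q; i++) {
		rxr = &adapter->rx_rings[i];
		for (int n = 0; n < adapter->num_rx_desc; n++) {
			rxbuf = &rxr->rx_buffers[n];
			if (rxbuf->m_head != NULL) {
				bus_dmamap_sync(rxr->rxtag, rxbuf->map,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(rxr->rxtag, rxbuf->map);
				m_freem(rxbuf->m_head);
				rxbuf->m_head = NULL;
			}
		}
		rxr->next_to_check = 0;
		rxr->next_to_refresh = 0;
	}
	return (ENOBUFS);
}
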
4534 struct rx_ring *rxr = adapter->rx_rings;
4536 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
4537 em_free_receive_buffers(rxr);
4539 em_dma_free(adapter, &rxr->rxdma);
4540 EM_RX_LOCK_DESTROY(rxr);
4553 em_free_receive_buffers(struct rx_ring *rxr)
4555 struct adapter *adapter = rxr->adapter;
4560 if (rxr->rx_buffers != NULL) {
4562 rxbuf = &rxr->rx_buffers[i];
4564 bus_dmamap_sync(rxr->rxtag, rxbuf->map,
4566 bus_dmamap_unload(rxr->rxtag, rxbuf->map);
4567 bus_dmamap_destroy(rxr->rxtag, rxbuf->map);
4574 free(rxr->rx_buffers, M_DEVBUF);
4575 rxr->rx_buffers = NULL;
4576 rxr->next_to_check = 0;
4577 rxr->next_to_refresh = 0;
4580 if (rxr->rxtag != NULL) {
4581 bus_dma_tag_destroy(rxr->rxtag);
4582 rxr->rxtag = NULL;
4598 struct rx_ring *rxr = adapter->rx_rings;
4725 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
4727 u64 bus_addr = rxr->rxdma.dma_paddr;
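
At 4725-4727, em_initialize_receive_unit points the hardware at each ring's DMA area. The register writes below are assumptions consistent with the e1000 register macros; only the loop header and the bus_addr line are quoted:

for (int i = 0; i < adapter->num_queues; i++, rxr++) {
	/* Program this queue's descriptor base and length. */
	u64 bus_addr = rxr->rxdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(i),
	    adapter->num_rx_desc * sizeof(union e1000_rx_desc_extended));
	E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(i),
	    (u32)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(i), (u32)bus_addr);
}
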
4812 em_rxeof(struct rx_ring *rxr, int count, int *done)
4814 struct adapter *adapter = rxr->adapter;
4823 EM_RX_LOCK(rxr);
4826 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
4831 if (netmap_rx_irq(ifp, rxr->me, &processed)) {
4832 EM_RX_UNLOCK(rxr);
4837 for (i = rxr->next_to_check, processed = 0; count != 0;) {
4841 cur = &rxr->rx_base[i];
4852 (rxr->discard == TRUE)) {
4854 ++rxr->rx_discarded;
4856 rxr->discard = TRUE;
4858 rxr->discard = FALSE;
4859 em_rx_discard(rxr, i);
4862 bus_dmamap_unload(rxr->rxtag, rxr->rx_buffers[i].map);
4865 mp = rxr->rx_buffers[i].m_head;
4869 rxr->rx_buffers[i].m_head = NULL;
4872 if (rxr->fmp == NULL) {
4874 rxr->fmp = rxr->lmp = mp;
4878 rxr->lmp->m_next = mp;
4879 rxr->lmp = mp;
4880 rxr->fmp->m_pkthdr.len += len;
4885 sendmp = rxr->fmp;
4892 em_fixup_rx(rxr) != 0)
4903 rxr->fmp = rxr->lmp = NULL;
4907 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
4921 rxr->next_to_check = i;
4922 EM_RX_UNLOCK(rxr);
4924 EM_RX_LOCK(rxr);
4925 i = rxr->next_to_check;
4930 em_refresh_mbufs(rxr, i);
4936 if (e1000_rx_unrefreshed(rxr))
4937 em_refresh_mbufs(rxr, i);
4939 rxr->next_to_check = i;
4942 EM_RX_UNLOCK(rxr);
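
em_rxeof (4812-4942) is the heart of the RX path: walk descriptors from next_to_check, chain multi-descriptor frames through fmp/lmp, and hand completed packets up with the ring lock dropped. A heavily condensed sketch; the status-bit names, if_t/if_input, and the return heuristic are assumptions, and the discard, error, checksum, and VLAN handling visible at 4852-4903 is omitted:

static bool
em_rxeof(struct rx_ring *rxr, int count, int *done)
{
	struct adapter	*adapter = rxr->adapter;
	if_t		ifp = adapter->ifp;
	struct mbuf	*mp, *sendmp;
	u32		status = 0;
	u16		len;
	int		i, processed, rxdone = 0;

	EM_RX_LOCK(rxr);
	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (i = rxr->next_to_check, processed = 0; count != 0;) {
		union e1000_rx_desc_extended *cur = &rxr->rx_base[i];
		status = le32toh(cur->wb.upper.status_error);
		if ((status & E1000_RXD_STAT_DD) == 0)
			break;			/* hardware still owns it */
		len = le16toh(cur->wb.upper.length);
		count--;
		processed++;

		bus_dmamap_unload(rxr->rxtag, rxr->rx_buffers[i].map);
		mp = rxr->rx_buffers[i].m_head;
		mp->m_len = len;
		rxr->rx_buffers[i].m_head = NULL;

		/* Chain continuation buffers onto the first mbuf (fmp). */
		if (rxr->fmp == NULL) {
			mp->m_pkthdr.len = len;
			rxr->fmp = rxr->lmp = mp;
		} else {
			mp->m_flags &= ~M_PKTHDR;
			rxr->lmp->m_next = mp;
			rxr->lmp = mp;
			rxr->fmp->m_pkthdr.len += len;
		}
		sendmp = NULL;
		if (status & E1000_RXD_STAT_EOP) {
			sendmp = rxr->fmp;
			sendmp->m_pkthdr.rcvif = ifp;
			rxr->fmp = rxr->lmp = NULL;
			rxdone++;
		}
		if (++i == adapter->num_rx_desc)
			i = 0;
		if (sendmp != NULL) {
			/* Deliver unlocked, then re-read the index in case
			 * it moved while we were out (4921-4925). */
			rxr->next_to_check = i;
			EM_RX_UNLOCK(rxr);
			if_input(ifp, sendmp);
			EM_RX_LOCK(rxr);
			i = rxr->next_to_check;
		}
	}

	/* Catch any remaining refresh work before leaving (4936-4937). */
	if (e1000_rx_unrefreshed(rxr))
		em_refresh_mbufs(rxr, i);
	rxr->next_to_check = i;
	if (done != NULL)
		*done = rxdone;
	EM_RX_UNLOCK(rxr);
	/* TRUE if the next descriptor was already complete: more work. */
	return ((status & E1000_RXD_STAT_DD) != 0);
}
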
4948 em_rx_discard(struct rx_ring *rxr, int i)
4952 rbuf = &rxr->rx_buffers[i];
4953 bus_dmamap_unload(rxr->rxtag, rbuf->map);
4956 if (rxr->fmp != NULL) {
4957 rxr->fmp->m_flags |= M_PKTHDR;
4958 m_freem(rxr->fmp);
4959 rxr->fmp = NULL;
4960 rxr->lmp = NULL;
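
em_rx_discard (4948-4960) backs out a frame mid-assembly; nearly all of it is visible in the fragments. A sketch:

static void
em_rx_discard(struct rx_ring *rxr, int i)
{
	struct em_rxbuffer	*rbuf;

	rbuf = &rxr->rx_buffers[i];
	bus_dmamap_unload(rxr->rxtag, rbuf->map);

	/* Drop the partially assembled chain along with this fragment;
	 * the head mbuf gets its packet-header flag back so m_freem
	 * releases the whole chain cleanly. */
	if (rxr->fmp != NULL) {
		rxr->fmp->m_flags |= M_PKTHDR;
		m_freem(rxr->fmp);
		rxr->fmp = NULL;
		rxr->lmp = NULL;
	}
}
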
4989 em_fixup_rx(struct rx_ring *rxr)
4991 struct adapter *adapter = rxr->adapter;
4996 m = rxr->fmp;
5009 rxr->fmp = n;
5012 m_freem(rxr->fmp);
5013 rxr->fmp = NULL;
5703 struct rx_ring *rxr = adapter->rx_rings;
5751 for (int i = 0; i < adapter->num_queues; i++, txr++, rxr++) {
5781 E1000_RDH(rxr->me),
5786 E1000_RDT(rxr->me),
5790 CTLFLAG_RD, &rxr->rx_irq,
6200 struct rx_ring *rxr = adapter->rx_rings;
6212 for (int i = 0; i < adapter->num_queues; i++, txr++, rxr++) {
6227 rxr->rx_discarded);
6228 device_printf(dev, "RX Next to Check = %d\n", rxr->next_to_check);
6229 device_printf(dev, "RX Next to Refresh = %d\n", rxr->next_to_refresh);