Lines Matching refs:rx

20 static int hfi1_netdev_setup_ctxt(struct hfi1_netdev_rx *rx,
24 struct hfi1_devdata *dd = rx->dd;
121 static int hfi1_netdev_allot_ctxt(struct hfi1_netdev_rx *rx,
125 struct hfi1_devdata *dd = rx->dd;
133 rc = hfi1_netdev_setup_ctxt(rx, *ctxt);
186 static int hfi1_netdev_rxq_init(struct hfi1_netdev_rx *rx)
190 struct hfi1_devdata *dd = rx->dd;
191 struct net_device *dev = &rx->rx_napi;
193 rx->num_rx_q = dd->num_netdev_contexts;
194 rx->rxq = kcalloc_node(rx->num_rx_q, sizeof(*rx->rxq),
197 if (!rx->rxq) {
202 for (i = 0; i < rx->num_rx_q; i++) {
203 struct hfi1_netdev_rxq *rxq = &rx->rxq[i];
205 rc = hfi1_netdev_allot_ctxt(rx, &rxq->rcd);
210 rxq->rx = rx;
230 struct hfi1_netdev_rxq *rxq = &rx->rxq[i];
238 kfree(rx->rxq);
239 rx->rxq = NULL;
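
The hfi1_netdev_rxq_init() lines above (186-239) allocate the rx->rxq array on the device's NUMA node, claim one receive context per queue via hfi1_netdev_allot_ctxt(), and unwind the queues already set up if a later allocation fails (the cleanup around 230-239). A minimal userspace sketch of that allocate-with-rollback pattern; claim_ctxt() and release_ctxt() are hypothetical stand-ins for the context allocation and teardown, not driver functions:

	#include <stdio.h>
	#include <stdlib.h>

	/* Hypothetical stand-ins for hfi1_netdev_allot_ctxt() and its undo. */
	static int claim_ctxt(int i)    { return (i < 8) ? 0 : -1; } /* pretend 8 contexts exist */
	static void release_ctxt(int i) { (void)i; }

	struct rxq { int ctxt; };

	/* Allocate num queues; on any failure, release only the ones already claimed. */
	static struct rxq *rxq_init(int num)
	{
		struct rxq *rxq = calloc(num, sizeof(*rxq));
		int i;

		if (!rxq)
			return NULL;

		for (i = 0; i < num; i++) {
			if (claim_ctxt(i) < 0)
				goto bail;          /* roll back queues 0..i-1 */
			rxq[i].ctxt = i;
		}
		return rxq;

	bail:
		while (--i >= 0)
			release_ctxt(i);
		free(rxq);
		return NULL;
	}

	int main(void)
	{
		struct rxq *ok = rxq_init(4);   /* fits: every context claimed */
		struct rxq *bad = rxq_init(12); /* 9th claim fails: rolled back, NULL */

		printf("rxq_init(4):  %s\n", ok ? "ok" : "failed");
		printf("rxq_init(12): %s\n", bad ? "ok" : "failed");
		free(ok);
		return 0;
	}
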
244 static void hfi1_netdev_rxq_deinit(struct hfi1_netdev_rx *rx)
247 struct hfi1_devdata *dd = rx->dd;
249 for (i = 0; i < rx->num_rx_q; i++) {
250 struct hfi1_netdev_rxq *rxq = &rx->rxq[i];
258 kfree(rx->rxq);
259 rx->rxq = NULL;
260 rx->num_rx_q = 0;
263 static void enable_queues(struct hfi1_netdev_rx *rx)
267 for (i = 0; i < rx->num_rx_q; i++) {
268 struct hfi1_netdev_rxq *rxq = &rx->rxq[i];
270 dd_dev_info(rx->dd, "enabling queue %d on context %d\n", i,
273 hfi1_rcvctrl(rx->dd,
279 static void disable_queues(struct hfi1_netdev_rx *rx)
283 msix_netdev_synchronize_irq(rx->dd);
285 for (i = 0; i < rx->num_rx_q; i++) {
286 struct hfi1_netdev_rxq *rxq = &rx->rxq[i];
288 dd_dev_info(rx->dd, "disabling queue %d on context %d\n", i,
292 hfi1_rcvctrl(rx->dd,
309 struct hfi1_netdev_rx *rx = dd->netdev_rx;
312 if (atomic_fetch_inc(&rx->netdevs))
316 res = hfi1_netdev_rxq_init(rx);
329 struct hfi1_netdev_rx *rx = dd->netdev_rx;
332 if (atomic_fetch_add_unless(&rx->netdevs, -1, 0) == 1) {
334 hfi1_netdev_rxq_deinit(rx);
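
The lines at 309-334 gate the shared receive-queue setup on the rx->netdevs counter: atomic_fetch_inc() lets only the first registering netdev run hfi1_netdev_rxq_init(), and atomic_fetch_add_unless(..., -1, 0) == 1 lets only the last one call hfi1_netdev_rxq_deinit(), without the counter ever going below zero if destroy is called once too often. A rough userspace analogue using C11 atomics; fetch_add_unless() here is an emulation of the kernel primitive via a compare-exchange loop, and the printf() calls stand in for the real init/teardown:

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int netdevs = 0;

	/* Emulation of the kernel's atomic_fetch_add_unless(): add @a unless the
	 * current value is @unless; return the old value either way. */
	static int fetch_add_unless(atomic_int *v, int a, int unless)
	{
		int old = atomic_load(v);

		while (old != unless &&
		       !atomic_compare_exchange_weak(v, &old, old + a))
			;
		return old;
	}

	static void rx_init(void)
	{
		if (atomic_fetch_add(&netdevs, 1))
			return;                 /* not the first user */
		printf("first netdev: init shared rx queues\n");
	}

	static void rx_destroy(void)
	{
		if (fetch_add_unless(&netdevs, -1, 0) == 1)
			printf("last netdev: tear down shared rx queues\n");
	}

	int main(void)
	{
		rx_init();     /* initializes */
		rx_init();     /* no-op, second user */
		rx_destroy();  /* no-op, one user still registered */
		rx_destroy();  /* tears down */
		rx_destroy();  /* counter already 0, nothing happens */
		return 0;
	}
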
342 * hfi1_alloc_rx - Allocates the rx support structure
345 * Allocate the rx structure to support gathering the receive
355 struct hfi1_netdev_rx *rx;
357 dd_dev_info(dd, "allocating rx size %ld\n", sizeof(*rx));
358 rx = kzalloc_node(sizeof(*rx), GFP_KERNEL, dd->node);
360 if (!rx)
362 rx->dd = dd;
363 init_dummy_netdev(&rx->rx_napi);
365 xa_init(&rx->dev_tbl);
366 atomic_set(&rx->enabled, 0);
367 atomic_set(&rx->netdevs, 0);
368 dd->netdev_rx = rx;
376 dd_dev_info(dd, "hfi1 rx freed\n");
393 struct hfi1_netdev_rx *rx;
398 rx = dd->netdev_rx;
399 if (atomic_fetch_inc(&rx->enabled))
403 enable_queues(rx);
409 struct hfi1_netdev_rx *rx;
414 rx = dd->netdev_rx;
415 if (atomic_dec_if_positive(&rx->enabled))
419 disable_queues(rx);
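
Lines 393-419 apply the same counting idea to queue enabling, but with a different primitive on the way down: atomic_fetch_inc(&rx->enabled) means enable_queues() only runs on the 0 -> 1 transition, while atomic_dec_if_positive(&rx->enabled) returns the old value minus one and only decrements if the result stays non-negative, so disable_queues() runs exactly when the count falls from 1 to 0 and a surplus disable cannot push the counter negative. A hedged C11 emulation of just that primitive (dec_if_positive() below is an approximation, not the kernel implementation):

	#include <stdatomic.h>
	#include <stdio.h>

	/* Emulation of the kernel's atomic_dec_if_positive(): decrement only if the
	 * result would stay >= 0; return the old value minus one either way. */
	static int dec_if_positive(atomic_int *v)
	{
		int c = atomic_load(v);
		int dec;

		do {
			dec = c - 1;
			if (dec < 0)
				break;          /* already 0: leave it alone */
		} while (!atomic_compare_exchange_weak(v, &c, dec));
		return dec;
	}

	int main(void)
	{
		atomic_int enabled = 1;

		/* 1 -> 0: returns 0, the case where disable_queues() would run. */
		printf("dec: %d\n", dec_if_positive(&enabled));
		/* already 0: returns -1 and the counter is left untouched. */
		printf("dec: %d\n", dec_if_positive(&enabled));
		return 0;
	}
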
435 struct hfi1_netdev_rx *rx = dd->netdev_rx;
437 return xa_insert(&rx->dev_tbl, id, data, GFP_NOWAIT);
449 struct hfi1_netdev_rx *rx = dd->netdev_rx;
451 return xa_erase(&rx->dev_tbl, id);
462 struct hfi1_netdev_rx *rx = dd->netdev_rx;
464 return xa_load(&rx->dev_tbl, id);
475 struct hfi1_netdev_rx *rx = dd->netdev_rx;
479 ret = xa_find(&rx->dev_tbl, &index, UINT_MAX, XA_PRESENT);
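
The xa_insert()/xa_erase()/xa_load()/xa_find() calls at 435-479 treat rx->dev_tbl as an id-to-pointer table: xa_insert() refuses to overwrite an existing id, and xa_find() walks up from index 0 to the first present entry, with UINT_MAX as the upper bound. A small userspace analogue of that table; the tbl_*() helpers and the fixed-size array are illustrative stand-ins for the XArray, not driver code:

	#include <stdio.h>
	#include <errno.h>

	#define TBL_SIZE 16

	/* Userspace stand-in for rx->dev_tbl: the id is used directly as the index. */
	static void *dev_tbl[TBL_SIZE];

	/* Like xa_insert(): fail with -EBUSY instead of overwriting an existing id. */
	static int tbl_add_data(int id, void *data)
	{
		if (id < 0 || id >= TBL_SIZE)
			return -EINVAL;
		if (dev_tbl[id])
			return -EBUSY;
		dev_tbl[id] = data;
		return 0;
	}

	/* Like xa_erase(): remove and return whatever was stored under @id. */
	static void *tbl_remove_data(int id)
	{
		void *old = dev_tbl[id];

		dev_tbl[id] = NULL;
		return old;
	}

	/* Like xa_load(): plain lookup. */
	static void *tbl_get_data(int id)
	{
		return dev_tbl[id];
	}

	/* Like xa_find(): first present entry, scanning up from index 0. */
	static void *tbl_get_first_data(int *id_out)
	{
		for (int id = 0; id < TBL_SIZE; id++) {
			if (dev_tbl[id]) {
				*id_out = id;
				return dev_tbl[id];
			}
		}
		return NULL;
	}

	int main(void)
	{
		int first_id = -1, dummy = 42;

		tbl_add_data(3, &dummy);
		printf("get(3) = %p\n", tbl_get_data(3));
		printf("first entry at id %d\n",
		       tbl_get_first_data(&first_id) ? first_id : -1);
		tbl_remove_data(3);
		return 0;
	}

xa_find() reports the index it found back through its index pointer, which the plain loop mirrors by returning the id through *id_out.
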