Lines matching refs: dd

24 struct hfi1_devdata *dd = rx->dd;
31 ret = hfi1_create_rcvhdrq(dd, uctxt);
53 hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt);
58 static int hfi1_netdev_allocate_ctxt(struct hfi1_devdata *dd,
64 if (dd->flags & HFI1_FROZEN)
67 ret = hfi1_create_ctxtdata(dd->pport, dd->node, &uctxt);
69 dd_dev_err(dd, "Unable to create ctxtdata, failing open\n");
85 dd_dev_info(dd, "created netdev context %d\n", uctxt->ctxt);
91 static void hfi1_netdev_deallocate_ctxt(struct hfi1_devdata *dd,
100 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
108 msix_free_irq(dd, uctxt->msix_intr);
114 hfi1_clear_ctxt_pkey(dd, uctxt);
125 struct hfi1_devdata *dd = rx->dd;
127 rc = hfi1_netdev_allocate_ctxt(dd, ctxt);
129 dd_dev_err(dd, "netdev ctxt alloc failed %d\n", rc);
135 dd_dev_err(dd, "netdev ctxt setup failed %d\n", rc);
136 hfi1_netdev_deallocate_ctxt(dd, *ctxt);
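Lines 125-136 above show an allocate-then-setup pattern with cleanup on failure. A minimal reconstruction of that pattern follows; the wrapper name hfi1_netdev_allot_ctxt and the setup helper hfi1_netdev_setup_ctxt are assumed, and only the calls visible in the listing are taken as given.

    /* Sketch only: reconstructs the pattern at lines 125-136.  The names
     * hfi1_netdev_allot_ctxt and hfi1_netdev_setup_ctxt are assumptions.
     */
    static int hfi1_netdev_allot_ctxt(struct hfi1_netdev_rx *rx,
                                      struct hfi1_ctxtdata **ctxt)
    {
            struct hfi1_devdata *dd = rx->dd;
            int rc;

            rc = hfi1_netdev_allocate_ctxt(dd, ctxt);
            if (rc) {
                    dd_dev_err(dd, "netdev ctxt alloc failed %d\n", rc);
                    return rc;
            }

            rc = hfi1_netdev_setup_ctxt(rx, *ctxt);
            if (rc) {
                    dd_dev_err(dd, "netdev ctxt setup failed %d\n", rc);
                    /* undo the allocation so the caller sees a clean failure */
                    hfi1_netdev_deallocate_ctxt(dd, *ctxt);
                    *ctxt = NULL;
            }

            return rc;
    }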
145 * @dd: device on which to allocate netdev contexts
156 u32 hfi1_num_netdev_contexts(struct hfi1_devdata *dd, u32 available_contexts,
167 dd_dev_info(dd, "No receive contexts available for netdevs.\n");
172 dd_dev_err(dd, "Unable to allocate cpu_mask for netdevs.\n");
176 cpumask_and(node_cpu_mask, cpu_mask, cpumask_of_node(dd->node));
190 struct hfi1_devdata *dd = rx->dd;
193 rx->num_rx_q = dd->num_netdev_contexts;
195 GFP_KERNEL, dd->node);
198 dd_dev_err(dd, "Unable to allocate netdev queue data\n");
212 dd_dev_info(dd, "Setting rcv queue %d napi to context %d\n",
228 dd_dev_err(dd, "Unable to allot receive context\n");
233 hfi1_netdev_deallocate_ctxt(dd, rxq->rcd);
247 struct hfi1_devdata *dd = rx->dd;
253 hfi1_netdev_deallocate_ctxt(dd, rxq->rcd);
270 dd_dev_info(rx->dd, "enabling queue %d on context %d\n", i,
273 hfi1_rcvctrl(rx->dd,
283 msix_netdev_synchronize_irq(rx->dd);
288 dd_dev_info(rx->dd, "disabling queue %d on context %d\n", i,
292 hfi1_rcvctrl(rx->dd,
305 * @dd: hfi1 dev data
307 int hfi1_netdev_rx_init(struct hfi1_devdata *dd)
309 struct hfi1_netdev_rx *rx = dd->netdev_rx;
325 * @dd: hfi1 dev data
327 int hfi1_netdev_rx_destroy(struct hfi1_devdata *dd)
329 struct hfi1_netdev_rx *rx = dd->netdev_rx;
343 * @dd: hfi1 dev data
348 * Updates dd struct pointer upon success.
353 int hfi1_alloc_rx(struct hfi1_devdata *dd)
357 dd_dev_info(dd, "allocating rx size %ld\n", sizeof(*rx));
358 rx = kzalloc_node(sizeof(*rx), GFP_KERNEL, dd->node);
362 rx->dd = dd;
368 dd->netdev_rx = rx;
373 void hfi1_free_rx(struct hfi1_devdata *dd)
375 if (dd->netdev_rx) {
376 dd_dev_info(dd, "hfi1 rx freed\n");
377 kfree(dd->netdev_rx);
378 dd->netdev_rx = NULL;
389 * @dd: hfi1 dev data
391 void hfi1_netdev_enable_queues(struct hfi1_devdata *dd)
395 if (!dd->netdev_rx)
398 rx = dd->netdev_rx;
407 void hfi1_netdev_disable_queues(struct hfi1_devdata *dd)
411 if (!dd->netdev_rx)
414 rx = dd->netdev_rx;
429 * @dd: hfi1 dev data
433 int hfi1_netdev_add_data(struct hfi1_devdata *dd, int id, void *data)
435 struct hfi1_netdev_rx *rx = dd->netdev_rx;
444 * @dd: hfi1 dev data
447 void *hfi1_netdev_remove_data(struct hfi1_devdata *dd, int id)
449 struct hfi1_netdev_rx *rx = dd->netdev_rx;
457 * @dd: hfi1 dev data
460 void *hfi1_netdev_get_data(struct hfi1_devdata *dd, int id)
462 struct hfi1_netdev_rx *rx = dd->netdev_rx;
470 * @dd: hfi1 dev data
473 void *hfi1_netdev_get_first_data(struct hfi1_devdata *dd, int *start_id)
475 struct hfi1_netdev_rx *rx = dd->netdev_rx;