Lines Matching defs:dd

13  * @dd: valid hfi1 devdata
16 int msix_initialize(struct hfi1_devdata *dd)
30 total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_netdev_contexts;
35 ret = pci_alloc_irq_vectors(dd->pcidev, total, total, PCI_IRQ_MSIX);
37 dd_dev_err(dd, "pci_alloc_irq_vectors() failed: %d\n", ret);
41 entries = kcalloc(total, sizeof(*dd->msix_info.msix_entries),
44 pci_free_irq_vectors(dd->pcidev);
48 dd->msix_info.msix_entries = entries;
49 spin_lock_init(&dd->msix_info.msix_lock);
50 bitmap_zero(dd->msix_info.in_use_msix, total);
51 dd->msix_info.max_requested = total;
52 dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
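
The msix_initialize() lines above show the driver's one-shot MSI-X setup: one general vector plus one per SDMA engine, kernel receive queue, and netdev context, allocated with min == max so the driver either gets the full set or fails. A minimal reconstruction of that path follows; the local declarations and error returns between the matched lines are filled in by assumption, and the struct fields come from the driver's own headers.

int msix_initialize(struct hfi1_devdata *dd)
{
        struct hfi1_msix_entry *entries;
        u32 total;
        int ret;

        /* one general vector + one per SDMA engine, kernel RX queue, netdev context */
        total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_netdev_contexts;

        /* min == max: either every requested vector is granted or the call fails */
        ret = pci_alloc_irq_vectors(dd->pcidev, total, total, PCI_IRQ_MSIX);
        if (ret < 0) {
                dd_dev_err(dd, "pci_alloc_irq_vectors() failed: %d\n", ret);
                return ret;
        }

        entries = kcalloc(total, sizeof(*dd->msix_info.msix_entries), GFP_KERNEL);
        if (!entries) {
                pci_free_irq_vectors(dd->pcidev);
                return -ENOMEM;
        }

        dd->msix_info.msix_entries = entries;
        spin_lock_init(&dd->msix_info.msix_lock);
        bitmap_zero(dd->msix_info.in_use_msix, total);
        dd->msix_info.max_requested = total;
        dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);

        return 0;
}
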
59 * @dd: valid devdata
74 static int msix_request_irq(struct hfi1_devdata *dd, void *arg,
84 spin_lock(&dd->msix_info.msix_lock);
85 nr = find_first_zero_bit(dd->msix_info.in_use_msix,
86 dd->msix_info.max_requested);
87 if (nr < dd->msix_info.max_requested)
88 __set_bit(nr, dd->msix_info.in_use_msix);
89 spin_unlock(&dd->msix_info.msix_lock);
91 if (nr == dd->msix_info.max_requested)
97 irq = pci_irq_vector(dd->pcidev, nr);
98 ret = pci_request_irq(dd->pcidev, nr, handler, thread, arg, name);
100 dd_dev_err(dd,
103 spin_lock(&dd->msix_info.msix_lock);
104 __clear_bit(nr, dd->msix_info.in_use_msix);
105 spin_unlock(&dd->msix_info.msix_lock);
113 me = &dd->msix_info.msix_entries[nr];
119 ret = hfi1_get_irq_affinity(dd, me);
121 dd_dev_err(dd, "%s: unable to pin IRQ %d\n", name, ret);
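
msix_request_irq() reserves a vector index by taking the first clear bit of in_use_msix under msix_lock, requests the IRQ on that vector, and rolls the bit back if the request fails. The sketch below isolates that reserve/rollback pattern; the helper name and trimmed parameter list are illustrative (the driver's version also carries an IRQ type and does the affinity pinning shown in the matched lines), and the error codes are assumptions.

static int msix_reserve_and_request(struct hfi1_devdata *dd, void *arg,
                                    irq_handler_t handler, irq_handler_t thread,
                                    const char *name)
{
        unsigned long nr;
        int ret;

        /* claim the first free vector index while holding msix_lock */
        spin_lock(&dd->msix_info.msix_lock);
        nr = find_first_zero_bit(dd->msix_info.in_use_msix,
                                 dd->msix_info.max_requested);
        if (nr < dd->msix_info.max_requested)
                __set_bit(nr, dd->msix_info.in_use_msix);
        spin_unlock(&dd->msix_info.msix_lock);

        if (nr == dd->msix_info.max_requested)
                return -ENOSPC;         /* assumed: every vector already claimed */

        ret = pci_request_irq(dd->pcidev, nr, handler, thread, arg, name);
        if (ret) {
                /* release the reservation so the slot can be reused */
                spin_lock(&dd->msix_info.msix_lock);
                __clear_bit(nr, dd->msix_info.in_use_msix);
                spin_unlock(&dd->msix_info.msix_lock);
                return ret;
        }

        /* the driver then records the irq/arg in msix_entries[nr] and pins affinity */
        return nr;
}
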
131 int nr = msix_request_irq(rcd->dd, rcd, handler, thread,
144 remap_intr(rcd->dd, IS_RCVAVAIL_START + rcd->ctxt, nr);
159 rcd->dd->unit, rcd->ctxt);
175 rcd->dd->unit, rcd->ctxt);
191 sde->dd->unit, sde->this_idx);
192 nr = msix_request_irq(sde->dd, sde, sdma_interrupt, NULL,
197 remap_sdma_interrupts(sde->dd, sde->this_idx, nr);
205 * @dd: valid device data
207 int msix_request_general_irq(struct hfi1_devdata *dd)
212 snprintf(name, sizeof(name), DRIVER_NAME "_%d", dd->unit);
213 nr = msix_request_irq(dd, dd, general_interrupt, NULL, IRQ_GENERAL,
220 msix_free_irq(dd, (u8)nr);
221 dd_dev_err(dd, "Invalid index %d for GENERAL IRQ\n", nr);
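
msix_request_general_irq() asks for the general interrupt before any other vector, so it is expected to land on MSI-X index 0; the matched lines show the driver freeing the vector and logging an error when it does not. A sketch of that check, assuming the name-buffer size and the final error return that the matched lines do not show:

int msix_request_general_irq(struct hfi1_devdata *dd)
{
        char name[64];          /* assumed buffer size for the IRQ name */
        int nr;

        snprintf(name, sizeof(name), DRIVER_NAME "_%d", dd->unit);
        nr = msix_request_irq(dd, dd, general_interrupt, NULL, IRQ_GENERAL,
                              name);
        if (nr < 0)
                return nr;

        /* the general interrupt must be MSI-X vector 0 */
        if (nr) {
                msix_free_irq(dd, (u8)nr);
                dd_dev_err(dd, "Invalid index %d for GENERAL IRQ\n", nr);
                return -EINVAL;         /* assumed error code */
        }

        return 0;
}
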
230 * @dd: valid devdata structure
233 static void enable_sdma_srcs(struct hfi1_devdata *dd, int i)
235 set_intr_bits(dd, IS_SDMA_START + i, IS_SDMA_START + i, true);
236 set_intr_bits(dd, IS_SDMA_PROGRESS_START + i,
238 set_intr_bits(dd, IS_SDMA_IDLE_START + i, IS_SDMA_IDLE_START + i, true);
239 set_intr_bits(dd, IS_SDMAENG_ERR_START + i, IS_SDMAENG_ERR_START + i,
245 * @dd: valid devdata structure
250 int msix_request_irqs(struct hfi1_devdata *dd)
253 int ret = msix_request_general_irq(dd);
258 for (i = 0; i < dd->num_sdma; i++) {
259 struct sdma_engine *sde = &dd->per_sdma[i];
264 enable_sdma_srcs(sde->dd, i);
267 for (i = 0; i < dd->n_krcv_queues; i++) {
268 struct hfi1_ctxtdata *rcd = hfi1_rcd_get_by_index_safe(dd, i);
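
msix_request_irqs() requests the general interrupt first, then walks the SDMA engines and the kernel receive contexts, enabling each engine's interrupt sources as it goes. Only the loop headers appear in the matched lines, so the per-engine and per-context request calls below use placeholder names, and the NULL check and reference drop on the looked-up context are assumptions:

int msix_request_irqs(struct hfi1_devdata *dd)
{
        int ret = msix_request_general_irq(dd);
        int i;

        if (ret)
                return ret;

        for (i = 0; i < dd->num_sdma; i++) {
                struct sdma_engine *sde = &dd->per_sdma[i];

                /* placeholder for the per-engine vector request */
                ret = request_sdma_vector(sde);
                if (ret)
                        return ret;
                enable_sdma_srcs(sde->dd, i);
        }

        for (i = 0; i < dd->n_krcv_queues; i++) {
                struct hfi1_ctxtdata *rcd = hfi1_rcd_get_by_index_safe(dd, i);

                if (!rcd)               /* assumed NULL check */
                        continue;
                /* placeholder for the per-context vector request */
                ret = request_rcd_vector(rcd);
                hfi1_rcd_put(rcd);      /* assumed: drop the reference taken by the lookup */
                if (ret)
                        return ret;
        }

        return 0;
}
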
282 * @dd: valid devdata
286 void msix_free_irq(struct hfi1_devdata *dd, u8 msix_intr)
290 if (msix_intr >= dd->msix_info.max_requested)
293 me = &dd->msix_info.msix_entries[msix_intr];
298 hfi1_put_irq_affinity(dd, me);
299 pci_free_irq(dd->pcidev, msix_intr, me->arg);
303 spin_lock(&dd->msix_info.msix_lock);
304 __clear_bit(msix_intr, dd->msix_info.in_use_msix);
305 spin_unlock(&dd->msix_info.msix_lock);
310 * @dd: valid device data structure
314 void msix_clean_up_interrupts(struct hfi1_devdata *dd)
317 struct hfi1_msix_entry *me = dd->msix_info.msix_entries;
320 for (i = 0; i < dd->msix_info.max_requested; i++, me++)
321 msix_free_irq(dd, i);
324 kfree(dd->msix_info.msix_entries);
325 dd->msix_info.msix_entries = NULL;
326 dd->msix_info.max_requested = 0;
328 pci_free_irq_vectors(dd->pcidev);
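
msix_clean_up_interrupts() tears things down in the reverse order of setup: free every requested vector, drop the entry table, then release the PCI MSI-X allocation itself. A sketch assembled from the matched lines, with only the loop-variable declaration added:

void msix_clean_up_interrupts(struct hfi1_devdata *dd)
{
        int i;
        struct hfi1_msix_entry *me = dd->msix_info.msix_entries;

        /* release each vector: affinity, pci_free_irq(), and the in_use_msix bit */
        for (i = 0; i < dd->msix_info.max_requested; i++, me++)
                msix_free_irq(dd, i);

        kfree(dd->msix_info.msix_entries);
        dd->msix_info.msix_entries = NULL;
        dd->msix_info.max_requested = 0;

        pci_free_irq_vectors(dd->pcidev);
}
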
333 * @dd: valid devdata
335 void msix_netdev_synchronize_irq(struct hfi1_devdata *dd)
338 int ctxt_count = hfi1_netdev_ctxt_count(dd);
341 struct hfi1_ctxtdata *rcd = hfi1_netdev_get_ctxt(dd, i);
344 me = &dd->msix_info.msix_entries[rcd->msix_intr];