Lines Matching defs:gc

102 struct gdma_context *gc = device_get_softc(dev);
110 err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
112 device_printf(gc->dev,
123 if (gc->num_msix_usable > resp.max_msix)
124 gc->num_msix_usable = resp.max_msix;
126 if (gc->num_msix_usable <= 1)
129 gc->max_num_queues = mp_ncpus;
130 if (gc->max_num_queues > MANA_MAX_NUM_QUEUES)
131 gc->max_num_queues = MANA_MAX_NUM_QUEUES;
133 if (gc->max_num_queues > resp.max_eq)
134 gc->max_num_queues = resp.max_eq;
136 if (gc->max_num_queues > resp.max_cq)
137 gc->max_num_queues = resp.max_cq;
139 if (gc->max_num_queues > resp.max_sq)
140 gc->max_num_queues = resp.max_sq;
142 if (gc->max_num_queues > resp.max_rq)
143 gc->max_num_queues = resp.max_rq;
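
The block above (lines 129-143) derives the usable queue count: start from mp_ncpus, cap it at MANA_MAX_NUM_QUEUES, then clamp it to each per-resource maximum the hardware reported. A minimal user-space sketch of that min-of-limits logic; the resp layout and the MANA_MAX_NUM_QUEUES value here are assumptions for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define MANA_MAX_NUM_QUEUES 16            /* assumed value for the sketch */

    struct max_res_resp {                     /* hypothetical stand-in for resp */
        uint32_t max_eq, max_cq, max_sq, max_rq;
    };

    static uint32_t
    clamp_num_queues(uint32_t ncpus, const struct max_res_resp *r)
    {
        uint32_t n = ncpus;                   /* line 129: start at mp_ncpus */

        if (n > MANA_MAX_NUM_QUEUES)
            n = MANA_MAX_NUM_QUEUES;
        if (n > r->max_eq)
            n = r->max_eq;
        if (n > r->max_cq)
            n = r->max_cq;
        if (n > r->max_sq)
            n = r->max_sq;
        if (n > r->max_rq)
            n = r->max_rq;
        return (n);
    }

    int
    main(void)
    {
        struct max_res_resp r = { 32, 24, 24, 24 };

        printf("%u\n", clamp_num_queues(64, &r));   /* prints 16 */
        return (0);
    }
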
151 struct gdma_context *gc = device_get_softc(dev);
162 err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
164 device_printf(gc->dev,
184 gc->mana.gdma_context = gc;
185 gc->mana.dev_id = gd_dev;
189 return gc->mana.dev_id.type == 0 ? ENODEV : 0;
193 mana_gd_send_request(struct gdma_context *gc, uint32_t req_len,
196 struct hw_channel_context *hwc = gc->hwc.driver_data;
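
Lines 193-196 are the funnel for every control-path message in this listing: mana_gd_send_request() hands the request to the hardware channel found via gc->hwc.driver_data, and callers check both the transport error and the status carried in the response header. A user-space model of that synchronous request/response convention; the header layout is illustrative, not the driver's real wire format:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct resp_hdr {                     /* hypothetical response header */
        uint32_t status;                  /* 0 on success */
    };

    static int
    send_request(const void *req, size_t req_len, void *resp, size_t resp_len)
    {
        (void)req;
        (void)req_len;
        memset(resp, 0, resp_len);        /* canned success reply */
        return (0);                       /* 0: message was delivered */
    }

    int
    main(void)
    {
        struct {
            struct resp_hdr hdr;
            uint32_t value;
        } resp;
        uint32_t req = 42;
        int err;

        err = send_request(&req, sizeof(req), &resp, sizeof(resp));
        if (err != 0 || resp.hdr.status != 0) {
            /* The driver reports this via device_printf() and bails. */
            fprintf(stderr, "err %d status 0x%x\n", err, resp.hdr.status);
            return (1);
        }
        printf("ok\n");
        return (0);
    }
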
214 mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
221 if (!gc || !gmi)
227 err = bus_dma_tag_create(bus_get_dma_tag(gc->dev), /* parent */
239 device_printf(gc->dev,
252 device_printf(gc->dev,
261 device_printf(gc->dev,
268 gmi->dev = gc->dev;
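
mana_gd_alloc_memory() (lines 214-268) validates its arguments, builds a DMA tag parented on bus_get_dma_tag(gc->dev), then allocates and maps the buffer, printing and unwinding at each failing step. A user-space sketch of the same shape, with aligned_alloc() standing in for busdma; the struct and function names are assumptions:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    struct mem_info {                     /* stand-in for struct gdma_mem_info */
        void   *virt_addr;
        size_t  length;
    };

    static int
    alloc_memory(struct mem_info *gmi, size_t length)
    {
        size_t pagesz = (size_t)sysconf(_SC_PAGESIZE);
        void *buf;

        /* Mirrors the argument checks at line 221. */
        if (gmi == NULL || length == 0 || (length % pagesz) != 0)
            return (EINVAL);

        /* busdma analog: page-aligned, zeroed backing memory. */
        buf = aligned_alloc(pagesz, length);
        if (buf == NULL)
            return (ENOMEM);
        memset(buf, 0, length);

        gmi->virt_addr = buf;
        gmi->length = length;
        return (0);
    }

    int
    main(void)
    {
        struct mem_info gmi;
        int err;

        err = alloc_memory(&gmi, (size_t)sysconf(_SC_PAGESIZE));
        if (err != 0) {
            fprintf(stderr, "alloc failed: %d\n", err);
            return (1);
        }
        printf("allocated %zu bytes at %p\n", gmi.length, gmi.virt_addr);
        free(gmi.virt_addr);
        return (0);
    }
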
285 mana_gd_destroy_doorbell_page(struct gdma_context *gc, int doorbell_page)
298 err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
300 device_printf(gc->dev,
310 mana_gd_allocate_doorbell_page(struct gdma_context *gc, int *doorbell_page)
326 err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
328 device_printf(gc->dev,
340 mana_gd_create_hw_eq(struct gdma_context *gc,
362 err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
364 device_printf(gc->dev,
379 struct gdma_context *gc = queue->gdma_dev->gdma_context;
396 err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
398 device_printf(gc->dev,
413 mana_gd_ring_doorbell(struct gdma_context *gc, uint32_t db_index,
420 addr = (char *)gc->db_page_base + gc->db_page_size * db_index;
471 mana_gd_wq_ring_doorbell(struct gdma_context *gc, struct gdma_queue *queue)
473 mana_gd_ring_doorbell(gc, queue->gdma_dev->doorbell, queue->type,
480 struct gdma_context *gc = cq->gdma_dev->gdma_context;
486 mana_gd_ring_doorbell(gc, cq->gdma_dev->doorbell, cq->type, cq->id,
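
Line 420 is the heart of doorbell ringing: each doorbell client owns a page-sized window, so the register address is db_page_base plus db_page_size times the client's index. A small sketch of that arithmetic over a fake mapped region; the bounds check is an addition for the sketch (the driver trusts db_index):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void *
    doorbell_addr(void *db_page_base, uint32_t db_page_size,
        uint32_t db_index, uint32_t num_pages)
    {
        if (db_index >= num_pages)            /* sketch-only guard */
            return (NULL);
        /* Same computation as line 420 above. */
        return ((char *)db_page_base + (size_t)db_page_size * db_index);
    }

    int
    main(void)
    {
        uint32_t page_size = 4096, npages = 4;
        void *base = calloc(npages, page_size);   /* stands in for BAR space */
        void *db = doorbell_addr(base, page_size, 2, npages);

        printf("doorbell 2 at offset %td\n", (char *)db - (char *)base);
        free(base);
        return (0);
    }
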
494 struct gdma_context *gc = eq->gdma_dev->gdma_context;
510 if (cq_id >= gc->max_num_cqs) {
513 cq_id, gc->max_num_cqs);
517 cq = gc->cq_table[cq_id];
530 gc->test_event_eq_id = eq->id;
535 complete(&gc->eq_test_event);
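
The EQE path (lines 510-535) bounds-checks the completion-queue id against gc->max_num_cqs, then indexes gc->cq_table; a NULL slot simply means the CQ is already gone. A minimal sketch of that lookup:

    #include <stdio.h>

    struct cq { unsigned id; };

    /* Reject out-of-range ids and tolerate an already-destroyed
     * (NULL) slot, as lines 510-517 do. */
    static struct cq *
    cq_lookup(struct cq **table, unsigned max_cqs, unsigned cq_id)
    {
        if (cq_id >= max_cqs) {
            fprintf(stderr, "cq id %u out of range (max %u)\n",
                cq_id, max_cqs);
            return (NULL);
        }
        return (table[cq_id]);
    }

    int
    main(void)
    {
        struct cq a = { 1 };
        struct cq *table[4] = { NULL, &a, NULL, NULL };

        printf("slot 1 -> %p\n", (void *)cq_lookup(table, 4, 1));
        printf("slot 9 -> %p\n", (void *)cq_lookup(table, 4, 9));
        return (0);
    }
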
561 struct gdma_context *gc;
566 gc = eq->gdma_dev->gdma_context;
589 device_printf(gc->dev,
605 device_printf(gc->dev, "%p: %x\t%x\t%x\t%x\n",
624 mana_gd_ring_doorbell(gc, eq->gdma_dev->doorbell, eq->type, eq->id,
634 struct gdma_context *gc;
639 gc = gd->gdma_context;
640 r = &gc->msix_resource;
658 if (unlikely(msi_index >= gc->num_msix_usable)) {
659 device_printf(gc->dev,
661 msi_index, gc->num_msix_usable);
665 gic = &gc->irq_contexts[msi_index];
668 device_printf(gc->dev,
688 struct gdma_context *gc;
692 gc = gd->gdma_context;
693 r = &gc->msix_resource;
697 if (unlikely(msix_index >= gc->num_msix_usable))
700 gic = &gc->irq_contexts[msix_index];
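
IRQ registration and deregistration (lines 634-700) take a vector from gc->msix_resource, reject indexes at or above gc->num_msix_usable, and bind the handler through gc->irq_contexts[msi_index]. A user-space sketch of a first-free-bit vector allocator of the kind msix_resource suggests; the bitmap representation is an assumption:

    #include <stdint.h>
    #include <stdio.h>

    #define NVEC 8                        /* assumed vector count */

    static int
    vec_alloc(uint32_t *map)
    {
        for (int i = 0; i < NVEC; i++) {
            if ((*map & (1u << i)) == 0) {
                *map |= 1u << i;
                return (i);
            }
        }
        return (-1);                      /* no usable vector left */
    }

    static void
    vec_free(uint32_t *map, int i)
    {
        *map &= ~(1u << i);
    }

    int
    main(void)
    {
        uint32_t map = 0;
        int a = vec_alloc(&map), b = vec_alloc(&map);

        printf("got vectors %d and %d\n", a, b);
        vec_free(&map, a);
        printf("after free, next is %d\n", vec_alloc(&map));
        return (0);
    }
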
715 mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq)
719 device_t dev = gc->dev;
722 sx_xlock(&gc->eq_test_event_sx);
724 init_completion(&gc->eq_test_event);
725 gc->test_event_eq_id = INVALID_QUEUE_ID;
733 err = mana_gd_send_request(gc, sizeof(req), &req,
748 if (wait_for_completion_timeout(&gc->eq_test_event, 30 * hz)) {
754 if (eq->id != gc->test_event_eq_id) {
757 gc->test_event_eq_id, eq->id);
763 sx_xunlock(&gc->eq_test_event_sx);
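
mana_gd_test_eq() (lines 715-763) serializes tests under an sx lock, re-arms a completion object, sends the test message, and waits up to 30 seconds (30 * hz ticks) for the interrupt path to call complete() (lines 530-535). A pthread model of that completion primitive; build with cc -pthread:

    #include <pthread.h>
    #include <stdio.h>
    #include <time.h>

    /* Minimal model of init_completion() / complete() /
     * wait_for_completion_timeout(). */
    struct completion {
        pthread_mutex_t lock;
        pthread_cond_t  cv;
        int             done;
    };

    static void
    complete(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        c->done = 1;
        pthread_cond_signal(&c->cv);
        pthread_mutex_unlock(&c->lock);
    }

    /* Returns nonzero if completed within 'secs' seconds, 0 on timeout. */
    static int
    wait_timeout(struct completion *c, unsigned secs)
    {
        struct timespec ts;
        int ok = 1;

        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec += secs;
        pthread_mutex_lock(&c->lock);
        while (!c->done && ok)
            ok = pthread_cond_timedwait(&c->cv, &c->lock, &ts) == 0;
        pthread_mutex_unlock(&c->lock);
        return (c->done);
    }

    static void *
    irq_path(void *arg)
    {
        complete(arg);                    /* stands in for the EQE handler */
        return (NULL);
    }

    int
    main(void)
    {
        struct completion c = { PTHREAD_MUTEX_INITIALIZER,
            PTHREAD_COND_INITIALIZER, 0 };
        pthread_t t;

        pthread_create(&t, NULL, irq_path, &c);
        printf(wait_timeout(&c, 30) ? "test event seen\n" : "timed out\n");
        pthread_join(&t, NULL);
        return (0);
    }
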
768 mana_gd_destroy_eq(struct gdma_context *gc, bool flush_evenets,
774 err = mana_gd_test_eq(gc, queue);
776 device_printf(gc->dev,
790 struct gdma_context *gc = gd->gdma_context;
791 device_t dev = gc->dev;
818 err = mana_gd_create_hw_eq(gc, queue);
822 err = mana_gd_test_eq(gc, queue);
830 mana_gd_destroy_eq(gc, false, queue);
847 mana_gd_destroy_cq(struct gdma_context *gc,
852 if (id >= gc->max_num_cqs)
855 if (!gc->cq_table[id])
858 gc->cq_table[id] = NULL;
865 struct gdma_context *gc = gd->gdma_context;
875 err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
905 mana_gd_destroy_dma_region(struct gdma_context *gc,
919 err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp),
922 device_printf(gc->dev,
938 struct gdma_context *gc = gd->gdma_context;
956 hwc = gc->hwc.driver_data;
979 err = mana_gd_send_request(gc, req_msg_size, req, sizeof(resp), &resp);
985 device_printf(gc->dev, "Failed to create DMA region: 0x%x\n",
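
Creating a DMA region (lines 938-985) sends a message whose length depends on how many pages back the region, which is why line 979 passes a computed req_msg_size instead of sizeof(req). A sketch of sizing such a request with a flexible array member; the field names are illustrative:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative request carrying a variable-length page list. */
    struct create_region_req {
        uint64_t length;
        uint32_t num_pages;
        uint64_t page_addr_list[];        /* flexible array member */
    };

    int
    main(void)
    {
        uint32_t num_pages = 3;
        size_t req_msg_size = sizeof(struct create_region_req) +
            (size_t)num_pages * sizeof(uint64_t);
        struct create_region_req *req = calloc(1, req_msg_size);

        req->num_pages = num_pages;
        for (uint32_t i = 0; i < num_pages; i++)
            req->page_addr_list[i] = 0x100000ULL + i * 4096;

        printf("req_msg_size = %zu bytes\n", req_msg_size);
        free(req);
        return (0);
    }
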
1002 struct gdma_context *gc = gd->gdma_context;
1015 err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
1049 struct gdma_context *gc = gd->gdma_context;
1063 err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
1093 mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
1099 mana_gd_destroy_eq(gc, queue->eq.disable_needed, queue);
1103 mana_gd_destroy_cq(gc, queue);
1113 device_printf(gc->dev,
1119 mana_gd_destroy_dma_region(gc, gmi->dma_region_handle);
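
mana_gd_destroy_queue() (lines 1093-1119) dispatches on the queue type, tearing down the EQ or CQ bookkeeping first and releasing the DMA region last. A sketch of that dispatch; the enum constants are assumed stand-ins for the driver's queue types:

    #include <stdio.h>

    enum queue_type { Q_EQ, Q_CQ, Q_SQ, Q_RQ };   /* assumed names */

    static void
    destroy_queue(enum queue_type t)
    {
        switch (t) {
        case Q_EQ:
            puts("destroy eq");           /* line 1099 */
            break;
        case Q_CQ:
            puts("destroy cq");           /* line 1103 */
            break;
        default:
            puts("no hw object to destroy");
            break;
        }
        puts("destroy dma region");       /* line 1119, always last */
    }

    int
    main(void)
    {
        destroy_queue(Q_EQ);
        destroy_queue(Q_SQ);
        return (0);
    }
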
1130 struct gdma_context *gc = device_get_softc(dev);
1149 err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
1151 device_printf(gc->dev,
1163 struct gdma_context *gc = gd->gdma_context;
1177 err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
1179 device_printf(gc->dev,
1198 struct gdma_context *gc = gd->gdma_context;
1211 err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
1213 device_printf(gc->dev,
1333 struct gdma_context *gc;
1364 gc = wq->gdma_dev->gdma_context;
1365 device_printf(gc->dev, "unsuccessful flow control!\n");
1393 struct gdma_context *gc = queue->gdma_dev->gdma_context;
1400 mana_gd_wq_ring_doorbell(gc, queue);
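
Posting work (lines 1333-1400) updates the queue's producer state in host memory and only then rings the doorbell; if arming flow control fails, the driver logs the line-1365 message. A sketch of the publish-then-ring ordering using a C11 release fence; the MMIO register is modeled here as an atomic store:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t queue_head;           /* producer index in host memory */
    static _Atomic uint32_t doorbell;     /* stands in for the MMIO register */

    /* The release fence keeps the WQE/head stores visible before the
     * doorbell write, mirroring the ordering line 1400 relies on. */
    static void
    wq_ring_doorbell(uint32_t new_head)
    {
        queue_head = new_head;
        atomic_thread_fence(memory_order_release);
        atomic_store_explicit(&doorbell, new_head, memory_order_relaxed);
    }

    int
    main(void)
    {
        wq_ring_doorbell(1);
        printf("head %u, doorbell %u\n", queue_head, atomic_load(&doorbell));
        return (0);
    }
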
1507 mana_gd_init_registers(struct gdma_context *gc)
1509 uintptr_t bar0_va = rman_get_bushandle(gc->bar0);
1510 vm_paddr_t bar0_pa = rman_get_start(gc->bar0);
1512 gc->db_page_size = mana_gd_r32(gc, GDMA_REG_DB_PAGE_SIZE) & 0xFFFF;
1514 gc->db_page_base =
1515 (void *)(bar0_va + (size_t)mana_gd_r64(gc, GDMA_REG_DB_PAGE_OFFSET));
1517 gc->phys_db_page_base =
1518 bar0_pa + mana_gd_r64(gc, GDMA_REG_DB_PAGE_OFFSET);
1520 gc->shm_base =
1521 (void *)(bar0_va + (size_t)mana_gd_r64(gc, GDMA_REG_SHM_OFFSET));
1525 gc->db_page_size, gc->db_page_base, gc->shm_base);
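
mana_gd_init_registers() (lines 1507-1525) derives everything from BAR0: the doorbell page size is the low 16 bits of one register, and the doorbell and shared-memory bases are BAR-relative offsets read from 64-bit registers, kept in both VA and PA form. A sketch over a fake in-memory BAR; the register offsets are placeholders, not the real GDMA_REG_* values:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define REG_DB_PAGE_SIZE   0x00       /* placeholder offsets */
    #define REG_DB_PAGE_OFFSET 0x08

    int
    main(void)
    {
        uint64_t bar0_mem[8] = { 0 };     /* fake 64-byte BAR0 */
        uint8_t *bar0 = (uint8_t *)bar0_mem;
        uint32_t db_page_size;
        uint64_t db_off;

        /* Pretend the device reports a 4 KiB doorbell page whose base
         * sits 0x20 bytes into the BAR (kept tiny for the sketch). */
        memcpy(bar0 + REG_DB_PAGE_SIZE, &(uint32_t){ 0x00041000 }, 4);
        memcpy(bar0 + REG_DB_PAGE_OFFSET, &(uint64_t){ 0x20 }, 8);

        memcpy(&db_page_size, bar0 + REG_DB_PAGE_SIZE, 4);
        db_page_size &= 0xFFFF;           /* mask, as line 1512 does */
        memcpy(&db_off, bar0 + REG_DB_PAGE_OFFSET, 8);

        printf("db_page_size %u, db_page_base = bar0 + 0x%llx\n",
            db_page_size, (unsigned long long)db_off);
        return (0);
    }
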
1563 mana_gd_free_pci_res(struct gdma_context *gc)
1565 if (!gc || !gc->dev)
1568 if (gc->bar0 != NULL) {
1569 bus_release_resource(gc->dev, SYS_RES_MEMORY,
1570 PCIR_BAR(GDMA_BAR0), gc->bar0);
1573 if (gc->msix != NULL) {
1574 bus_release_resource(gc->dev, SYS_RES_MEMORY,
1575 gc->msix_rid, gc->msix);
1583 struct gdma_context *gc = device_get_softc(dev);
1617 gc->irq_contexts = malloc(nvec * sizeof(struct gdma_irq_context),
1619 if (!gc->irq_contexts) {
1625 gic = &gc->irq_contexts[i];
1656 rc = mana_gd_alloc_res_map(nvec, &gc->msix_resource,
1664 gc->max_num_msix = nvec;
1665 gc->num_msix_usable = nvec;
1673 gic = &gc->irq_contexts[i];
1700 free(gc->irq_contexts, M_DEVBUF);
1701 gc->irq_contexts = NULL;
1711 struct gdma_context *gc = device_get_softc(dev);
1715 mana_gd_free_res_map(&gc->msix_resource);
1717 for (i = 0; i < gc->max_num_msix; i++) {
1718 gic = &gc->irq_contexts[i];
1741 gc->max_num_msix = 0;
1742 gc->num_msix_usable = 0;
1743 free(gc->irq_contexts, M_DEVBUF);
1744 gc->irq_contexts = NULL;
1787 struct gdma_context *gc;
1791 gc = device_get_softc(dev);
1792 gc->dev = dev;
1799 gc->bar0 = mana_gd_alloc_bar(dev, GDMA_BAR0);
1800 if (unlikely(gc->bar0 == NULL)) {
1808 gc->gd_bus.bar0_t = rman_get_bustag(gc->bar0);
1809 gc->gd_bus.bar0_h = rman_get_bushandle(gc->bar0);
1816 gc->msix = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1818 if (unlikely(gc->msix == NULL)) {
1824 gc->msix_rid = msix_rid;
1826 if (unlikely(gc->gd_bus.bar0_h == 0)) {
1832 mana_gd_init_registers(gc);
1834 mana_smc_init(&gc->shm_channel, gc->dev, gc->shm_base);
1841 sx_init(&gc->eq_test_event_sx, "gdma test event sx");
1843 rc = mana_hwc_create_channel(gc);
1870 rc = mana_probe(&gc->mana);
1879 mana_hwc_destroy_channel(gc);
1883 mana_gd_free_pci_res(gc);
1900 struct gdma_context *gc = device_get_softc(dev);
1902 mana_remove(&gc->mana);
1904 mana_hwc_destroy_channel(gc);
1908 mana_gd_free_pci_res(gc);
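
Attach (lines 1787-1883) acquires resources in order, and its error legs unwind in reverse, which is the sequence detach (lines 1900-1908) runs in full: mana_remove(), mana_hwc_destroy_channel(), mana_gd_free_pci_res(). A sketch of that paired acquire/unwind structure; the stubs are placeholders:

    #include <stdio.h>

    /* Stubs standing in for the driver steps; 0 means success. */
    static int alloc_pci(void)    { return (0); }
    static int create_hwc(void)   { return (0); }
    static int probe_mana(void)   { return (0); }
    static void destroy_hwc(void) { puts("mana_hwc_destroy_channel"); }
    static void free_pci(void)    { puts("mana_gd_free_pci_res"); }
    static void remove_mana(void) { puts("mana_remove"); }

    /* Acquire in order, unwind in reverse on failure, matching the
     * error legs at lines 1879 and 1883. */
    static int
    attach(void)
    {
        int rc;

        if ((rc = alloc_pci()) != 0)
            return (rc);
        if ((rc = create_hwc()) != 0)
            goto err_pci;
        if ((rc = probe_mana()) != 0)
            goto err_hwc;
        return (0);

    err_hwc:
        destroy_hwc();
    err_pci:
        free_pci();
        return (rc);
    }

    /* Detach runs the full teardown, lines 1900-1908. */
    static void
    detach(void)
    {
        remove_mana();
        destroy_hwc();
        free_pci();
    }

    int
    main(void)
    {
        if (attach() == 0)
            detach();
        return (0);
    }
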