Lines Matching refs:eq

27 #define GET_EQ_NUM_PAGES(eq, pg_size)           \
28 (ALIGN((eq)->q_len * (eq)->elem_size, pg_size) / (pg_size))
30 #define GET_EQ_NUM_ELEMS_IN_PG(eq, pg_size) ((pg_size) / (eq)->elem_size)
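
Taken together, these two macros size the queue's backing store: the total byte length (q_len * elem_size) is rounded up to a whole number of pages, and each page then holds pg_size / elem_size elements. A minimal userspace sketch of the same arithmetic; the 256/64/4096 figures are illustrative, not values from the driver:

    #include <stdio.h>

    /* Round len up to a multiple of align (a power of two), mirroring
     * what the kernel's ALIGN() macro does in that case. */
    #define ALIGN_UP(len, align)  (((len) + (align) - 1) & ~((align) - 1))

    int main(void)
    {
        unsigned int q_len = 256;      /* illustrative queue depth */
        unsigned int elem_size = 64;   /* illustrative element size, bytes */
        unsigned int pg_size = 4096;   /* illustrative page size */

        unsigned int num_pages = ALIGN_UP(q_len * elem_size, pg_size) / pg_size;
        unsigned int elems_per_pg = pg_size / elem_size;

        printf("pages=%u elems/page=%u\n", num_pages, elems_per_pg); /* 4, 64 */
        return 0;
    }
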
32 #define EQ_CONS_IDX_REG_ADDR(eq) (((eq)->type == HINIC_AEQ) ? \
33 HINIC_CSR_AEQ_CONS_IDX_ADDR((eq)->q_id) : \
34 HINIC_CSR_CEQ_CONS_IDX_ADDR((eq)->q_id))
36 #define EQ_PROD_IDX_REG_ADDR(eq) (((eq)->type == HINIC_AEQ) ? \
37 HINIC_CSR_AEQ_PROD_IDX_ADDR((eq)->q_id) : \
38 HINIC_CSR_CEQ_PROD_IDX_ADDR((eq)->q_id))
40 #define EQ_HI_PHYS_ADDR_REG(eq, pg_num) (((eq)->type == HINIC_AEQ) ? \
41 HINIC_CSR_AEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num) : \
42 HINIC_CSR_CEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num))
44 #define EQ_LO_PHYS_ADDR_REG(eq, pg_num) (((eq)->type == HINIC_AEQ) ? \
45 HINIC_CSR_AEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num) : \
46 HINIC_CSR_CEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num))
48 #define GET_EQ_ELEMENT(eq, idx) \
49 ((eq)->virt_addr[(idx) / (eq)->num_elem_in_pg] + \
50 (((idx) & ((eq)->num_elem_in_pg - 1)) * (eq)->elem_size))
52 #define GET_AEQ_ELEM(eq, idx) ((struct hinic_aeq_elem *) \
53 GET_EQ_ELEMENT(eq, idx))
55 #define GET_CEQ_ELEM(eq, idx) ((u32 *) \
56 GET_EQ_ELEMENT(eq, idx))
58 #define GET_CURR_AEQ_ELEM(eq) GET_AEQ_ELEM(eq, (eq)->cons_idx)
60 #define GET_CURR_CEQ_ELEM(eq) GET_CEQ_ELEM(eq, (eq)->cons_idx)
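
GET_EQ_ELEMENT splits an element index into a page (idx / num_elem_in_pg, used to pick an entry of virt_addr[]) and a byte offset inside that page (idx masked by num_elem_in_pg - 1, scaled by elem_size); the mask form is why init_eq later rejects a num_elem_in_pg that is not a power of two. A self-contained sketch of the same lookup, with a hypothetical eq_element() helper standing in for the macro and plain parameters standing in for the struct hinic_eq fields:

    #include <stdint.h>
    #include <stdio.h>

    /* virt_addr, num_elem_in_pg and elem_size mirror the like-named
     * struct hinic_eq fields used by GET_EQ_ELEMENT(). */
    static void *eq_element(void **virt_addr, unsigned int num_elem_in_pg,
                            unsigned int elem_size, unsigned int idx)
    {
        unsigned int pg = idx / num_elem_in_pg;                      /* which DMA page */
        unsigned int off = (idx & (num_elem_in_pg - 1)) * elem_size; /* offset in that page */

        return (uint8_t *)virt_addr[pg] + off;
    }

    int main(void)
    {
        static unsigned char page0[4096], page1[4096];   /* two fake queue pages */
        void *pages[2] = { page0, page1 };

        /* With 64 elements of 64 bytes per page, index 70 lands in page 1, slot 6. */
        printf("%d\n", eq_element(pages, 64, 64, 70) == (void *)&page1[6 * 64]);
        return 0;
    }
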
63 #define EQ_SET_HW_PAGE_SIZE_VAL(eq) (ilog2(PAGE_IN_4K((eq)->page_size)))
65 #define ELEMENT_SIZE_IN_32B(eq) (((eq)->elem_size) >> 5)
66 #define EQ_SET_HW_ELEM_SIZE_VAL(eq) (ilog2(ELEMENT_SIZE_IN_32B(eq)))
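
These two helpers encode sizes into small hardware fields as powers of two: the page size as a count of 4 KiB units (assuming PAGE_IN_4K() divides by 4 KiB, as its name suggests) and the element size as a count of 32-byte units (the >> 5). A tiny worked example with illustrative sizes rather than values from the driver headers:

    #include <stdio.h>

    /* log2 of a power of two, standing in for the kernel's ilog2(). */
    static unsigned int log2_u32(unsigned int v)
    {
        unsigned int r = 0;

        while (v >>= 1)
            r++;
        return r;
    }

    int main(void)
    {
        unsigned int page_size = 4096;  /* illustrative queue page size */
        unsigned int elem_size = 64;    /* illustrative element size */

        printf("hw page size val = %u\n", log2_u32(page_size / 4096)); /* 0 */
        printf("hw elem size val = %u\n", log2_u32(elem_size >> 5));   /* 1 */
        return 0;
    }
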
79 #define aeq_to_aeqs(eq) \
80 container_of((eq) - (eq)->q_id, struct hinic_aeqs, aeq[0])
82 #define ceq_to_ceqs(eq) \
83 container_of((eq) - (eq)->q_id, struct hinic_ceqs, ceq[0])
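
aeq_to_aeqs() and ceq_to_ceqs() rely on the queues being laid out as a flat array inside their parent structure: stepping the eq pointer back by q_id lands on element 0 of that array, and container_of() then recovers the enclosing hinic_aeqs or hinic_ceqs. A self-contained sketch of the same pointer trick using made-up struct eq/struct eqs types:

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified version of the kernel's container_of(). */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct eq { int q_id; };

    /* Made-up parent type mirroring hinic_aeqs, which embeds an array of queues. */
    struct eqs { int num_eqs; struct eq q[4]; };

    static struct eqs *eq_to_eqs(struct eq *e)
    {
        /* e - e->q_id points back at q[0]; container_of() walks up to the parent. */
        return container_of(e - e->q_id, struct eqs, q[0]);
    }

    int main(void)
    {
        struct eqs parent = { .num_eqs = 4 };

        parent.q[2].q_id = 2;
        printf("%d\n", eq_to_eqs(&parent.q[2]) == &parent);  /* prints 1 */
        return 0;
    }
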
190 * @eq: the event queue to update the cons idx for
191 * @arm_state: the arm bit value of eq's interrupt
193 static void eq_update_ci(struct hinic_eq *eq, u32 arm_state)
195 u32 val, addr = EQ_CONS_IDX_REG_ADDR(eq);
198 val = hinic_hwif_read_reg(eq->hwif, addr);
205 val |= HINIC_EQ_CI_SET(eq->cons_idx, IDX) |
206 HINIC_EQ_CI_SET(eq->wrapped, WRAPPED) |
211 hinic_hwif_write_reg(eq->hwif, addr, val);
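
eq_update_ci() is a read-modify-write of the consumer-index CSR, which packs the consumer index, the wrapped flag and the interrupt arm state into bit fields of one 32-bit register. A generic illustration of that packing pattern; the field layout below is invented for the example, and the real shifts and masks come from the HINIC_EQ_CI_* definitions:

    #include <stdint.h>
    #include <stdio.h>

    /* Made-up field layout, for illustration only. */
    #define CI_IDX_SHIFT      0
    #define CI_IDX_MASK       0x1fffffu
    #define CI_WRAPPED_SHIFT  27
    #define CI_WRAPPED_MASK   0x1u
    #define CI_ARMED_SHIFT    31
    #define CI_ARMED_MASK     0x1u

    #define FIELD_SET(v, name)   (((v) & CI_##name##_MASK) << CI_##name##_SHIFT)
    #define FIELD_CLEAR(r, name) ((r) & ~(CI_##name##_MASK << CI_##name##_SHIFT))

    int main(void)
    {
        uint32_t reg = 0xdeadbeef;   /* pretend value read back from the CSR */
        uint32_t cons_idx = 42, wrapped = 1, armed = 1;

        /* Clear the fields this example owns, then OR in the new values. */
        reg = FIELD_CLEAR(FIELD_CLEAR(FIELD_CLEAR(reg, IDX), WRAPPED), ARMED);
        reg |= FIELD_SET(cons_idx, IDX) | FIELD_SET(wrapped, WRAPPED) |
               FIELD_SET(armed, ARMED);

        printf("0x%08x\n", (unsigned int)reg);
        return 0;
    }
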
216 * @eq: the Async Event Queue that received the event
218 static void aeq_irq_handler(struct hinic_eq *eq)
220 struct hinic_aeqs *aeqs = aeq_to_aeqs(eq);
230 for (i = 0; i < eq->q_len; i++) {
231 aeqe_curr = GET_CURR_AEQ_ELEM(eq);
236 /* HW toggles the wrapped bit when it adds an eq element */
237 if (HINIC_EQ_ELEM_DESC_GET(aeqe_desc, WRAPPED) == eq->wrapped)
268 eq->cons_idx++;
270 if (eq->cons_idx == eq->q_len) {
271 eq->cons_idx = 0;
272 eq->wrapped = !eq->wrapped;
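
The consumer loop above uses the WRAPPED bit as an ownership flag: hardware toggles the value it writes as it cycles through the ring, so an element whose WRAPPED bit still equals the software copy in eq->wrapped has not been produced yet and the loop stops; when the consumer index walks off the end of the queue it resets to 0 and software toggles its own copy. A toy producer/consumer model of that handshake (struct elem and hw_produce() are invented for illustration; the driver reads the real descriptor bits via HINIC_EQ_ELEM_DESC_GET()):

    #include <stdio.h>

    #define Q_LEN 4

    struct elem { unsigned int wrapped; unsigned int data; }; /* toy queue element */

    static struct elem queue[Q_LEN];                /* starts with wrapped = 0 */
    static unsigned int cons_idx, sw_wrapped;       /* software consumer state */
    static unsigned int prod_idx, hw_wrapped = 1;   /* toy "hardware" producer */

    static void hw_produce(unsigned int data)
    {
        queue[prod_idx].data = data;
        queue[prod_idx].wrapped = hw_wrapped;       /* toggled once per pass */
        if (++prod_idx == Q_LEN) {
            prod_idx = 0;
            hw_wrapped = !hw_wrapped;
        }
    }

    static void consume_all(void)
    {
        for (int i = 0; i < Q_LEN; i++) {
            struct elem *e = &queue[cons_idx];

            /* Entry still carries software's wrapped value: not produced yet. */
            if (e->wrapped == sw_wrapped)
                break;

            printf("consumed %u at idx %u\n", e->data, cons_idx);

            if (++cons_idx == Q_LEN) {              /* wrap and toggle, as the driver does */
                cons_idx = 0;
                sw_wrapped = !sw_wrapped;
            }
        }
    }

    int main(void)
    {
        for (unsigned int i = 0; i < 3; i++)
            hw_produce(i);
        consume_all();                              /* consumes 0, 1, 2 */
        for (unsigned int i = 3; i < 6; i++)
            hw_produce(i);
        consume_all();                              /* consumes 3, 4, 5 across the wrap */
        return 0;
    }
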
312 * @eq: the Completion Event Queue that received the event
314 static void ceq_irq_handler(struct hinic_eq *eq)
316 struct hinic_ceqs *ceqs = ceq_to_ceqs(eq);
320 for (i = 0; i < eq->q_len; i++) {
321 ceqe = *(GET_CURR_CEQ_ELEM(eq));
326 /* HW toggles the wrapped bit when it adds an eq element event */
327 if (HINIC_EQ_ELEM_DESC_GET(ceqe, WRAPPED) == eq->wrapped)
332 eq->cons_idx++;
334 if (eq->cons_idx == eq->q_len) {
335 eq->cons_idx = 0;
336 eq->wrapped = !eq->wrapped;
347 struct hinic_eq *eq = data;
349 if (eq->type == HINIC_AEQ)
350 aeq_irq_handler(eq);
351 else if (eq->type == HINIC_CEQ)
352 ceq_irq_handler(eq);
354 eq_update_ci(eq, EQ_ARMED);
421 static u32 get_ctrl0_val(struct hinic_eq *eq, u32 addr)
423 struct msix_entry *msix_entry = &eq->msix_entry;
424 enum hinic_eq_type type = eq->type;
429 addr = HINIC_CSR_AEQ_CTRL_0_ADDR(eq->q_id);
431 val = hinic_hwif_read_reg(eq->hwif, addr);
440 HINIC_AEQ_CTRL_0_SET(HINIC_HWIF_PCI_INTF(eq->hwif),
447 addr = HINIC_CSR_CEQ_CTRL_0_ADDR(eq->q_id);
449 val = hinic_hwif_read_reg(eq->hwif, addr);
460 HINIC_CEQ_CTRL_0_SET(HINIC_HWIF_PCI_INTF(eq->hwif),
469 static void set_ctrl0(struct hinic_eq *eq)
473 if (eq->type == HINIC_AEQ)
474 addr = HINIC_CSR_AEQ_CTRL_0_ADDR(eq->q_id);
476 addr = HINIC_CSR_CEQ_CTRL_0_ADDR(eq->q_id);
478 val = get_ctrl0_val(eq, addr);
480 hinic_hwif_write_reg(eq->hwif, addr, val);
483 static u32 get_ctrl1_val(struct hinic_eq *eq, u32 addr)
486 enum hinic_eq_type type = eq->type;
490 addr = HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id);
492 page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq);
493 elem_size = EQ_SET_HW_ELEM_SIZE_VAL(eq);
495 val = hinic_hwif_read_reg(eq->hwif, addr);
501 ctrl1 = HINIC_AEQ_CTRL_1_SET(eq->q_len, LEN) |
508 addr = HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id);
510 page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq);
512 val = hinic_hwif_read_reg(eq->hwif, addr);
517 ctrl1 = HINIC_CEQ_CTRL_1_SET(eq->q_len, LEN) |
525 static void set_ctrl1(struct hinic_eq *eq)
529 if (eq->type == HINIC_AEQ)
530 addr = HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id);
532 addr = HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id);
534 val = get_ctrl1_val(eq, addr);
536 hinic_hwif_write_reg(eq->hwif, addr, val);
539 static int set_ceq_ctrl_reg(struct hinic_eq *eq)
542 struct hinic_hwdev *hwdev = eq->hwdev;
551 addr = HINIC_CSR_CEQ_CTRL_0_ADDR(eq->q_id);
552 ceq_ctrl.ctrl0 = get_ctrl0_val(eq, addr);
553 addr = HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id);
554 ceq_ctrl.ctrl1 = get_ctrl1_val(eq, addr);
557 ceq_ctrl.q_id = eq->q_id;
566 eq->q_id, err, ceq_ctrl.status, out_size);
574 * set_eq_ctrls - set the eq's ctrl registers
575 * @eq: the event queue whose ctrl registers are set
577 static int set_eq_ctrls(struct hinic_eq *eq)
579 if (HINIC_IS_VF(eq->hwif) && eq->type == HINIC_CEQ)
580 return set_ceq_ctrl_reg(eq);
582 set_ctrl0(eq);
583 set_ctrl1(eq);
589 * @eq: the Async Event Queue
592 static void aeq_elements_init(struct hinic_eq *eq, u32 init_val)
597 for (i = 0; i < eq->q_len; i++) {
598 aeqe = GET_AEQ_ELEM(eq, i);
607 * @eq: the event queue
610 static void ceq_elements_init(struct hinic_eq *eq, u32 init_val)
615 for (i = 0; i < eq->q_len; i++) {
616 ceqe = GET_CEQ_ELEM(eq, i);
625 * @eq: the event queue
629 static int alloc_eq_pages(struct hinic_eq *eq)
631 struct hinic_hwif *hwif = eq->hwif;
636 eq->dma_addr = devm_kcalloc(&pdev->dev, eq->num_pages,
637 sizeof(*eq->dma_addr), GFP_KERNEL);
638 if (!eq->dma_addr)
641 eq->virt_addr = devm_kcalloc(&pdev->dev, eq->num_pages,
642 sizeof(*eq->virt_addr), GFP_KERNEL);
643 if (!eq->virt_addr) {
648 for (pg = 0; pg < eq->num_pages; pg++) {
649 eq->virt_addr[pg] = dma_alloc_coherent(&pdev->dev,
650 eq->page_size,
651 &eq->dma_addr[pg],
653 if (!eq->virt_addr[pg]) {
658 addr = EQ_HI_PHYS_ADDR_REG(eq, pg);
659 val = upper_32_bits(eq->dma_addr[pg]);
663 addr = EQ_LO_PHYS_ADDR_REG(eq, pg);
664 val = lower_32_bits(eq->dma_addr[pg]);
669 init_val = HINIC_EQ_ELEM_DESC_SET(eq->wrapped, WRAPPED);
671 if (eq->type == HINIC_AEQ)
672 aeq_elements_init(eq, init_val);
673 else if (eq->type == HINIC_CEQ)
674 ceq_elements_init(eq, init_val);
680 dma_free_coherent(&pdev->dev, eq->page_size,
681 eq->virt_addr[pg],
682 eq->dma_addr[pg]);
684 devm_kfree(&pdev->dev, eq->virt_addr);
687 devm_kfree(&pdev->dev, eq->dma_addr);
693 * @eq: the Event Queue
695 static void free_eq_pages(struct hinic_eq *eq)
697 struct hinic_hwif *hwif = eq->hwif;
701 for (pg = 0; pg < eq->num_pages; pg++)
702 dma_free_coherent(&pdev->dev, eq->page_size,
703 eq->virt_addr[pg],
704 eq->dma_addr[pg]);
706 devm_kfree(&pdev->dev, eq->virt_addr);
707 devm_kfree(&pdev->dev, eq->dma_addr);
712 * @eq: the event queue
722 static int init_eq(struct hinic_eq *eq, struct hinic_hwif *hwif,
729 eq->hwif = hwif;
730 eq->type = type;
731 eq->q_id = q_id;
732 eq->q_len = q_len;
733 eq->page_size = page_size;
736 hinic_hwif_write_reg(eq->hwif, EQ_CONS_IDX_REG_ADDR(eq), 0);
737 hinic_hwif_write_reg(eq->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0);
739 eq->cons_idx = 0;
740 eq->wrapped = 0;
743 eq->elem_size = HINIC_AEQE_SIZE;
745 eq->elem_size = HINIC_CEQE_SIZE;
751 eq->num_pages = GET_EQ_NUM_PAGES(eq, page_size);
752 eq->num_elem_in_pg = GET_EQ_NUM_ELEMS_IN_PG(eq, page_size);
754 eq->msix_entry = entry;
756 if (eq->num_elem_in_pg & (eq->num_elem_in_pg - 1)) {
757 dev_err(&pdev->dev, "num elements in eq page != power of 2\n");
761 if (eq->num_pages > EQ_MAX_PAGES) {
762 dev_err(&pdev->dev, "too many pages for eq\n");
766 err = set_eq_ctrls(eq);
768 dev_err(&pdev->dev, "Failed to set eq ctrls\n");
772 eq_update_ci(eq, EQ_ARMED);
774 err = alloc_eq_pages(eq);
776 dev_err(&pdev->dev, "Failed to allocate pages for eq\n");
781 struct hinic_eq_work *aeq_work = &eq->aeq_work;
785 tasklet_setup(&eq->ceq_tasklet, ceq_tasklet);
789 hinic_msix_attr_set(eq->hwif, eq->msix_entry.entry,
797 snprintf(eq->irq_name, sizeof(eq->irq_name), "hinic_aeq%d@pci:%s", eq->q_id,
799 err = request_irq(entry.vector, aeq_interrupt, 0, eq->irq_name, eq);
801 snprintf(eq->irq_name, sizeof(eq->irq_name), "hinic_ceq%d@pci:%s", eq->q_id,
803 err = request_irq(entry.vector, ceq_interrupt, 0, eq->irq_name, eq);
814 free_eq_pages(eq);
820 * @eq: the event queue
822 static void remove_eq(struct hinic_eq *eq)
824 hinic_set_msix_state(eq->hwif, eq->msix_entry.entry,
826 free_irq(eq->msix_entry.vector, eq);
828 if (eq->type == HINIC_AEQ) {
829 struct hinic_eq_work *aeq_work = &eq->aeq_work;
833 hinic_hwif_write_reg(eq->hwif,
834 HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0);
835 } else if (eq->type == HINIC_CEQ) {
836 tasklet_kill(&eq->ceq_tasklet);
838 hinic_hwif_write_reg(eq->hwif,
839 HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id), 0);
843 eq->cons_idx = hinic_hwif_read_reg(eq->hwif, EQ_PROD_IDX_REG_ADDR(eq));
844 eq_update_ci(eq, EQ_NOT_ARMED);
846 free_eq_pages(eq);
961 struct hinic_eq *eq = NULL;
966 eq = &hwdev->func_to_io.ceqs.ceq[q_id];
967 addr = EQ_CONS_IDX_REG_ADDR(eq);
969 addr = EQ_PROD_IDX_REG_ADDR(eq);
972 q_id, ci, eq->cons_idx, pi,
973 eq->ceq_tasklet.state,
974 eq->wrapped, be32_to_cpu(*(__be32 *)(GET_CURR_CEQ_ELEM(eq))));
981 struct hinic_eq *eq = NULL;
986 eq = &hwdev->aeqs.aeq[q_id];
987 addr = EQ_CONS_IDX_REG_ADDR(eq);
989 addr = EQ_PROD_IDX_REG_ADDR(eq);
991 aeqe_pos = GET_CURR_AEQ_ELEM(eq);
993 q_id, ci, pi, work_busy(&eq->aeq_work.work),
994 eq->wrapped, be32_to_cpu(aeqe_pos->desc));