Lines Matching defs:eq

11 void notify_eq(struct erdma_eq *eq)
13 u64 db_data = FIELD_PREP(ERDMA_EQDB_CI_MASK, eq->ci) |
16 *eq->db_record = db_data;
17 writeq(db_data, eq->db);
19 atomic64_inc(&eq->notify_num);
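
These matches appear to come from the erdma RDMA driver's event-queue (EQ) code. Line 13 is cut off at the trailing "|"; the construct is the usual shadow-doorbell pattern: build a doorbell word from the consumer index (plus, presumably, an interrupt re-arm flag), mirror it into a DMA-visible record, then write it to the MMIO doorbell register. A minimal sketch follows, with a simplified demo_eq type and DEMO_* masks that are assumptions rather than the driver's real layout; the later sketches reuse this type and these includes.

#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define DEMO_EQDB_CI_MASK   GENMASK_ULL(23, 0)  /* assumed CI field */
#define DEMO_EQDB_ARM_MASK  BIT_ULL(31)         /* assumed re-arm bit */

/* Simplified stand-in for struct erdma_eq; field names mirror the matches. */
struct demo_eq {
    void *qbuf;               /* EQE ring, DMA-coherent */
    dma_addr_t qbuf_dma_addr;
    u32 depth;                /* number of EQEs, power of two */
    u32 ci;                   /* free-running consumer index */
    u64 *db_record;           /* shadow doorbell at the end of qbuf */
    void __iomem *db;         /* MMIO doorbell register */
    spinlock_t lock;
    atomic64_t event_num;
    atomic64_t notify_num;
};

/* Publish the consumer index to the shadow record, then ring the doorbell. */
static void demo_notify_eq(struct demo_eq *eq)
{
    u64 db_data = FIELD_PREP(DEMO_EQDB_CI_MASK, eq->ci) |
                  FIELD_PREP(DEMO_EQDB_ARM_MASK, 1);

    *eq->db_record = db_data;  /* DMA-visible copy for the device */
    writeq(db_data, eq->db);   /* MMIO write that notifies the hardware */

    atomic64_inc(&eq->notify_num);
}
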
22 void *get_next_valid_eqe(struct erdma_eq *eq)
24 u64 *eqe = get_queue_entry(eq->qbuf, eq->ci, eq->depth, EQE_SHIFT);
27 return owner ^ !!(eq->ci & eq->depth) ? eqe : NULL;
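
Line 27 is the interesting one: get_next_valid_eqe() decides validity from an owner (phase) bit rather than a producer index. Because depth is a power of two, (eq->ci & eq->depth) flips every time the free-running consumer index wraps the ring, so the owner value the CPU expects alternates 1, 0, 1, ... lap by lap, and the zeroed allocation makes untouched entries read as invalid on the first lap. A sketch of that check, reusing struct demo_eq above; the owner-bit mask and the 16-byte EQE size (shift of 4) are assumptions.

#define DEMO_EQE_SHIFT           4           /* assumed: 16-byte EQEs */
#define DEMO_EQE_HDR_OWNER_MASK  BIT_ULL(31) /* assumed owner/valid bit */

/* ci is free-running; masking with (depth - 1) maps it into the ring. */
static u64 *demo_get_queue_entry(void *qbuf, u32 ci, u32 depth, u32 shift)
{
    return qbuf + ((ci & (depth - 1)) << shift);
}

/* Return the next EQE if the device has published it, otherwise NULL. */
static u64 *demo_get_next_valid_eqe(struct demo_eq *eq)
{
    u64 *eqe = demo_get_queue_entry(eq->qbuf, eq->ci, eq->depth, DEMO_EQE_SHIFT);
    u32 owner = FIELD_GET(DEMO_EQE_HDR_OWNER_MASK, READ_ONCE(*eqe));

    /* Expected owner bit toggles once per wrap: the EQE is valid iff they differ. */
    return owner ^ !!(eq->ci & eq->depth) ? eqe : NULL;
}
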
85 struct erdma_eq *eq = &dev->aeq;
88 eq->depth = ERDMA_DEFAULT_EQ_DEPTH;
89 buf_size = eq->depth << EQE_SHIFT;
91 eq->qbuf =
93 &eq->qbuf_dma_addr, GFP_KERNEL | __GFP_ZERO);
94 if (!eq->qbuf)
97 spin_lock_init(&eq->lock);
98 atomic64_set(&eq->event_num, 0);
99 atomic64_set(&eq->notify_num, 0);
101 eq->db = dev->func_bar + ERDMA_REGS_AEQ_DB_REG;
102 eq->db_record = (u64 *)(eq->qbuf + buf_size);
105 upper_32_bits(eq->qbuf_dma_addr));
107 lower_32_bits(eq->qbuf_dma_addr));
108 erdma_reg_write32(dev, ERDMA_REGS_AEQ_DEPTH_REG, eq->depth);
110 eq->qbuf_dma_addr + buf_size);
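
Lines 85-110 set up the AEQ: a single dma_alloc_coherent() buffer holds the whole EQE ring and, immediately after it, the 8-byte doorbell record (line 102); the device is then programmed with the ring's DMA address (split into upper/lower 32-bit register writes), its depth, and the record's address at qbuf_dma_addr + buf_size. A sketch of that allocation and layout, reusing struct demo_eq; the 8-byte extra size stands in for whatever the driver's WARPPED_BUFSIZE() macro adds.

#include <linux/dma-mapping.h>
#include <linux/errno.h>

#define DEMO_EXTRA_BUFFER_SIZE  8  /* room for the u64 doorbell record */

/* Allocate the ring plus the trailing doorbell record in one coherent buffer. */
static int demo_eq_buf_init(struct device *dma_dev, struct demo_eq *eq, u32 depth)
{
    u32 buf_size;

    eq->depth = depth;                    /* e.g. ERDMA_DEFAULT_EQ_DEPTH */
    buf_size = eq->depth << DEMO_EQE_SHIFT;

    eq->qbuf = dma_alloc_coherent(dma_dev, buf_size + DEMO_EXTRA_BUFFER_SIZE,
                                  &eq->qbuf_dma_addr, GFP_KERNEL | __GFP_ZERO);
    if (!eq->qbuf)
        return -ENOMEM;

    spin_lock_init(&eq->lock);
    atomic64_set(&eq->event_num, 0);
    atomic64_set(&eq->notify_num, 0);

    /* CPU view of the doorbell record; its DMA address is qbuf_dma_addr + buf_size. */
    eq->db_record = (u64 *)(eq->qbuf + buf_size);

    /*
     * The AEQ path then programs the hardware with upper_32_bits()/
     * lower_32_bits() of eq->qbuf_dma_addr, with eq->depth, and with
     * eq->qbuf_dma_addr + buf_size (the register writes at lines 105-110).
     */
    return 0;
}
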
117 struct erdma_eq *eq = &dev->aeq;
120 WARPPED_BUFSIZE(eq->depth << EQE_SHIFT), eq->qbuf,
121 eq->qbuf_dma_addr);
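
The matching teardown at lines 117-121 frees WARPPED_BUFSIZE(eq->depth << EQE_SHIFT) bytes, i.e. the same ring-plus-doorbell-record size that was allocated (WARPPED_BUFSIZE is evidently the driver's own macro name, spelling and all). A sketch of the mirror-image free, under the same assumptions as the allocation sketch:

/* Free exactly what demo_eq_buf_init() allocated: ring + trailing record. */
static void demo_eq_buf_destroy(struct device *dma_dev, struct demo_eq *eq)
{
    u32 buf_size = eq->depth << DEMO_EQE_SHIFT;

    dma_free_coherent(dma_dev, buf_size + DEMO_EXTRA_BUFFER_SIZE,
                      eq->qbuf, eq->qbuf_dma_addr);
}
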
136 ceqe = get_next_valid_eqe(&ceq_cb->eq);
141 ceq_cb->eq.ci++;
156 notify_eq(&ceq_cb->eq);
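
Lines 136-156 show the shape of the CEQ completion handler: keep pulling valid EQEs, bump the consumer index for each one, and finally re-arm the queue through notify_eq(). The per-entry work (decoding which CQ the EQE refers to and completing it) is not in the matches, so it is only hinted at in this sketch, which reuses the demo helpers above.

/* Drain the CEQ, then publish the new CI and re-arm the interrupt. */
static void demo_ceq_completion_handler(struct demo_eq *eq)
{
    u64 *ceqe;

    while ((ceqe = demo_get_next_valid_eqe(eq)) != NULL) {
        /* Real driver: look up the CQ referenced by *ceqe and complete it. */
        eq->ci++;        /* consumed one EQE */
    }

    demo_notify_eq(eq);  /* writes CI to the doorbell and re-arms */
}
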
209 static int create_eq_cmd(struct erdma_dev *dev, u32 eqn, struct erdma_eq *eq)
217 req.depth = ilog2(eq->depth);
218 req.qbuf_addr = eq->qbuf_dma_addr;
222 db_info_dma_addr = eq->qbuf_dma_addr + (eq->depth << EQE_SHIFT);
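
create_eq_cmd() at lines 209-222 packages the same information for the device's command path: the depth goes in as ilog2(eq->depth), the ring's DMA address goes in directly, and the doorbell-record address is once again qbuf_dma_addr plus the ring size. The request layout below is hypothetical (the real structure behind req is not in the matches); it only echoes the three fields that are visible.

#include <linux/log2.h>

/* Hypothetical create-EQ request; field names are placeholders. */
struct demo_create_eq_req {
    u8 eqn;             /* which EQ the device should create */
    u8 depth_log2;      /* ring depth encoded as log2(entries) */
    u64 qbuf_addr;      /* DMA address of the EQE ring */
    u64 db_record_addr; /* DMA address of the trailing doorbell record */
};

static void demo_fill_create_eq_req(struct demo_create_eq_req *req, u32 eqn,
                                    struct demo_eq *eq)
{
    req->eqn = eqn;
    req->depth_log2 = ilog2(eq->depth);  /* depth is a power of two */
    req->qbuf_addr = eq->qbuf_dma_addr;
    /* Same "record lives right after the ring" layout as in the init paths. */
    req->db_record_addr = eq->qbuf_dma_addr + (eq->depth << DEMO_EQE_SHIFT);
}
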
231 struct erdma_eq *eq = &dev->ceqs[ceqn].eq;
235 eq->qbuf =
237 &eq->qbuf_dma_addr, GFP_KERNEL | __GFP_ZERO);
238 if (!eq->qbuf)
241 spin_lock_init(&eq->lock);
242 atomic64_set(&eq->event_num, 0);
243 atomic64_set(&eq->notify_num, 0);
245 eq->depth = ERDMA_DEFAULT_EQ_DEPTH;
246 eq->db = dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG +
248 eq->db_record = (u64 *)(eq->qbuf + buf_size);
249 eq->ci = 0;
253 ret = create_eq_cmd(dev, ceqn + 1, eq);
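
The per-CEQ init at lines 231-253 repeats the AEQ buffer setup, but the doorbell comes from a per-queue offset off ERDMA_REGS_CEQ_DB_BASE_REG (the stride expression on the line after 246 is not in the matches), and the queue is announced with create_eq_cmd(dev, ceqn + 1, eq), so EQ number 0 is apparently reserved for some other queue. A sketch under those assumptions, with a made-up doorbell stride and depth value:

#define DEMO_CEQ_DB_STRIDE  8  /* assumed spacing between CEQ doorbell registers */

/* One CEQ: allocate its ring, point it at its own doorbell, then create it. */
static int demo_ceq_init_one(struct device *dma_dev, void __iomem *ceq_db_base,
                             struct demo_eq *eq, u32 ceqn)
{
    struct demo_create_eq_req req;
    int ret;

    ret = demo_eq_buf_init(dma_dev, eq, 256 /* stand-in for ERDMA_DEFAULT_EQ_DEPTH */);
    if (ret)
        return ret;

    eq->db = ceq_db_base + ceqn * DEMO_CEQ_DB_STRIDE;  /* per-CEQ doorbell */
    eq->ci = 0;

    /* eqn is offset by one in the matches above; number 0 is evidently taken. */
    demo_fill_create_eq_req(&req, ceqn + 1, eq);
    /* ...the real driver then posts the request through its command queue... */
    return 0;
}
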
261 struct erdma_eq *eq = &dev->ceqs[ceqn].eq;
279 dma_free_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size), eq->qbuf,
280 eq->qbuf_dma_addr);
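
Teardown of a CEQ at lines 261-280 ends the same way as the AEQ's: dma_free_coherent() of the wrapped size covering both the ring and the doorbell record. Presumably a destroy-EQ command is issued first so the device stops using the buffer; that step is not in the matches, so it appears only as a comment in this sketch.

/* Undo demo_ceq_init_one(): quiesce the EQ on the device, then free it. */
static void demo_ceq_uninit_one(struct device *dma_dev, struct demo_eq *eq)
{
    /*
     * Hypothetical step: post a destroy-EQ command for this queue so the
     * device no longer DMAs into qbuf.
     */
    demo_eq_buf_destroy(dma_dev, eq);  /* ring + doorbell record */
}
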