Lines Matching refs:cq

8  * @cq: pointer to the specific control queue
11 static void idpf_ctlq_setup_regs(struct idpf_ctlq_info *cq,
15 cq->reg.head = q_create_info->reg.head;
16 cq->reg.tail = q_create_info->reg.tail;
17 cq->reg.len = q_create_info->reg.len;
18 cq->reg.bah = q_create_info->reg.bah;
19 cq->reg.bal = q_create_info->reg.bal;
20 cq->reg.len_mask = q_create_info->reg.len_mask;
21 cq->reg.len_ena_mask = q_create_info->reg.len_ena_mask;
22 cq->reg.head_mask = q_create_info->reg.head_mask;
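idpf_ctlq_setup_regs() does nothing but mirror the caller-supplied register description into the queue struct; the offsets and masks originate in the create-info handed to idpf_ctlq_add() further down. A hedged sketch of that input, using the reg field names visible above but placeholder values and an assumed struct name (the real idpf MMIO offsets are not part of this listing):

struct idpf_ctlq_create_info qinfo = { };   /* type name assumed */

qinfo.reg.head         = 0x0000;     /* placeholder head register offset */
qinfo.reg.tail         = 0x0004;     /* placeholder tail register offset */
qinfo.reg.len          = 0x0008;     /* placeholder length/enable register offset */
qinfo.reg.bal          = 0x000c;     /* placeholder base-address-low offset */
qinfo.reg.bah          = 0x0010;     /* placeholder base-address-high offset */
qinfo.reg.len_mask     = 0x1fff;     /* placeholder: bits of len holding the ring size */
qinfo.reg.len_ena_mask = 1U << 31;   /* placeholder: ring-enable bit in len */
qinfo.reg.head_mask    = 0x1fff;     /* placeholder: valid bits of the head register */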
28  * @cq: pointer to the specific control queue
34 static void idpf_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
39 wr32(hw, cq->reg.tail, (u32)(cq->ring_size - 1));
42 if (cq->q_id != -1)
46 wr32(hw, cq->reg.head, 0);
49 wr32(hw, cq->reg.bal, lower_32_bits(cq->desc_ring.pa));
50 wr32(hw, cq->reg.bah, upper_32_bits(cq->desc_ring.pa));
51 wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));
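Read together, the matches above give the register bring-up order. The sketch below re-assembles them into one function to make the flow explicit; the early return and the placement of the is_rxq guard are inferred from the matched lines, not quoted from the source:

/* hedged re-sketch of idpf_ctlq_init_regs(), assembled from the lines above */
static void ctlq_init_regs_sketch(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
				  bool is_rxq)
{
	if (is_rxq)
		/* expose the pre-posted RX buffers to hardware right away */
		wr32(hw, cq->reg.tail, (u32)(cq->ring_size - 1));

	if (cq->q_id != -1)
		/* only the default mailbox (id -1) gets the remaining registers */
		return;

	wr32(hw, cq->reg.head, 0);                                    /* start empty */
	wr32(hw, cq->reg.bal, lower_32_bits(cq->desc_ring.pa));       /* ring base, low half */
	wr32(hw, cq->reg.bah, upper_32_bits(cq->desc_ring.pa));       /* ring base, high half */
	wr32(hw, cq->reg.len, cq->ring_size | cq->reg.len_ena_mask);  /* size plus enable bit */
}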
56  * @cq: pointer to the specific control queue
61 static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
65 for (i = 0; i < cq->ring_size; i++) {
66 struct idpf_ctlq_desc *desc = IDPF_CTLQ_DESC(cq, i);
67 struct idpf_dma_mem *bi = cq->bi.rx_buff[i];
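The loop body that primes each descriptor is not matched here because it works on desc and bi rather than cq. A hedged sketch of what each slot typically receives, mirrored from the refill path in idpf_ctlq_post_rx_buffs() further down; the flag and params field names are assumptions:

desc->flags = cpu_to_le16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);    /* flag names assumed */
desc->datalen = cpu_to_le16(bi->size);                                 /* buffer length */
desc->params.indirect.addr_high = cpu_to_le32(upper_32_bits(bi->pa));  /* field names assumed */
desc->params.indirect.addr_low = cpu_to_le32(lower_32_bits(bi->pa));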
93  * @cq: pointer to the specific control queue
97 static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
99 mutex_lock(&cq->cq_lock);
102 idpf_ctlq_dealloc_ring_res(hw, cq);
105 cq->ring_size = 0;
107 mutex_unlock(&cq->cq_lock);
108 mutex_destroy(&cq->cq_lock);
118 * The cq parameter will be allocated/initialized and passed back to the caller
127 struct idpf_ctlq_info *cq;
131 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
132 if (!cq)
135 cq->cq_type = qinfo->type;
136 cq->q_id = qinfo->id;
137 cq->buf_size = qinfo->buf_size;
138 cq->ring_size = qinfo->len;
140 cq->next_to_use = 0;
141 cq->next_to_clean = 0;
142 cq->next_to_post = cq->ring_size - 1;
149 err = idpf_ctlq_alloc_ring_res(hw, cq);
160 idpf_ctlq_init_rxq_bufs(cq);
163 cq->bi.tx_msg = kcalloc(qinfo->len,
166 if (!cq->bi.tx_msg) {
172 idpf_ctlq_setup_regs(cq, qinfo);
174 idpf_ctlq_init_regs(hw, cq, is_rxq);
176 mutex_init(&cq->cq_lock);
178 list_add(&cq->cq_list, &hw->cq_list_head);
180 *cq_out = cq;
186 idpf_ctlq_dealloc_ring_res(hw, cq);
188 kfree(cq);
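From the caller's side, idpf_ctlq_add() consumes a filled-in create-info and, as the comment above says, allocates and initializes the queue it passes back. A hedged usage sketch; the struct type name, the queue-type value, and the sizes are assumptions chosen only to illustrate the fields copied above:

struct idpf_ctlq_create_info qinfo = { };   /* type name assumed */
struct idpf_ctlq_info *cq;
int err;

qinfo.type = IDPF_CTLQ_TYPE_MAILBOX_TX;   /* queue-type value assumed */
qinfo.id = -1;                            /* -1 marks the default mailbox, see idpf_ctlq_init_regs() */
qinfo.len = 256;                          /* example descriptor count */
qinfo.buf_size = 4096;                    /* example per-message buffer size */
/* qinfo.reg.* filled in from the device register map, as sketched earlier */

err = idpf_ctlq_add(hw, &qinfo, &cq);
if (err)
	return err;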
196 * @cq: pointer to control queue to be removed
199 struct idpf_ctlq_info *cq)
201 list_del(&cq->cq_list);
202 idpf_ctlq_shutdown(hw, cq);
203 kfree(cq);
220 struct idpf_ctlq_info *cq, *tmp;
229 err = idpf_ctlq_add(hw, qinfo, &cq);
237 list_for_each_entry_safe(cq, tmp, &hw->cq_list_head, cq_list)
238 idpf_ctlq_remove(hw, cq);
249 struct idpf_ctlq_info *cq, *tmp;
251 list_for_each_entry_safe(cq, tmp, &hw->cq_list_head, cq_list)
252 idpf_ctlq_remove(hw, cq);
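The two list walks above are the error-unwind path of queue-group setup and the matching teardown: every successfully added queue sits on hw->cq_list_head, so a failure part-way through setup, and a normal teardown, both simply remove whatever is on the list. The enclosing function names and parameter lists are not visible in this listing; the sketch below assumes they are idpf_ctlq_init(), taking an array of create-infos plus a count, and idpf_ctlq_deinit(), taking only hw:

struct idpf_ctlq_create_info qinfos[2] = { };   /* type name assumed */
int err;

/* fill qinfos[0] (send mailbox) and qinfos[1] (receive mailbox) as sketched earlier */

err = idpf_ctlq_init(hw, ARRAY_SIZE(qinfos), qinfos);   /* signature assumed */
if (err)
	return err;

/* ... use the queues ... */

idpf_ctlq_deinit(hw);   /* removes every queue still on hw->cq_list_head */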
258 * @cq: handle to control queue struct to send on
267 int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
275 mutex_lock(&cq->cq_lock);
278 num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
287 desc = IDPF_CTLQ_DESC(cq, cq->next_to_use);
320 cq->bi.tx_msg[cq->next_to_use] = msg;
322 (cq->next_to_use)++;
323 if (cq->next_to_use == cq->ring_size)
324 cq->next_to_use = 0;
332 wr32(hw, cq->reg.tail, cq->next_to_use);
335 mutex_unlock(&cq->cq_lock);
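idpf_ctlq_send() first checks IDPF_CTLQ_DESC_UNUSED() for room, fills one descriptor per message, remembers each message pointer in cq->bi.tx_msg[] for later cleaning, and advances next_to_use with a wrap before writing it to the tail register. The macro body is not part of this listing; below is a plausible model of the free-slot computation, in which one slot is deliberately kept unused so a full ring can be told apart from an empty one:

/* illustrative only: next_to_use is the producer index, next_to_clean the
 * consumer index; the result is how many descriptors can still be filled */
static u16 ctlq_descs_unused(u16 next_to_clean, u16 next_to_use, u16 ring_size)
{
	u16 base = next_to_clean > next_to_use ? 0 : ring_size;

	return base + next_to_clean - next_to_use - 1;
}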
343  * @cq: pointer to the specific control queue
355 int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
364 if (*clean_count > cq->ring_size)
367 mutex_lock(&cq->cq_lock);
369 ntc = cq->next_to_clean;
375 desc = IDPF_CTLQ_DESC(cq, ntc);
382 msg_status[i] = cq->bi.tx_msg[ntc];
385 cq->bi.tx_msg[ntc] = NULL;
391 if (ntc == cq->ring_size)
395 cq->next_to_clean = ntc;
397 mutex_unlock(&cq->cq_lock);
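idpf_ctlq_clean_sq() walks forward from next_to_clean, handing the previously stored cq->bi.tx_msg[] pointers back to the caller and clearing the slots; the loop's stop condition (typically the hardware's descriptor-done flag) does not reference cq and so is not shown here. A hedged caller sketch, assuming msg_status[] holds struct idpf_ctlq_msg pointers:

struct idpf_ctlq_msg *done[8];   /* element type assumed */
u16 clean_count = ARRAY_SIZE(done);
int err;

err = idpf_ctlq_clean_sq(cq, &clean_count, done);
if (err)
	return err;

/* on return, clean_count holds how many completed send messages came back;
 * their buffers can now be reused or freed */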
408 * @cq: pointer to control queue handle
420 int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
424 u16 ntp = cq->next_to_post;
429 if (*buff_count > cq->ring_size)
435 mutex_lock(&cq->cq_lock);
437 if (tbp >= cq->ring_size)
440 if (tbp == cq->next_to_clean)
445 while (ntp != cq->next_to_clean) {
446 desc = IDPF_CTLQ_DESC(cq, ntp);
448 if (cq->bi.rx_buff[ntp])
459 if (tbp >= cq->ring_size)
462 while (tbp != cq->next_to_clean) {
463 if (cq->bi.rx_buff[tbp]) {
464 cq->bi.rx_buff[ntp] =
465 cq->bi.rx_buff[tbp];
466 cq->bi.rx_buff[tbp] = NULL;
476 if (tbp >= cq->ring_size)
480 if (tbp == cq->next_to_clean)
484 cq->bi.rx_buff[ntp] = buffs[i];
496 desc->datalen = cpu_to_le16(cq->bi.rx_buff[ntp]->size);
498 cpu_to_le32(upper_32_bits(cq->bi.rx_buff[ntp]->pa));
500 cpu_to_le32(lower_32_bits(cq->bi.rx_buff[ntp]->pa));
503 if (ntp == cq->ring_size)
509 if (cq->next_to_post != ntp) {
514 cq->next_to_post = ntp - 1;
517 cq->next_to_post = cq->ring_size - 1;
521 wr32(hw, cq->reg.tail, cq->next_to_post);
524 mutex_unlock(&cq->cq_lock);
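The tail bookkeeping at the end of idpf_ctlq_post_rx_buffs() is the subtle part: ntp is the next slot to be written, so the tail register must be left on the last descriptor that actually received a buffer, i.e. ntp - 1, or ring_size - 1 when ntp has just wrapped to 0. A small model of that computation (the ntp == 0 test is inferred from the two assignments above, not quoted):

/* illustrative only: where the tail register ends up after posting,
 * given the next write position ntp */
static u16 ctlq_post_tail(u16 ntp, u16 ring_size)
{
	return ntp ? ntp - 1 : ring_size - 1;
}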
534 * @cq: pointer to control queue handle to receive on
543 int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
552 mutex_lock(&cq->cq_lock);
554 ntc = cq->next_to_clean;
560 desc = IDPF_CTLQ_DESC(cq, ntc);
590 q_msg[i].ctx.indirect.payload = cq->bi.rx_buff[ntc];
595 cq->bi.rx_buff[ntc] = NULL;
605 if (ntc == cq->ring_size)
609 cq->next_to_clean = ntc;
611 mutex_unlock(&cq->cq_lock);
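idpf_ctlq_recv() hands completed messages to the caller and transfers ownership of any indirect payload buffer through q_msg[i].ctx.indirect.payload, clearing the ring slot; the caller is then expected to return drained buffers with idpf_ctlq_post_rx_buffs(). A hedged end-to-end sketch; the trailing parameters of both calls are assumptions based on the usage visible above:

struct idpf_ctlq_msg msgs[4] = { };
struct idpf_dma_mem *bufs[4];
u16 num_msgs = ARRAY_SIZE(msgs);
u16 num_bufs = 0;
int err;
u16 i;

err = idpf_ctlq_recv(cq, &num_msgs, msgs);
if (err)
	return err;

for (i = 0; i < num_msgs; i++) {
	/* process msgs[i]; an indirect message carries its DMA buffer in
	 * ctx.indirect.payload, which now belongs to the caller */
	if (msgs[i].ctx.indirect.payload)
		bufs[num_bufs++] = msgs[i].ctx.indirect.payload;
}

/* hand the drained buffers back to the RX ring */
if (num_bufs)
	err = idpf_ctlq_post_rx_buffs(hw, cq, &num_bufs, bufs);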