Lines matching refs:io_sq (every matched source line that references the io_sq submission-queue object)

63 static void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
68 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
70 offset = tail_masked * io_sq->desc_entry_size;
72 return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
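The three matched lines above are the whole addressing scheme for a host-memory SQ: the free-running tail is masked by the power-of-two queue depth and scaled by the descriptor size to locate the next slot. Below is a minimal self-contained sketch of that computation; the struct and function names are illustrative stand-ins, and only the tail, q_depth, desc_entry_size and desc_addr.virt_addr fields are taken from the lines above.

#include <stddef.h>
#include <stdint.h>

/* Simplified stand-in for the SQ fields get_sq_desc_regular_queue() reads. */
struct sq_desc_sketch {
	void    *desc_base;        /* io_sq->desc_addr.virt_addr   */
	uint16_t tail;             /* free-running producer index  */
	uint16_t q_depth;          /* ring depth, a power of two   */
	uint16_t desc_entry_size;  /* bytes per descriptor         */
};

/* Address of the slot the next descriptor will be written into. */
static void *next_free_desc(const struct sq_desc_sketch *sq)
{
	uint16_t tail_masked = sq->tail & (sq->q_depth - 1);
	size_t   offset      = (size_t)tail_masked * sq->desc_entry_size;

	return (void *)((uintptr_t)sq->desc_base + offset);
}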
75 static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
78 struct ena_com_llq_info *llq_info = &io_sq->llq_info;
83 dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
86 if (is_llq_max_tx_burst_exists(io_sq)) {
87 if (unlikely(!io_sq->entries_in_tx_burst_left)) {
88 ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
93 io_sq->entries_in_tx_burst_left--;
94 ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
96 io_sq->qid, io_sq->entries_in_tx_burst_left);
105 ENA_MEMCPY_TO_DEVICE_64(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
109 io_sq->tail++;
112 if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
113 io_sq->phase ^= 1;
118 static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
122 struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
123 struct ena_com_llq_info *llq_info = &io_sq->llq_info;
127 if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
131 llq_info->descs_num_before_header * io_sq->desc_entry_size;
134 ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
140 ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
150 static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
152 struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
159 ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
164 sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
171 static int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
173 struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
174 struct ena_com_llq_info *llq_info = &io_sq->llq_info;
177 if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
182 rc = ena_com_write_bounce_buffer_to_dev(io_sq,
185 ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
191 ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
192 memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
201 static void *get_sq_desc(struct ena_com_io_sq *io_sq)
203 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
204 return get_sq_desc_llq(io_sq);
206 return get_sq_desc_regular_queue(io_sq);
209 static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
211 struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
212 struct ena_com_llq_info *llq_info = &io_sq->llq_info;
216 rc = ena_com_write_bounce_buffer_to_dev(io_sq,
219 ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
225 ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
226 memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
234 llq_info->desc_list_entry_size / io_sq->desc_entry_size;
240 static int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
242 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
243 return ena_com_sq_update_llq_tail(io_sq);
245 io_sq->tail++;
248 if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
249 io_sq->phase ^= 1;
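ena_com_sq_update_tail() dispatches device-placement (LLQ) queues to the bounce-buffer path and, for host-memory queues, simply advances the tail and flips the phase bit whenever the masked tail wraps back to zero, exactly as the write-bounce-buffer path does at lines 109-113. A sketch of that host-memory branch, using only the tail, q_depth and phase fields visible in this listing; the struct and function names are stand-ins, and the completion side that consumes the phase bit is not part of these matches.

#include <stdint.h>

/* Only the fields the host-memory tail update touches. */
struct sq_tail_sketch {
	uint16_t tail;     /* free-running producer index      */
	uint16_t q_depth;  /* power of two                     */
	uint8_t  phase;    /* expected-phase bit for this lap  */
};

static void sq_update_tail_host(struct sq_tail_sketch *sq)
{
	sq->tail++;

	/* A masked tail of 0 means the ring just wrapped; flip the phase
	 * bit so descriptors written on the new lap are distinguishable. */
	if ((sq->tail & (sq->q_depth - 1)) == 0)
		sq->phase ^= 1;
}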
301 static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
306 meta_desc = get_sq_desc(io_sq);
327 meta_desc->len_ctrl |= ((u32)io_sq->phase <<
344 return ena_com_sq_update_tail(io_sq);
347 static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
356 if (io_sq->disable_meta_caching) {
361 return ena_com_create_meta(io_sq, ena_meta);
364 if (ena_com_meta_desc_changed(io_sq, ena_tx_ctx)) {
367 memcpy(&io_sq->cached_tx_meta, ena_meta,
369 return ena_com_create_meta(io_sq, ena_meta);
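ena_com_create_and_store_tx_meta_desc() only spends a descriptor on TX metadata when it must: either meta caching is disabled for the queue, or the metadata differs from the copy cached in io_sq->cached_tx_meta, in which case the cache is refreshed before the descriptor is written. The sketch below is a hedged illustration of that decision; the meta fields and the meta_changed() comparison are hypothetical stand-ins for ena_meta and ena_com_meta_desc_changed(), not the driver's definitions.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical metadata layout, standing in for the driver's ena_meta. */
struct tx_meta_sketch {
	uint16_t l3_hdr_len;
	uint16_t l4_hdr_len;
	uint16_t mss;
};

struct sq_meta_sketch {
	bool disable_meta_caching;
	struct tx_meta_sketch cached_tx_meta;
};

/* Stand-in for ena_com_meta_desc_changed(): has the metadata changed? */
static bool meta_changed(const struct sq_meta_sketch *sq,
			 const struct tx_meta_sketch *meta)
{
	return memcmp(&sq->cached_tx_meta, meta, sizeof(*meta)) != 0;
}

/* Decide whether a metadata descriptor must be written for this packet. */
static bool need_meta_desc(struct sq_meta_sketch *sq,
			   const struct tx_meta_sketch *meta)
{
	if (sq->disable_meta_caching)
		return true;            /* always emit a fresh meta descriptor */

	if (meta_changed(sq, meta)) {
		/* Remember what is about to be programmed so identical
		 * packets can skip the descriptor next time. */
		memcpy(&sq->cached_tx_meta, meta, sizeof(sq->cached_tx_meta));
		return true;
	}

	return false;                   /* cached meta still valid, skip it */
}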
414 int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
423 u16 start_tail = io_sq->tail;
428 ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX,
429 ena_com_io_sq_to_ena_dev(io_sq), "wrong Q type");
432 if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) {
433 ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
438 if (unlikely(header_len > io_sq->tx_max_header_size)) {
439 ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
441 header_len, io_sq->tx_max_header_size);
445 if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV
447 ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
452 rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
456 rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta);
458 ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
465 rc = ena_com_close_bounce_buffer(io_sq);
467 ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
469 *nb_hw_desc = io_sq->tail - start_tail;
473 desc = get_sq_desc(io_sq);
485 desc->len_ctrl |= ((u32)io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
527 rc = ena_com_sq_update_tail(io_sq);
529 ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
534 desc = get_sq_desc(io_sq);
540 desc->len_ctrl |= ((u32)io_sq->phase <<
549 GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);
560 rc = ena_com_sq_update_tail(io_sq);
562 ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
567 rc = ena_com_close_bounce_buffer(io_sq);
569 ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
572 *nb_hw_desc = io_sq->tail - start_tail;
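Lines 549 and 671 show the high half of a buffer's DMA address being programmed: the 64-bit paddr is masked to the device's supported dma_addr_bits and shifted down by 32. Below is a self-contained sketch of that split. GENMASK_ULL is re-derived locally, the low-word assignment and the 32-bit output widths are assumptions (they are not among the matched lines), and dma_addr_bits is assumed to be larger than 32.

#include <stdint.h>

/* Same mask the kernel's GENMASK_ULL(h, l) builds: bits h..l set. */
#define GENMASK_ULL_SKETCH(h, l) \
	(((~0ULL) >> (63 - (h))) & ((~0ULL) << (l)))

/* Split a 64-bit bus address into the low/high descriptor fields the way
 * ena_com_prepare_tx() and ena_com_add_single_rx_desc() program it.
 * Assumes dma_addr_bits > 32. */
static void split_dma_addr(uint64_t paddr, unsigned int dma_addr_bits,
			   uint32_t *addr_lo, uint32_t *addr_hi)
{
	*addr_lo = (uint32_t)(paddr & 0xFFFFFFFFULL);   /* assumed low-word layout */
	*addr_hi = (uint32_t)((paddr &
			GENMASK_ULL_SKETCH(dma_addr_bits - 1, 32)) >> 32);
}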
577 struct ena_com_io_sq *io_sq,
624 io_sq->next_to_comp += nb_hw_desc;
628 io_sq->qid, io_sq->next_to_comp);
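next_to_comp advances by the number of hardware descriptors reclaimed on each completion, and together with the free-running tail it is the natural basis for the ena_com_sq_have_enough_space() checks seen at lines 432 and 647. The sketch below is a hypothetical version of such a check, not the driver's implementation: it assumes free space is the queue depth minus the in-flight count, with one slot held back.

#include <stdbool.h>
#include <stdint.h>

/* Only the producer/consumer counters visible in this listing. */
struct sq_space_sketch {
	uint16_t tail;          /* advanced as descriptors are queued  */
	uint16_t next_to_comp;  /* advanced as completions come back   */
	uint16_t q_depth;
};

/* Hypothetical space check in the spirit of ena_com_sq_have_enough_space().
 * The unsigned 16-bit subtraction yields the in-flight count even after the
 * counters wrap; keeping one slot free is an assumption of this sketch. */
static bool have_enough_space(const struct sq_space_sketch *sq, uint16_t need)
{
	uint16_t in_flight  = sq->tail - sq->next_to_comp;
	uint16_t free_slots = sq->q_depth - 1 - in_flight;

	return free_slots >= need;
}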
638 int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
644 ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
645 ena_com_io_sq_to_ena_dev(io_sq), "wrong Q type");
647 if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
650 desc = get_sq_desc(io_sq);
661 (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK);
665 ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
667 __func__, io_sq->qid, req_id);
671 ((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);
673 return ena_com_sq_update_tail(io_sq);