Lines Matching refs:io_sq
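The matches below come from the ena_com layer of the ENA (Elastic Network Adapter) driver; the ena_com_* identifiers and ENA_ADMIN_PLACEMENT_POLICY_* enums on the matched lines identify it. Together they trace the I/O submission queue (SQ) lifecycle: host-side allocation (ena_com_init_io_sq, called at line 1963), creation and destruction over the admin queue (ena_com_create_io_sq at line 1974, ena_com_destroy_io_sq at line 2001), memory release (ena_com_io_queue_free at lines 1983 and 2004), plus the RSS and handle-lookup code that consumes the device-assigned SQ index. The leading number on each line is its line number in the source file; the notes between blocks reference those numbers.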

353 struct ena_com_io_sq *io_sq)
358 memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
360 io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
361 io_sq->desc_entry_size =
362 (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
366 size = io_sq->desc_entry_size * io_sq->q_depth;
367 io_sq->bus = ena_dev->bus;
369 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
372 io_sq->desc_addr.virt_addr,
373 io_sq->desc_addr.phys_addr,
374 io_sq->desc_addr.mem_handle,
377 if (!io_sq->desc_addr.virt_addr) {
380 io_sq->desc_addr.virt_addr,
381 io_sq->desc_addr.phys_addr,
382 io_sq->desc_addr.mem_handle);
385 if (!io_sq->desc_addr.virt_addr) {
391 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
393 io_sq->bounce_buf_ctrl.buffer_size =
395 io_sq->bounce_buf_ctrl.buffers_num =
397 io_sq->bounce_buf_ctrl.next_to_use = 0;
399 size = io_sq->bounce_buf_ctrl.buffer_size *
400 io_sq->bounce_buf_ctrl.buffers_num;
404 io_sq->bounce_buf_ctrl.base_buffer,
407 if (!io_sq->bounce_buf_ctrl.base_buffer)
408 io_sq->bounce_buf_ctrl.base_buffer = ENA_MEM_ALLOC(ena_dev->dmadev, size);
410 if (!io_sq->bounce_buf_ctrl.base_buffer) {
415 memcpy(&io_sq->llq_info, &ena_dev->llq_info,
416 sizeof(io_sq->llq_info));
419 io_sq->llq_buf_ctrl.curr_bounce_buf =
420 ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
421 memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
422 0x0, io_sq->llq_info.desc_list_entry_size);
423 io_sq->llq_buf_ctrl.descs_left_in_line =
424 io_sq->llq_info.descs_num_before_header;
425 io_sq->disable_meta_caching =
426 io_sq->llq_info.disable_meta_caching;
428 if (io_sq->llq_info.max_entries_in_tx_burst > 0)
429 io_sq->entries_in_tx_burst_left =
430 io_sq->llq_info.max_entries_in_tx_burst;
433 io_sq->tail = 0;
434 io_sq->next_to_comp = 0;
435 io_sq->phase = 1;
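
Lines 353-435 fall inside ena_com_init_io_sq (the call at line 1963 names it). The placement policy picks the backing store: ENA_ADMIN_PLACEMENT_POLICY_HOST queues get a descriptor ring of desc_entry_size * q_depth bytes, with desc_entry_size chosen by the TX/RX direction (lines 361-366) and a fallback allocation path when the first attempt fails (lines 377-385); ENA_ADMIN_PLACEMENT_POLICY_DEV queues (LLQ, low-latency queue) instead get host bounce buffers plus a copy of the device-wide LLQ configuration (lines 391-430). Below is a minimal, self-contained sketch of that shape; every name in it (sq_model, PLACEMENT_*, calloc standing in for the DMA-coherent allocator) is a hypothetical stand-in, not the driver's API.

#include <stdlib.h>

enum placement { PLACEMENT_HOST, PLACEMENT_DEV };

struct sq_model {
        enum placement policy;
        size_t desc_entry_size;         /* TX vs RX descriptor size */
        unsigned int q_depth;
        void *desc_ring;                /* host placement */
        void *bounce_base;              /* device placement (LLQ) */
        size_t bounce_buf_size;
        unsigned int bounce_buf_num;
        unsigned int tail, next_to_comp, phase;
};

static int sq_model_init(struct sq_model *sq)
{
        if (sq->policy == PLACEMENT_HOST) {
                /* Lines 366-385: one ring of q_depth descriptors,
                 * DMA-coherent in the real driver (with a fallback
                 * allocation path when the first attempt fails). */
                sq->desc_ring = calloc(sq->q_depth, sq->desc_entry_size);
                if (!sq->desc_ring)
                        return -1;
        } else {
                /* Lines 391-410: LLQ descriptors are staged in host
                 * bounce buffers before being pushed to the device. */
                sq->bounce_base = calloc(sq->bounce_buf_num,
                                         sq->bounce_buf_size);
                if (!sq->bounce_base)
                        return -1;
        }

        /* Lines 433-435: empty ring; the phase bit starts at 1 so
         * first-pass descriptors are distinguishable from stale ones. */
        sq->tail = 0;
        sq->next_to_comp = 0;
        sq->phase = 1;
        return 0;
}
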
937 struct ena_com_io_sq *io_sq)
947 if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
956 destroy_cmd.sq.sq_idx = io_sq->idx;
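
Lines 937-956 sit in ena_com_destroy_io_sq (called at line 2001): the destroy command sent on the admin queue is tagged with the queue's TX/RX direction (line 947) and carries the device-assigned index recorded at create time (line 956; written at line 1329). A rough model of the command's payload, with assumed field names — the real driver fills a destroy-SQ admin command structure and submits it on the admin queue:

enum sq_direction { SQ_DIR_TX, SQ_DIR_RX };

struct destroy_sq_cmd_model {
        enum sq_direction dir;  /* line 947: from io_sq->direction */
        unsigned short sq_idx;  /* line 956: index the device assigned
                                 * at create time (see line 1329) */
};
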
972 struct ena_com_io_sq *io_sq,
989 if (io_sq->desc_addr.virt_addr) {
990 size = io_sq->desc_entry_size * io_sq->q_depth;
994 io_sq->desc_addr.virt_addr,
995 io_sq->desc_addr.phys_addr,
996 io_sq->desc_addr.mem_handle);
998 io_sq->desc_addr.virt_addr = NULL;
1001 if (io_sq->bounce_buf_ctrl.base_buffer) {
1003 io_sq->bounce_buf_ctrl.base_buffer,
1004 (io_sq->llq_info.desc_list_entry_size * ENA_COM_BOUNCE_BUFFER_CNTRL_CNT));
1005 io_sq->bounce_buf_ctrl.base_buffer = NULL;
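
Lines 972-1005 are ena_com_io_queue_free (called at lines 1983 and 2004). It is the mirror image of the init path: free the host descriptor ring if one was allocated, free the bounce-buffer area if one was allocated, and NULL both pointers so a repeated free is harmless. Extending the sq_model sketch above (stdlib.h is already included there):

static void sq_model_free(struct sq_model *sq)
{
        /* Lines 989-998: release the host descriptor ring if present
         * and clear the pointer so a second free is a no-op. */
        free(sq->desc_ring);
        sq->desc_ring = NULL;

        /* Lines 1001-1005: likewise for the LLQ bounce-buffer area. */
        free(sq->bounce_base);
        sq->bounce_base = NULL;
}
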
1275 struct ena_com_io_sq *io_sq, u16 cq_idx)
1287 if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
1296 create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
1307 create_cmd.sq_depth = io_sq->q_depth;
1309 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
1312 io_sq->desc_addr.phys_addr);
1329 io_sq->idx = cmd_completion.sq_idx;
1331 io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1334 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
1335 io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
1338 io_sq->desc_addr.pbuf_dev_addr =
1343 ena_trc_dbg(ena_dev, "Created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
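
Lines 1275-1343 are ena_com_create_io_sq (called at line 1974). The create command carries the direction (line 1287), the placement policy (line 1296), and the depth (line 1307); host-placed queues also pass the ring's physical address (lines 1309-1312). The completion returns the device-assigned index (line 1329), from which the driver derives the doorbell address in the register BAR (line 1331) and, for LLQ queues, the push addresses in the memory BAR (lines 1334-1338). A sketch of that post-create bookkeeping; the result fields and offsets are placeholders for whatever the real completion returns:

#include <stdint.h>

struct create_sq_result_model {
        uint16_t sq_idx;        /* line 1329 */
        uint32_t db_offset;     /* doorbell offset in the register BAR */
        uint32_t llq_offset;    /* LLQ push offset in the memory BAR */
};

struct sq_hw_view_model {
        volatile uint32_t *db_addr;     /* line 1331 */
        volatile uint8_t *header_addr;  /* line 1335 */
};

static void bind_sq_to_bars(struct sq_hw_view_model *hw,
                            uint8_t *reg_bar, uint8_t *mem_bar,
                            const struct create_sq_result_model *res,
                            int is_llq)
{
        /* The doorbell always lives in the register BAR... */
        hw->db_addr = (volatile uint32_t *)(reg_bar + res->db_offset);

        /* ...while only LLQ (device-placement) queues map a push
         * region in the memory BAR (lines 1334-1338). */
        if (is_llq)
                hw->header_addr = mem_bar + res->llq_offset;
}
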
1351 struct ena_com_io_sq *io_sq;
1360 io_sq = &ena_dev->io_sq_queues[qid];
1362 if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
1365 rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
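
Lines 1351-1365 convert the RSS indirection table from host queue ids to device queue indices: each entry must name an RX queue (the guard at line 1362) and is rewritten with the io_sq->idx the device assigned (line 1365). A sketch of that translation loop, with hypothetical types; is_rx and dev_idx mirror io_sq->direction and io_sq->idx from the matches above:

#include <stdint.h>

struct queue_view_model {
        int is_rx;
        uint16_t dev_idx;
};

static int convert_ind_tbl_model(const struct queue_view_model *queues,
                                 unsigned int nr_queues,
                                 uint16_t *tbl, unsigned int tbl_entries)
{
        for (unsigned int i = 0; i < tbl_entries; i++) {
                uint16_t qid = tbl[i];

                /* Line 1362: only RX queues may be RSS targets. */
                if (qid >= nr_queues || !queues[qid].is_rx)
                        return -1;

                /* Line 1365: rewrite with the device-assigned index. */
                tbl[i] = queues[qid].dev_idx;
        }
        return 0;
}
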
1495 struct ena_com_io_sq **io_sq,
1504 *io_sq = &ena_dev->io_sq_queues[qid];
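
Lines 1495-1504 show the handle-lookup helper: rather than copying state, callers receive a pointer straight into ena_dev's preallocated io_sq_queues array. The out-parameter pattern, reusing struct sq_model from the first sketch (the bounds check is an assumption about the surrounding code):

static int get_sq_handle_model(struct sq_model *queues,
                               unsigned int nr_queues, unsigned int qid,
                               struct sq_model **out)
{
        if (qid >= nr_queues)
                return -1;
        *out = &queues[qid];    /* line 1504: pointer into the array */
        return 0;
}
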
1929 struct ena_com_io_sq *io_sq;
1939 io_sq = &ena_dev->io_sq_queues[ctx->qid];
1942 memset(io_sq, 0x0, sizeof(*io_sq));
1952 io_sq->q_depth = ctx->queue_size;
1953 io_sq->direction = ctx->direction;
1954 io_sq->qid = ctx->qid;
1956 io_sq->mem_queue_type = ctx->mem_queue_type;
1960 io_sq->tx_max_header_size =
1963 ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
1974 ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
1983 ena_com_io_queue_free(ena_dev, io_sq, io_cq);
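
Lines 1929-1983 are the top-level create path: validate the qid, zero the io_sq slot (line 1942), copy the caller's context into it (lines 1952-1960), run ena_com_init_io_sq (line 1963) and ena_com_create_io_sq (line 1974), and unwind through ena_com_io_queue_free if anything fails (line 1983). In model form, reusing the helpers above; the sizing constants are placeholders:

#include <string.h>     /* memset */

static int create_io_queue_model(struct sq_model *queues,
                                 unsigned int nr_queues, unsigned int qid,
                                 unsigned int depth, enum placement policy)
{
        struct sq_model *sq;

        if (qid >= nr_queues)
                return -1;

        sq = &queues[qid];
        memset(sq, 0, sizeof(*sq));     /* line 1942 */

        sq->q_depth = depth;            /* lines 1952-1956 */
        sq->policy = policy;
        sq->desc_entry_size = 16;       /* placeholder sizing */
        sq->bounce_buf_size = 4096;     /* placeholder sizing */
        sq->bounce_buf_num = 8;         /* placeholder sizing */

        if (sq_model_init(sq))          /* line 1963 */
                return -1;

        /* The real driver now issues the create-SQ admin command
         * (line 1974) and unwinds via the free helper on failure
         * (line 1983). */
        return 0;
}
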
1989 struct ena_com_io_sq *io_sq;
1998 io_sq = &ena_dev->io_sq_queues[qid];
2001 ena_com_destroy_io_sq(ena_dev, io_sq);
2004 ena_com_io_queue_free(ena_dev, io_sq, io_cq);
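
Lines 1989-2004 show the destroy ordering: the admin destroy command goes to the device first (line 2001) and host memory is released only afterwards (line 2004), so the device cannot be left DMAing into freed buffers. In model form:

static void destroy_io_queue_model(struct sq_model *queues,
                                   unsigned int qid)
{
        struct sq_model *sq = &queues[qid];

        /* Line 2001: the device is told to tear the SQ down first
         * (the admin command modeled by destroy_sq_cmd_model)... */

        /* Line 2004: ...and host memory is released only afterwards,
         * so no in-flight DMA can target freed buffers. */
        sq_model_free(sq);
}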