Lines Matching refs:ena_dev

104 static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
108 if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
109 ena_trc_err(ena_dev, "DMA address has more bits than the device supports\n");
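
The check at line 108 rejects any DMA address with bits set above dma_addr_bits - 1. A minimal standalone sketch of the same test, assuming the usual GENMASK_ULL(h, l) semantics (a 64-bit mask with bits h..l set):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the kernel's GENMASK_ULL(h, l): bits h..l set. */
    #define GENMASK_ULL(h, l) ((~0ULL << (l)) & (~0ULL >> (63 - (h))))

    /* An address fits if masking it to dma_addr_bits changes nothing. */
    static int dma_addr_fits(uint64_t addr, unsigned int dma_addr_bits)
    {
        return (addr & GENMASK_ULL(dma_addr_bits - 1, 0)) == addr;
    }

    int main(void)
    {
        printf("%d\n", dma_addr_fits(1ULL << 40, 32)); /* 0: bit 40 exceeds a 32-bit width */
        printf("%d\n", dma_addr_fits(0xffffULL, 32));  /* 1: fits */
        return 0;
    }
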
121 struct ena_com_dev *ena_dev = admin_queue->ena_dev;
129 ena_trc_err(ena_dev, "Memory allocation failed\n");
144 struct ena_com_dev *ena_dev = admin_queue->ena_dev;
152 ena_trc_err(ena_dev, "Memory allocation failed\n");
162 static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
165 struct ena_com_aenq *aenq = &ena_dev->aenq;
169 ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
171 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, size,
177 ena_trc_err(ena_dev, "Memory allocation failed\n");
187 ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
188 ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
191 aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
195 ENA_REG_WRITE32(ena_dev->bus, aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
198 ena_trc_err(ena_dev, "AENQ handlers pointer is NULL\n");
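
Lines 187-195 show two recurring idioms in ena_com_admin_init_aenq: a 64-bit queue base address split across a LO/HI register pair, and a caps word composed from mask/shift fields. A rough illustration of both, with the field layout as a placeholder rather than the real ENA_REGS_* definitions:

    #include <stdint.h>

    static inline uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
    static inline uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

    /* Illustrative layout only: depth in the low 16 bits, entry size above. */
    #define AENQ_CAPS_DEPTH_MASK       0x0000ffffu
    #define AENQ_CAPS_ENTRY_SIZE_SHIFT 16
    #define AENQ_CAPS_ENTRY_SIZE_MASK  0xffff0000u

    static uint32_t build_aenq_caps(uint16_t q_depth, uint16_t entry_size)
    {
        uint32_t caps = 0;

        caps |= q_depth & AENQ_CAPS_DEPTH_MASK;
        caps |= ((uint32_t)entry_size << AENQ_CAPS_ENTRY_SIZE_SHIFT) &
                AENQ_CAPS_ENTRY_SIZE_MASK;
        return caps;
    }

The same lower/upper split reappears for the admin SQ/CQ bases at lines 1889-1896 and for the MMIO response buffer at lines 1837-1838.
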
218 ena_trc_err(admin_queue->ena_dev,
225 ena_trc_err(admin_queue->ena_dev,
231 ena_trc_err(admin_queue->ena_dev,
262 ena_trc_dbg(admin_queue->ena_dev, "Admin queue is full.\n");
306 struct ena_com_dev *ena_dev = admin_queue->ena_dev;
313 ena_trc_err(ena_dev, "Memory allocation failed\n");
351 static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
360 io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
367 io_sq->bus = ena_dev->bus;
370 ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
378 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
386 ena_trc_err(ena_dev, "Memory allocation failed\n");
394 ena_dev->llq_info.desc_list_entry_size;
402 ENA_MEM_ALLOC_NODE(ena_dev->dmadev,
408 io_sq->bounce_buf_ctrl.base_buffer = ENA_MEM_ALLOC(ena_dev->dmadev, size);
411 ena_trc_err(ena_dev, "Bounce buffer memory allocation failed\n");
415 memcpy(&io_sq->llq_info, &ena_dev->llq_info,
440 static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
456 io_cq->bus = ena_dev->bus;
458 ENA_MEM_ALLOC_COHERENT_NODE_ALIGNED(ena_dev->dmadev,
467 ENA_MEM_ALLOC_COHERENT_ALIGNED(ena_dev->dmadev,
476 ena_trc_err(ena_dev, "Memory allocation failed\n");
497 ena_trc_err(admin_queue->ena_dev,
554 ena_trc_err(admin_queue->ena_dev,
602 ena_trc_err(admin_queue->ena_dev,
615 admin_queue->ena_dev->ena_min_poll_delay_us);
619 ena_trc_err(admin_queue->ena_dev, "Command was aborted\n");
628 admin_queue->ena_dev, "Invalid comp status %d\n",
643 static int ena_com_set_llq(struct ena_com_dev *ena_dev)
648 struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
652 admin_queue = &ena_dev->admin_queue;
673 ena_trc_err(ena_dev, "Failed to set LLQ configurations: %d\n", ret);
678 static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
682 struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
695 ena_trc_err(ena_dev, "Invalid header location control, supported: 0x%x\n",
710 ena_trc_err(ena_dev, "Invalid desc_stride_ctrl, supported: 0x%x\n",
715 ena_trc_err(ena_dev, "Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
739 ena_trc_err(ena_dev, "Invalid entry_size_ctrl, supported: 0x%x\n",
744 ena_trc_err(ena_dev, "Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
753 ena_trc_err(ena_dev, "Illegal entry size %d\n",
777 ena_trc_err(ena_dev, "Invalid descs_num_before_header, supported: 0x%x\n",
782 ena_trc_err(ena_dev, "Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
799 rc = ena_com_set_llq(ena_dev);
801 ena_trc_err(ena_dev, "Cannot set LLQ configuration: %d\n", rc);
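
The three "performing fallback" messages in ena_com_config_llq_info (lines 715, 744, 782) follow one pattern: if the preferred value's bit is missing from the device's supported bitmap, pick a supported value instead. A generic sketch of that selection, assuming the fallback is simply the lowest advertised option:

    #include <stdint.h>
    #include <stdio.h>

    /* Return `preferred` if the device advertises it; otherwise fall back
     * to the lowest set bit of the supported bitmap (0 if none). */
    static uint32_t pick_llq_option(uint32_t supported_mask, uint32_t preferred)
    {
        if (supported_mask & preferred)
            return preferred;
        return supported_mask & (~supported_mask + 1u); /* lowest set bit */
    }

    int main(void)
    {
        printf("0x%x\n", pick_llq_option(0x6, 0x1)); /* 0x2: fallback taken */
        printf("0x%x\n", pick_llq_option(0x6, 0x4)); /* 0x4: preferred kept */
        return 0;
    }
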
827 ena_trc_err(admin_queue->ena_dev,
834 ena_trc_err(admin_queue->ena_dev,
859 static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
861 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
875 return ENA_REG_READ32(ena_dev->bus, ena_dev->reg_bar + offset);
886 ENA_REG_WRITE32(ena_dev->bus, mmio_read_reg,
887 ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
897 ena_trc_err(ena_dev, "Reading reg timed out. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
907 ena_trc_err(ena_dev, "Read failure: wrong offset provided\n");
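
Lines 886-907 outline the "readless" register read: the driver posts a request (request id plus register offset) to ENA_REGS_MMIO_REG_READ_OFF and the device DMAs the completion into a host buffer that the driver then polls. A schematic of the polling side; the record layout, field widths, and timeout handling here are assumptions:

    #include <stdint.h>

    /* Assumed layout of the device-written completion record. */
    struct mmio_resp {
        volatile uint16_t req_id;
        volatile uint16_t reg_off;
        volatile uint32_t reg_val;
    };

    #define MMIO_READ_TIMEOUT 0xffffffffu

    /* Spin until the device echoes our request id, or give up. */
    static uint32_t mmio_poll_resp(const struct mmio_resp *resp,
                                   uint16_t req_id, unsigned int spins)
    {
        while (spins--) {
            if (resp->req_id == req_id)
                return resp->reg_val;
            /* A real driver inserts a small delay per iteration. */
        }
        return MMIO_READ_TIMEOUT;
    }

The mismatch error at line 897 corresponds to the loop expiring while the buffer still holds a stale req id/offset pair.
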
936 static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
939 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
966 ena_trc_err(ena_dev, "Failed to destroy io sq error: %d\n", ret);
971 static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
980 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
992 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1002 ENA_MEM_FREE(ena_dev->dmadev,
1009 static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
1019 val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
1022 ena_trc_err(ena_dev, "Reg read timeout occurred\n");
1033 ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
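
wait_for_reset_state (lines 1009-1033) polls the device status register with a growing delay between reads. A plausible shape for ena_delay_exponential_backoff_us, assuming it scales a minimum delay by 2^exp and clamps both the exponent and the result (the constants here are guesses, not the driver's):

    #include <stdint.h>
    #include <unistd.h>

    #define MIN_POLL_US     100u
    #define MAX_POLL_US     5000u
    #define MAX_BACKOFF_EXP 16u

    /* Sleep roughly min_delay_us << exp microseconds, bounded on both ends. */
    static void delay_exponential_backoff_us(uint32_t exp, uint32_t min_delay_us)
    {
        uint64_t delay_us;

        if (exp > MAX_BACKOFF_EXP)
            exp = MAX_BACKOFF_EXP;      /* keep the shift sane */
        if (min_delay_us < MIN_POLL_US)
            min_delay_us = MIN_POLL_US;
        delay_us = (uint64_t)min_delay_us << exp;
        if (delay_us > MAX_POLL_US)
            delay_us = MAX_POLL_US;     /* cap the per-iteration wait */
        usleep((useconds_t)delay_us);
    }
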
1037 static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
1044 !(ena_dev->supported_features & feature_mask))
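
ena_com_check_supported_feature_id (lines 1037-1044) reduces to a single bitmap test against the supported_features word the device reported; the real function also special-cases a few always-available feature ids, which this minimal equivalent omits:

    #include <stdbool.h>
    #include <stdint.h>

    #define BIT(n) (1u << (n)) /* mirrors the usual kernel macro */

    /* True if the device advertised this feature id in its bitmap. */
    static bool feature_supported(uint32_t supported_features,
                                  unsigned int feature_id)
    {
        return (supported_features & BIT(feature_id)) != 0;
    }
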
1050 static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
1061 if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
1062 ena_trc_dbg(ena_dev, "Feature %d isn't supported\n", feature_id);
1067 admin_queue = &ena_dev->admin_queue;
1077 ret = ena_com_mem_addr_set(ena_dev,
1081 ena_trc_err(ena_dev, "Memory address set failed\n");
1098 ena_trc_err(ena_dev, "Failed to submit get_feature command %d error: %d\n",
1104 static int ena_com_get_feature(struct ena_com_dev *ena_dev,
1109 return ena_com_get_feature_ex(ena_dev,
1117 int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev)
1119 return ena_dev->rss.hash_func;
1122 static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
1125 (ena_dev->rss).hash_key;
1134 static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
1136 struct ena_rss *rss = &ena_dev->rss;
1138 if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION))
1141 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
1153 static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
1155 struct ena_rss *rss = &ena_dev->rss;
1158 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1166 static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
1168 struct ena_rss *rss = &ena_dev->rss;
1170 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
1182 static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
1184 struct ena_rss *rss = &ena_dev->rss;
1187 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1195 static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
1198 struct ena_rss *rss = &ena_dev->rss;
1203 ret = ena_com_get_feature(ena_dev, &get_resp,
1210 ena_trc_err(ena_dev, "Indirect table size doesn't fit. requested size: %d while min is: %d and max is: %d\n",
1220 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
1230 ENA_MEM_ALLOC(ena_dev->dmadev, tbl_size);
1242 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1253 static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
1255 struct ena_rss *rss = &ena_dev->rss;
1260 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1268 ENA_MEM_FREE(ena_dev->dmadev,
1274 static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
1277 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1310 ret = ena_com_mem_addr_set(ena_dev,
1314 ena_trc_err(ena_dev, "Memory address set failed\n");
1325 ena_trc_err(ena_dev, "Failed to create IO SQ. error: %d\n", ret);
1331 io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1335 io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
1339 (u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
1343 ena_trc_dbg(ena_dev, "Created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
1348 static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
1350 struct ena_rss *rss = &ena_dev->rss;
1360 io_sq = &ena_dev->io_sq_queues[qid];
1371 static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
1374 u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution;
1377 ena_trc_err(ena_dev, "Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
1382 ena_dev->intr_moder_rx_interval =
1383 ena_dev->intr_moder_rx_interval *
1388 ena_dev->intr_moder_tx_interval =
1389 ena_dev->intr_moder_tx_interval *
1393 ena_dev->intr_delay_resolution = intr_delay_resolution;
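
Lines 1374-1393 rescale the stored RX/TX moderation intervals whenever the device's delay resolution changes, so the configured time in microseconds stays constant: interval_new = interval_old * prev_resolution / new_resolution. A sketch, including the fallback to 1 usec resolution noted at line 1377:

    #include <stdint.h>

    /* Rescale an interval (stored in resolution units) so it represents
     * the same wall-clock time under the new resolution. */
    static uint32_t rescale_interval(uint32_t interval, uint16_t prev_res,
                                     uint16_t new_res)
    {
        if (prev_res == 0)
            prev_res = 1; /* first configuration: intervals were in usecs */
        if (new_res == 0)
            new_res = 1;  /* illegal value: fall back to 1 usec resolution */
        return interval * prev_res / new_res;
    }
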
1413 ena_trc_dbg(admin_queue->ena_dev,
1417 ena_trc_err(admin_queue->ena_dev,
1427 ena_trc_err(admin_queue->ena_dev,
1430 ena_trc_dbg(admin_queue->ena_dev,
1436 int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
1439 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1456 ret = ena_com_mem_addr_set(ena_dev,
1460 ena_trc_err(ena_dev, "Memory address set failed\n");
1470 ena_trc_err(ena_dev, "Failed to create IO CQ. error: %d\n", ret);
1476 io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1481 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1486 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1489 ena_trc_dbg(ena_dev, "Created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
1494 int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
1499 ena_trc_err(ena_dev, "Invalid queue number %d but the max is %d\n",
1504 *io_sq = &ena_dev->io_sq_queues[qid];
1505 *io_cq = &ena_dev->io_cq_queues[qid];
1510 void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
1512 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1530 void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
1532 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1539 ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
1545 int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
1548 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1565 ena_trc_err(ena_dev, "Failed to destroy IO CQ. error: %d\n", ret);
1570 bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
1572 return ena_dev->admin_queue.running_state;
1575 void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
1577 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1581 ena_dev->admin_queue.running_state = state;
1585 void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
1587 u16 depth = ena_dev->aenq.q_depth;
1589 ENA_WARN(ena_dev->aenq.head != depth, ena_dev, "Invalid AENQ state\n");
1594 ENA_REG_WRITE32(ena_dev->bus, depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
1597 int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
1605 ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
1607 ena_trc_info(ena_dev, "Can't get aenq configuration\n");
1612 ena_trc_warn(ena_dev, "Trying to set unsupported AENQ events. supported flag: 0x%x asked flag: 0x%x\n",
1619 admin_queue = &ena_dev->admin_queue;
1633 ena_trc_err(ena_dev, "Failed to config AENQ ret: %d\n", ret);
1638 int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
1640 u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
1644 ena_trc_err(ena_dev, "Reg read timeout occurred\n");
1651 ena_trc_dbg(ena_dev, "ENA dma width: %d\n", width);
1654 ena_trc_err(ena_dev, "DMA width illegal value: %d\n", width);
1658 ena_dev->dma_addr_bits = width;
1663 int ena_com_validate_version(struct ena_com_dev *ena_dev)
1672 ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
1673 ctrl_ver = ena_com_reg_bar_read32(ena_dev,
1678 ena_trc_err(ena_dev, "Reg read timeout occurred\n");
1682 ena_trc_info(ena_dev, "ENA device version: %d.%d\n",
1687 ena_trc_info(ena_dev, "ENA controller version: %d.%d.%d implementation version %d\n",
1703 ena_trc_err(ena_dev, "ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
1711 ena_com_free_ena_admin_queue_comp_ctx(struct ena_com_dev *ena_dev,
1719 ENA_MEM_FREE(ena_dev->dmadev,
1726 void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
1728 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1731 struct ena_com_aenq *aenq = &ena_dev->aenq;
1734 ena_com_free_ena_admin_queue_comp_ctx(ena_dev, admin_queue);
1738 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, sq->entries,
1744 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, cq->entries,
1749 if (ena_dev->aenq.entries)
1750 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, aenq->entries,
1756 void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
1763 ENA_REG_WRITE32(ena_dev->bus, mask_value,
1764 ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
1765 ena_dev->admin_queue.polling = polling;
1768 bool ena_com_get_admin_polling_mode(struct ena_com_dev *ena_dev)
1770 return ena_dev->admin_queue.polling;
1773 void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
1776 ena_dev->admin_queue.auto_polling = polling;
1779 int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
1781 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1784 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
1792 ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
1805 void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
1807 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1812 void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
1814 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1816 ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1817 ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1819 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1829 void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
1831 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1837 ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1838 ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1841 int ena_com_admin_init(struct ena_com_dev *ena_dev,
1844 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1848 dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
1851 ena_trc_err(ena_dev, "Reg read timeout occurred\n");
1856 ena_trc_err(ena_dev, "Device isn't ready, aborting com init\n");
1862 admin_queue->bus = ena_dev->bus;
1863 admin_queue->q_dmadev = ena_dev->dmadev;
1883 admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1889 ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
1890 ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);
1895 ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
1896 ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);
1910 ENA_REG_WRITE32(ena_dev->bus, aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
1911 ENA_REG_WRITE32(ena_dev->bus, acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
1912 ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
1916 admin_queue->ena_dev = ena_dev;
1921 ena_com_admin_destroy(ena_dev);
1926 int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
1934 ena_trc_err(ena_dev, "Qid (%d) is bigger than max num of queues (%d)\n",
1939 io_sq = &ena_dev->io_sq_queues[ctx->qid];
1940 io_cq = &ena_dev->io_cq_queues[ctx->qid];
1961 ENA_MIN32(ena_dev->tx_max_header_size, SZ_256);
1963 ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
1966 ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
1970 ret = ena_com_create_io_cq(ena_dev, io_cq);
1974 ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
1981 ena_com_destroy_io_cq(ena_dev, io_cq);
1983 ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1987 void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
1993 ena_trc_err(ena_dev, "Qid (%d) is bigger than max num of queues (%d)\n",
1998 io_sq = &ena_dev->io_sq_queues[qid];
1999 io_cq = &ena_dev->io_cq_queues[qid];
2001 ena_com_destroy_io_sq(ena_dev, io_sq);
2002 ena_com_destroy_io_cq(ena_dev, io_cq);
2004 ena_com_io_queue_free(ena_dev, io_sq, io_cq);
2007 int ena_com_get_link_params(struct ena_com_dev *ena_dev,
2010 return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
2013 int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
2019 rc = ena_com_get_feature(ena_dev, &get_resp,
2027 ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
2029 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
2030 rc = ena_com_get_feature(ena_dev, &get_resp,
2041 ena_dev->tx_max_header_size =
2044 rc = ena_com_get_feature(ena_dev, &get_resp,
2048 ena_dev->tx_max_header_size =
2055 rc = ena_com_get_feature(ena_dev, &get_resp,
2063 rc = ena_com_get_feature(ena_dev, &get_resp,
2074 rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);
2084 rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
2096 void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
2098 ena_com_handle_admin_completion(&ena_dev->admin_queue);
2104 static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *ena_dev,
2107 struct ena_aenq_handlers *aenq_handlers = ena_dev->aenq.aenq_handlers;
2119 void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
2123 struct ena_com_aenq *aenq = &ena_dev->aenq;
2145 ena_trc_dbg(ena_dev, "AENQ! Group[%x] Syndrome[%x] timestamp: [%" ENA_PRIu64 "s]\n",
2151 handler_cb = ena_com_get_specific_aenq_cb(ena_dev,
2176 ENA_REG_WRITE32_RELAXED(ena_dev->bus, (u32)aenq->head,
2177 ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
2186 int ena_com_extended_stats_set_func_queue(struct ena_com_dev *ena_dev,
2194 ena_dev->stats_func = ENA_EXTENDED_STAT_GET_FUNCT(func_queue);
2195 ena_dev->stats_queue = ENA_EXTENDED_STAT_GET_QUEUE(func_queue);
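
Lines 2194-2195 unpack a combined func/queue identifier through two macros. A plausible packing, assuming the function id sits in the low 16 bits and the queue index in the high 16 (the real ENA_EXTENDED_STAT_GET_* macros may differ):

    #include <stdint.h>

    /* Hypothetical layout of the packed func/queue word. */
    #define EXT_STAT_GET_FUNCT(fq) ((uint16_t)((fq) & 0xffffu))
    #define EXT_STAT_GET_QUEUE(fq) ((uint16_t)((fq) >> 16))
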
2202 int ena_com_dev_reset(struct ena_com_dev *ena_dev,
2208 stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
2209 cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
2213 ena_trc_err(ena_dev, "Reg read32 timeout occurred\n");
2218 ena_trc_err(ena_dev, "Device isn't ready, can't reset device\n");
2225 ena_trc_err(ena_dev, "Invalid timeout value\n");
2233 ENA_REG_WRITE32(ena_dev->bus, reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2236 ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
2238 rc = wait_for_reset_state(ena_dev, timeout,
2241 ena_trc_err(ena_dev, "Reset indication didn't turn on\n");
2246 ENA_REG_WRITE32(ena_dev->bus, 0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2247 rc = wait_for_reset_state(ena_dev, timeout, 0);
2249 ena_trc_err(ena_dev, "Reset indication didn't turn off\n");
2257 ena_dev->admin_queue.completion_timeout = timeout * 100000;
2259 ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;
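
The reset sequence (lines 2233-2249) is a two-phase handshake: assert the reset bit (together with the reset reason), wait for the device to raise its reset-in-progress status, clear the bit, then wait for the status to drop. Line 2257's factor of 100000 suggests the caps-register timeout field is in 100 ms units, converted here to microseconds. A self-contained toy model of the handshake; the register file and bit names are stand-ins, not the real ENA_REGS_* layout:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t regs[2];                    /* toy device BAR */
    enum { DEV_CTL, DEV_STS };
    #define DEV_CTL_RESET_BIT         0x1u
    #define DEV_STS_RESET_IN_PROGRESS 0x1u

    static void reg_write(int off, uint32_t val)
    {
        regs[off] = val;
        /* Toy device: status mirrors the reset bit immediately. */
        regs[DEV_STS] = (regs[DEV_CTL] & DEV_CTL_RESET_BIT) ?
                        DEV_STS_RESET_IN_PROGRESS : 0;
    }

    static int wait_for_reset_state(uint32_t want, unsigned int spins)
    {
        while (spins--)
            if ((regs[DEV_STS] & DEV_STS_RESET_IN_PROGRESS) == want)
                return 0;
        return -1; /* timeout */
    }

    int main(void)
    {
        reg_write(DEV_CTL, DEV_CTL_RESET_BIT);  /* assert reset */
        if (wait_for_reset_state(DEV_STS_RESET_IN_PROGRESS, 100)) {
            puts("reset indication didn't turn on");
            return 1;
        }
        reg_write(DEV_CTL, 0);                  /* release reset */
        if (wait_for_reset_state(0, 100)) {
            puts("reset indication didn't turn off");
            return 1;
        }
        puts("reset complete");
        return 0;
    }
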
2264 static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
2273 admin_queue = &ena_dev->admin_queue;
2286 ena_trc_err(ena_dev, "Failed to get stats. error: %d\n", ret);
2291 int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
2298 ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI);
2306 int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
2313 ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
2322 int ena_com_get_dev_extended_stats(struct ena_com_dev *ena_dev, char *buff,
2332 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, len,
2339 ret = ena_com_mem_addr_set(ena_dev,
2343 ena_trc_err(ena_dev, "Memory address set failed\n");
2348 get_cmd->device_id = ena_dev->stats_func;
2349 get_cmd->queue_idx = ena_dev->stats_queue;
2351 ret = ena_get_dev_stats(ena_dev, &ctx,
2359 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, len, virt_addr, phys_addr,
2366 int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
2373 if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
2374 ena_trc_dbg(ena_dev, "Feature %d isn't supported\n", ENA_ADMIN_MTU);
2379 admin_queue = &ena_dev->admin_queue;
2393 ena_trc_err(ena_dev, "Failed to set mtu %d. error: %d\n", mtu, ret);
2398 int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
2404 ret = ena_com_get_feature(ena_dev, &resp,
2407 ena_trc_err(ena_dev, "Failed to get offload capabilities %d\n", ret);
2416 int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
2418 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2419 struct ena_rss *rss = &ena_dev->rss;
2425 if (!ena_com_check_supported_feature_id(ena_dev,
2427 ena_trc_dbg(ena_dev, "Feature %d isn't supported\n",
2433 ret = ena_com_get_feature(ena_dev, &get_resp,
2439 ena_trc_err(ena_dev, "Func hash %d isn't supported by device, abort\n",
2453 ret = ena_com_mem_addr_set(ena_dev,
2457 ena_trc_err(ena_dev, "Memory address set failed\n");
2469 ena_trc_err(ena_dev, "Failed to set hash function %d. error: %d\n",
2477 int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
2484 struct ena_rss *rss = &ena_dev->rss;
2493 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2501 ena_trc_err(ena_dev, "Flow hash function %d isn't supported\n", func);
2509 ena_trc_err(ena_dev, "key len (%hu) doesn't equal the supported size (%zu)\n",
2522 ena_trc_err(ena_dev, "Invalid hash function (%d)\n", func);
2528 rc = ena_com_set_hash_function(ena_dev);
2537 int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
2540 struct ena_rss *rss = &ena_dev->rss;
2547 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2564 int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key)
2567 ena_dev->rss.hash_key;
2576 int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
2580 struct ena_rss *rss = &ena_dev->rss;
2584 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2597 int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
2599 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2600 struct ena_rss *rss = &ena_dev->rss;
2606 if (!ena_com_check_supported_feature_id(ena_dev,
2608 ena_trc_dbg(ena_dev, "Feature %d isn't supported\n",
2623 ret = ena_com_mem_addr_set(ena_dev,
2627 ena_trc_err(ena_dev, "Memory address set failed\n");
2638 ena_trc_err(ena_dev, "Failed to set hash input. error: %d\n", ret);
2643 int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
2645 struct ena_rss *rss = &ena_dev->rss;
2652 rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2688 ena_trc_err(ena_dev, "Hash control doesn't support all of the desired configuration. proto %x supported %x selected %x\n",
2695 rc = ena_com_set_hash_ctrl(ena_dev);
2699 ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2704 int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
2708 struct ena_rss *rss = &ena_dev->rss;
2714 ena_trc_err(ena_dev, "Invalid proto num (%u)\n", proto);
2719 rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
2726 ena_trc_err(ena_dev, "Proto %d doesn't support the required fields %x. supports only: %x\n",
2732 rc = ena_com_set_hash_ctrl(ena_dev);
2736 ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2741 int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
2744 struct ena_rss *rss = &ena_dev->rss;
2757 int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
2759 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2760 struct ena_rss *rss = &ena_dev->rss;
2765 if (!ena_com_check_supported_feature_id(ena_dev,
2767 ena_trc_dbg(ena_dev, "Feature %d isn't supported\n",
2772 ret = ena_com_ind_tbl_convert_to_device(ena_dev);
2774 ena_trc_err(ena_dev, "Failed to convert host indirection table to device table\n");
2787 ret = ena_com_mem_addr_set(ena_dev,
2791 ena_trc_err(ena_dev, "Memory address set failed\n");
2805 ena_trc_err(ena_dev, "Failed to set indirect table. error: %d\n", ret);
2810 int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
2812 struct ena_rss *rss = &ena_dev->rss;
2820 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2836 int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
2840 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2842 rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
2850 rc = ena_com_hash_key_allocate(ena_dev);
2852 ena_com_hash_key_fill_default_key(ena_dev);
2856 rc = ena_com_hash_ctrl_init(ena_dev);
2863 ena_com_hash_key_destroy(ena_dev);
2865 ena_com_indirect_table_destroy(ena_dev);
2871 void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
2873 ena_com_indirect_table_destroy(ena_dev);
2874 ena_com_hash_key_destroy(ena_dev);
2875 ena_com_hash_ctrl_destroy(ena_dev);
2877 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2880 int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
2882 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2884 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
2899 int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
2902 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2904 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
2919 void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
2921 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2924 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
2933 void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
2935 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2938 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
2947 int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
2949 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2961 admin_queue = &ena_dev->admin_queue;
2966 ret = ena_com_mem_addr_set(ena_dev,
2970 ena_trc_err(ena_dev, "Memory address set failed\n");
2974 ret = ena_com_mem_addr_set(ena_dev,
2978 ena_trc_err(ena_dev, "Memory address set failed\n");
2991 ena_trc_err(ena_dev, "Failed to set host attributes: %d\n", ret);
2997 bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
2999 return ena_com_check_supported_feature_id(ena_dev,
3003 static int ena_com_update_nonadaptive_moderation_interval(struct ena_com_dev *ena_dev,
3009 ena_trc_err(ena_dev, "Illegal interrupt delay granularity value\n");
3018 int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
3021 return ena_com_update_nonadaptive_moderation_interval(ena_dev,
3023 ena_dev->intr_delay_resolution,
3024 &ena_dev->intr_moder_tx_interval);
3027 int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
3030 return ena_com_update_nonadaptive_moderation_interval(ena_dev,
3032 ena_dev->intr_delay_resolution,
3033 &ena_dev->intr_moder_rx_interval);
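
Both wrappers at lines 3018-3033 funnel into ena_com_update_nonadaptive_moderation_interval, which converts a requested coalescing time in microseconds into device units by dividing by the delay resolution, after rejecting the zero-resolution case flagged at line 3009. A minimal version:

    #include <errno.h>
    #include <stdint.h>

    /* Convert a coalescing time in usecs into device moderation units. */
    static int update_moderation_interval(uint32_t coalesce_usecs,
                                          uint32_t delay_resolution,
                                          uint32_t *interval_out)
    {
        if (delay_resolution == 0)
            return -EINVAL; /* illegal interrupt delay granularity */
        *interval_out = coalesce_usecs / delay_resolution;
        return 0;
    }
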
3036 int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
3042 rc = ena_com_get_feature(ena_dev, &get_resp,
3047 ena_trc_dbg(ena_dev, "Feature %d isn't supported\n",
3051 ena_trc_err(ena_dev,
3056 ena_com_disable_adaptive_moderation(ena_dev);
3062 ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
3065 ena_com_disable_adaptive_moderation(ena_dev);
3070 unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
3072 return ena_dev->intr_moder_tx_interval;
3075 unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
3077 return ena_dev->intr_moder_rx_interval;
3080 int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
3084 struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
3088 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3092 rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
3096 ena_dev->tx_max_header_size = llq_info->desc_list_entry_size -
3099 if (unlikely(ena_dev->tx_max_header_size == 0)) {
3100 ena_trc_err(ena_dev, "The size of the LLQ entry is smaller than needed\n");
3104 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
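
The closing lines (3096-3100) derive tx_max_header_size from the LLQ entry: whatever remains of the descriptor-list entry after the descriptors that precede the packet header. A sketch of that arithmetic, with the 16-byte TX descriptor size taken as an assumption:

    #include <stdint.h>

    #define TX_DESC_SIZE 16u /* assumed size of the device TX descriptor */

    /* Header room left in one LLQ entry; 0 means the entry is too small,
     * matching the error at line 3100. */
    static uint32_t llq_tx_max_header_size(uint32_t entry_size,
                                           uint32_t descs_before_header)
    {
        uint32_t used = descs_before_header * TX_DESC_SIZE;

        return used >= entry_size ? 0 : entry_size - used;
    }
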