ena_com.c revision 1.2
/*-
 * BSD LICENSE
 *
 * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of copyright holder nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "ena_com.h"
#ifdef ENA_INTERNAL
#include "ena_gen_info.h"
#endif

/*****************************************************************************/
/*****************************************************************************/

/* Timeout in micro-sec */
#define ADMIN_CMD_TIMEOUT_US (3000000)

#define ENA_ASYNC_QUEUE_DEPTH 16
#define ENA_ADMIN_QUEUE_DEPTH 32

#ifdef ENA_EXTENDED_STATS

#define ENA_HISTOGRAM_ACTIVE_MASK_OFFSET 0xF08
#define ENA_EXTENDED_STAT_GET_FUNCT(_funct_queue) (_funct_queue & 0xFFFF)
#define ENA_EXTENDED_STAT_GET_QUEUE(_funct_queue) (_funct_queue >> 16)

#endif /* ENA_EXTENDED_STATS */

#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
		ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
		| (ENA_COMMON_SPEC_VERSION_MINOR))

#define ENA_CTRL_MAJOR		0
#define ENA_CTRL_MINOR		0
#define ENA_CTRL_SUB_MINOR	1

#define MIN_ENA_CTRL_VER \
	(((ENA_CTRL_MAJOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
	((ENA_CTRL_MINOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
	(ENA_CTRL_SUB_MINOR))

#define ENA_DMA_ADDR_TO_UINT32_LOW(x)	((u32)((u64)(x)))
#define ENA_DMA_ADDR_TO_UINT32_HIGH(x)	((u32)(((u64)(x)) >> 32))

#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF

#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT	4

#define ENA_REGS_ADMIN_INTR_MASK 1

/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/

enum ena_cmd_status {
	ENA_CMD_SUBMITTED,
	ENA_CMD_COMPLETED,
	/* Abort - canceled by the driver */
	ENA_CMD_ABORTED,
};
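/* Each in-flight admin command is tracked by one ena_comp_ctx entry,
 * indexed by the command id assigned at submission. A minimal sketch of
 * the lifecycle (illustrative only, not driver code; see get_comp_ctxt()
 * and comp_ctxt_release() below):
 *
 *	comp_ctx = get_comp_ctxt(queue, cmd_id, true);   // mark occupied
 *	comp_ctx->status = ENA_CMD_SUBMITTED;
 *	// ... device completes; IRQ or polling path runs ...
 *	comp_ctx->status = ENA_CMD_COMPLETED;            // or ENA_CMD_ABORTED
 *	comp_ctxt_release(queue, comp_ctx);              // mark free again
 */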
struct ena_comp_ctx {
	ena_wait_event_t wait_event;
	struct ena_admin_acq_entry *user_cqe;
	u32 comp_size;
	enum ena_cmd_status status;
	/* status from the device */
	u8 comp_status;
	u8 cmd_opcode;
	bool occupied;
};

struct ena_com_stats_ctx {
	struct ena_admin_aq_get_stats_cmd get_cmd;
	struct ena_admin_acq_get_stats_resp get_resp;
};

static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
				       struct ena_common_mem_addr *ena_addr,
				       dma_addr_t addr)
{
	if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
		ena_trc_err("dma address has more bits than the device supports\n");
		return ENA_COM_INVAL;
	}

	ena_addr->mem_addr_low = (u32)addr;
	ena_addr->mem_addr_high = (u16)((u64)addr >> 32);

	return 0;
}

static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
{
	struct ena_com_admin_sq *sq = &queue->sq;
	u16 size = ADMIN_SQ_SIZE(queue->q_depth);

	ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, sq->entries, sq->dma_addr,
			       sq->mem_handle);

	if (!sq->entries) {
		ena_trc_err("memory allocation failed");
		return ENA_COM_NO_MEM;
	}

	sq->head = 0;
	sq->tail = 0;
	sq->phase = 1;

	sq->db_addr = NULL;

	return 0;
}

static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
{
	struct ena_com_admin_cq *cq = &queue->cq;
	u16 size = ADMIN_CQ_SIZE(queue->q_depth);

	ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, cq->entries, cq->dma_addr,
			       cq->mem_handle);

	if (!cq->entries) {
		ena_trc_err("memory allocation failed");
		return ENA_COM_NO_MEM;
	}

	cq->head = 0;
	cq->phase = 1;

	return 0;
}

static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
				   struct ena_aenq_handlers *aenq_handlers)
{
	struct ena_com_aenq *aenq = &dev->aenq;
	u32 addr_low, addr_high, aenq_caps;
	u16 size;

	dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
	size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
	ENA_MEM_ALLOC_COHERENT(dev->dmadev, size,
			       aenq->entries,
			       aenq->dma_addr,
			       aenq->mem_handle);

	if (!aenq->entries) {
		ena_trc_err("memory allocation failed");
		return ENA_COM_NO_MEM;
	}

	aenq->head = aenq->q_depth;
	aenq->phase = 1;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);

	ENA_REG_WRITE32(dev->bus, addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
	ENA_REG_WRITE32(dev->bus, addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);

	aenq_caps = 0;
	aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
	aenq_caps |= (sizeof(struct ena_admin_aenq_entry) <<
		ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
		ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
	ENA_REG_WRITE32(dev->bus, aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);

	if (unlikely(!aenq_handlers)) {
		ena_trc_err("aenq handlers pointer is NULL\n");
		return ENA_COM_INVAL;
	}

	aenq->aenq_handlers = aenq_handlers;

	return 0;
}

static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
				     struct ena_comp_ctx *comp_ctx)
{
	comp_ctx->occupied = false;
	ATOMIC32_DEC(&queue->outstanding_cmds);
}
static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
					  u16 command_id, bool capture)
{
	if (unlikely(command_id >= queue->q_depth)) {
		ena_trc_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
			    command_id, queue->q_depth);
		return NULL;
	}

	if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
		ena_trc_err("Completion context is occupied\n");
		return NULL;
	}

	if (capture) {
		ATOMIC32_INC(&queue->outstanding_cmds);
		queue->comp_ctx[command_id].occupied = true;
	}

	return &queue->comp_ctx[command_id];
}

static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
						       struct ena_admin_aq_entry *cmd,
						       size_t cmd_size_in_bytes,
						       struct ena_admin_acq_entry *comp,
						       size_t comp_size_in_bytes)
{
	struct ena_comp_ctx *comp_ctx;
	u16 tail_masked, cmd_id;
	u16 queue_size_mask;
	u16 cnt;

	queue_size_mask = admin_queue->q_depth - 1;

	tail_masked = admin_queue->sq.tail & queue_size_mask;

	/* In case of queue FULL */
	cnt = ATOMIC32_READ(&admin_queue->outstanding_cmds);
	if (cnt >= admin_queue->q_depth) {
		ena_trc_dbg("admin queue is full.\n");
		admin_queue->stats.out_of_space++;
		return ERR_PTR(ENA_COM_NO_SPACE);
	}

	cmd_id = admin_queue->curr_cmd_id;

	cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
		ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;

	cmd->aq_common_descriptor.command_id |= cmd_id &
		ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
	if (unlikely(!comp_ctx))
		return ERR_PTR(ENA_COM_INVAL);

	comp_ctx->status = ENA_CMD_SUBMITTED;
	comp_ctx->comp_size = (u32)comp_size_in_bytes;
	comp_ctx->user_cqe = comp;
	comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;

	ENA_WAIT_EVENT_CLEAR(comp_ctx->wait_event);

	memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);

	admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
		queue_size_mask;

	admin_queue->sq.tail++;
	admin_queue->stats.submitted_cmd++;

	if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
		admin_queue->sq.phase = !admin_queue->sq.phase;

	ENA_DB_SYNC(&admin_queue->sq.mem_handle);
	ENA_REG_WRITE32(admin_queue->bus, admin_queue->sq.tail,
			admin_queue->sq.db_addr);

	return comp_ctx;
}

static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
{
	size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	queue->comp_ctx = ENA_MEM_ALLOC(queue->q_dmadev, size);
	if (unlikely(!queue->comp_ctx)) {
		ena_trc_err("memory allocation failed");
		return ENA_COM_NO_MEM;
	}

	for (i = 0; i < queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(queue, i, false);
		if (comp_ctx)
			ENA_WAIT_EVENT_INIT(comp_ctx->wait_event);
	}

	return 0;
}

static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
						     struct ena_admin_aq_entry *cmd,
						     size_t cmd_size_in_bytes,
						     struct ena_admin_acq_entry *comp,
						     size_t comp_size_in_bytes)
{
	unsigned long flags;
	struct ena_comp_ctx *comp_ctx;

	ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
	if (unlikely(!admin_queue->running_state)) {
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
		return ERR_PTR(ENA_COM_NO_DEVICE);
	}
	comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
					      cmd_size_in_bytes,
					      comp,
					      comp_size_in_bytes);
	if (unlikely(IS_ERR(comp_ctx)))
		admin_queue->running_state = false;
	ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

	return comp_ctx;
}
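/* The submission path above relies on a phase bit rather than a consumed
 * index. A worked example, assuming q_depth = 32: entries 0..31 are
 * written with phase = 1; once the tail wraps past entry 31 the phase
 * flips to 0, so a stale entry (still carrying the previous phase) can
 * never be mistaken for a freshly posted one. The completion side
 * (ena_com_handle_admin_completion() below) applies the same rule when
 * scanning the CQ.
 */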
static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_sq *io_sq)
{
	size_t size;
	int dev_node = 0;

	memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));

	io_sq->desc_entry_size =
		(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_desc) :
		sizeof(struct ena_eth_io_rx_desc);

	size = io_sq->desc_entry_size * io_sq->q_depth;
	io_sq->bus = ena_dev->bus;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
					    size,
					    io_sq->desc_addr.virt_addr,
					    io_sq->desc_addr.phys_addr,
					    io_sq->desc_addr.mem_handle,
					    ctx->numa_node,
					    dev_node);
		if (!io_sq->desc_addr.virt_addr) {
			ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
					       size,
					       io_sq->desc_addr.virt_addr,
					       io_sq->desc_addr.phys_addr,
					       io_sq->desc_addr.mem_handle);
		}

		if (!io_sq->desc_addr.virt_addr) {
			ena_trc_err("memory allocation failed");
			return ENA_COM_NO_MEM;
		}
	}

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		/* Allocate bounce buffers */
		io_sq->bounce_buf_ctrl.buffer_size = ena_dev->llq_info.desc_list_entry_size;
		io_sq->bounce_buf_ctrl.buffers_num = ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
		io_sq->bounce_buf_ctrl.next_to_use = 0;

		size = io_sq->bounce_buf_ctrl.buffer_size * io_sq->bounce_buf_ctrl.buffers_num;

		ENA_MEM_ALLOC_NODE(ena_dev->dmadev,
				   size,
				   io_sq->bounce_buf_ctrl.base_buffer,
				   ctx->numa_node,
				   dev_node);
		if (!io_sq->bounce_buf_ctrl.base_buffer)
			io_sq->bounce_buf_ctrl.base_buffer = ENA_MEM_ALLOC(ena_dev->dmadev, size);

		if (!io_sq->bounce_buf_ctrl.base_buffer) {
			ena_trc_err("bounce buffer memory allocation failed");
			return ENA_COM_NO_MEM;
		}

		memcpy(&io_sq->llq_info, &ena_dev->llq_info, sizeof(io_sq->llq_info));

		/* Initialize the first bounce buffer */
		io_sq->llq_buf_ctrl.curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, io_sq->llq_info.desc_list_entry_size);
		io_sq->llq_buf_ctrl.descs_left_in_line =
			io_sq->llq_info.descs_num_before_header;
	}

	io_sq->tail = 0;
	io_sq->next_to_comp = 0;
	io_sq->phase = 1;

	return 0;
}
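/* For LLQ (device placement) SQs the descriptors are written through the
 * device memory BAR, so they are first staged in host bounce buffers.
 * With the values used above, a 128B desc_list_entry_size and
 * ENA_COM_BOUNCE_BUFFER_CNTRL_CNT = 4 give a 512B staging area per SQ;
 * the buffers are cycled through ena_com_get_next_bounce_buffer().
 */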
static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_cq *io_cq)
{
	size_t size;
	int prev_node = 0;

	memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));

	/* Use the basic completion descriptor for Rx */
	io_cq->cdesc_entry_size_in_bytes =
		(io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_cdesc) :
		sizeof(struct ena_eth_io_rx_cdesc_base);

	size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
	io_cq->bus = ena_dev->bus;

	ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
				    size,
				    io_cq->cdesc_addr.virt_addr,
				    io_cq->cdesc_addr.phys_addr,
				    io_cq->cdesc_addr.mem_handle,
				    ctx->numa_node,
				    prev_node);
	if (!io_cq->cdesc_addr.virt_addr) {
		ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
				       size,
				       io_cq->cdesc_addr.virt_addr,
				       io_cq->cdesc_addr.phys_addr,
				       io_cq->cdesc_addr.mem_handle);
	}

	if (!io_cq->cdesc_addr.virt_addr) {
		ena_trc_err("memory allocation failed");
		return ENA_COM_NO_MEM;
	}

	io_cq->phase = 1;
	io_cq->head = 0;

	return 0;
}

static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
						   struct ena_admin_acq_entry *cqe)
{
	struct ena_comp_ctx *comp_ctx;
	u16 cmd_id;

	cmd_id = cqe->acq_common_descriptor.command &
		ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
	if (unlikely(!comp_ctx)) {
		ena_trc_err("comp_ctx is NULL. Changing the admin queue running state\n");
		admin_queue->running_state = false;
		return;
	}

	comp_ctx->status = ENA_CMD_COMPLETED;
	comp_ctx->comp_status = cqe->acq_common_descriptor.status;

	if (comp_ctx->user_cqe)
		memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);

	if (!admin_queue->polling)
		ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
}

static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
{
	struct ena_admin_acq_entry *cqe = NULL;
	u16 comp_num = 0;
	u16 head_masked;
	u8 phase;

	head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
	phase = admin_queue->cq.phase;

	cqe = &admin_queue->cq.entries[head_masked];

	/* Go over all the completions */
	while ((cqe->acq_common_descriptor.flags &
		ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
		/* Do not read the rest of the completion entry before the
		 * phase bit has been validated
		 */
		rmb();
		ena_com_handle_single_admin_completion(admin_queue, cqe);

		head_masked++;
		comp_num++;
		if (unlikely(head_masked == admin_queue->q_depth)) {
			head_masked = 0;
			phase = !phase;
		}

		cqe = &admin_queue->cq.entries[head_masked];
	}

	admin_queue->cq.head += comp_num;
	admin_queue->cq.phase = phase;
	admin_queue->sq.head += comp_num;
	admin_queue->stats.completed_cmd += comp_num;
}

static int ena_com_comp_status_to_errno(u8 comp_status)
{
	if (unlikely(comp_status != 0))
		ena_trc_err("admin command failed[%u]\n", comp_status);

	if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
		return ENA_COM_INVAL;

	switch (comp_status) {
	case ENA_ADMIN_SUCCESS:
		return 0;
	case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
		return ENA_COM_NO_MEM;
	case ENA_ADMIN_UNSUPPORTED_OPCODE:
		return ENA_COM_UNSUPPORTED;
	case ENA_ADMIN_BAD_OPCODE:
	case ENA_ADMIN_MALFORMED_REQUEST:
	case ENA_ADMIN_ILLEGAL_PARAMETER:
	case ENA_ADMIN_UNKNOWN_ERROR:
		return ENA_COM_INVAL;
	}

	return 0;
}
static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
						     struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags, timeout;
	int ret;

	timeout = ENA_GET_SYSTEM_TIMEOUT(admin_queue->completion_timeout);

	while (1) {
		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

		if (comp_ctx->status != ENA_CMD_SUBMITTED)
			break;

		if (ENA_TIME_EXPIRE(timeout)) {
			ena_trc_err("Wait for completion (polling) timeout\n");
			/* ENA didn't have any completion */
			ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
			admin_queue->stats.no_completion++;
			admin_queue->running_state = false;
			ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

			ret = ENA_COM_TIMER_EXPIRED;
			goto err;
		}

		ENA_MSLEEP(100);
	}

	if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
		ena_trc_err("Command was aborted\n");
		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
		admin_queue->stats.aborted_cmd++;
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
		ret = ENA_COM_NO_DEVICE;
		goto err;
	}

	ENA_WARN(comp_ctx->status != ENA_CMD_COMPLETED,
		 "Invalid comp status %d\n", comp_ctx->status);

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}

static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
				   struct ena_admin_feature_llq_desc *llq_desc)
{
	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;

	memset(llq_info, 0, sizeof(*llq_info));

	switch (llq_desc->header_location_ctrl) {
	case ENA_ADMIN_INLINE_HEADER:
		llq_info->inline_header = true;
		break;
	case ENA_ADMIN_HEADER_RING:
		llq_info->inline_header = false;
		break;
	default:
		ena_trc_err("Invalid header location control\n");
		return -EINVAL;
	}

	switch (llq_desc->entry_size_ctrl) {
	case ENA_ADMIN_LIST_ENTRY_SIZE_128B:
		llq_info->desc_list_entry_size = 128;
		break;
	case ENA_ADMIN_LIST_ENTRY_SIZE_192B:
		llq_info->desc_list_entry_size = 192;
		break;
	case ENA_ADMIN_LIST_ENTRY_SIZE_256B:
		llq_info->desc_list_entry_size = 256;
		break;
	default:
		ena_trc_err("Invalid entry_size_ctrl %d\n",
			    llq_desc->entry_size_ctrl);
		return -EINVAL;
	}

	if ((llq_info->desc_list_entry_size & 0x7)) {
		/* The desc list entry size should be a whole multiple of 8
		 * This requirement comes from __iowrite64_copy()
		 */
		ena_trc_err("illegal entry size %d\n",
			    llq_info->desc_list_entry_size);
		return -EINVAL;
	}

	if (llq_info->inline_header) {
		llq_info->desc_stride_ctrl = llq_desc->descriptors_stride_ctrl;
		if ((llq_info->desc_stride_ctrl != ENA_ADMIN_SINGLE_DESC_PER_ENTRY) &&
		    (llq_info->desc_stride_ctrl != ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)) {
			ena_trc_err("Invalid desc_stride_ctrl %d\n",
				    llq_info->desc_stride_ctrl);
			return -EINVAL;
		}
	} else {
		llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
	}

	if (llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY)
		llq_info->descs_per_entry = llq_info->desc_list_entry_size /
			sizeof(struct ena_eth_io_tx_desc);
	else
		llq_info->descs_per_entry = 1;

	llq_info->descs_num_before_header = llq_desc->desc_num_before_header_ctrl;

	return 0;
}
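/* A worked example for the sizing above, assuming a 128B list entry and
 * a 16-byte struct ena_eth_io_tx_desc (the size used by this driver
 * family): descs_per_entry = 128 / 16 = 8, and descs_num_before_header
 * bounds how many of those may precede the packet header in an
 * inline-header entry. The 8-byte alignment check exists because entries
 * are copied to device memory with 64-bit writes (__iowrite64_copy()).
 */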
static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
							struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags;
	int ret;

	ENA_WAIT_EVENT_WAIT(comp_ctx->wait_event,
			    admin_queue->completion_timeout);

	/* In case the command wasn't completed find out the root cause.
	 * There might be 2 kinds of errors
	 * 1) No completion (timeout reached)
	 * 2) There is a completion but the device didn't get any msi-x interrupt.
	 */
	if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		admin_queue->stats.no_completion++;
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

		if (comp_ctx->status == ENA_CMD_COMPLETED)
			ena_trc_err("The ena device has a completion but the driver didn't receive any MSI-X interrupt (cmd %d)\n",
				    comp_ctx->cmd_opcode);
		else
			ena_trc_err("The ena device didn't send any completion for the admin cmd %d status %d\n",
				    comp_ctx->cmd_opcode, comp_ctx->status);

		admin_queue->running_state = false;
		ret = ENA_COM_TIMER_EXPIRED;
		goto err;
	}

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}

/* This method reads a hardware device register by posting a read request
 * (a write to the device) and waiting for the response.
 * On timeout the function will return ENA_MMIO_READ_TIMEOUT
 */
static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
		mmio_read->read_resp;
	u32 mmio_read_reg, ret, i;
	unsigned long flags;
	u32 timeout = mmio_read->reg_read_to;

	ENA_MIGHT_SLEEP();

	if (timeout == 0)
		timeout = ENA_REG_READ_TIMEOUT;

	/* If readless is disabled, perform regular read */
	if (!mmio_read->readless_supported)
		return ENA_REG_READ32(ena_dev->bus, ena_dev->reg_bar + offset);

	ENA_SPINLOCK_LOCK(mmio_read->lock, flags);
	mmio_read->seq_num++;

	read_resp->req_id = mmio_read->seq_num + 0xDEAD;
	mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
			ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
	mmio_read_reg |= mmio_read->seq_num &
			ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;

	/* make sure read_resp->req_id gets updated before the hw can write
	 * there
	 */
	wmb();

	ENA_REG_WRITE32(ena_dev->bus, mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);

	for (i = 0; i < timeout; i++) {
		if (read_resp->req_id == mmio_read->seq_num)
			break;

		ENA_UDELAY(1);
	}

	if (unlikely(i == timeout)) {
		ena_trc_err("reading reg failed due to timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
			    mmio_read->seq_num,
			    offset,
			    read_resp->req_id,
			    read_resp->reg_off);
		ret = ENA_MMIO_READ_TIMEOUT;
		goto err;
	}

	if (read_resp->reg_off != offset) {
		ena_trc_err("Read failure: wrong offset provided");
		ret = ENA_MMIO_READ_TIMEOUT;
	} else {
		ret = read_resp->reg_val;
	}
err:
	ENA_SPINLOCK_UNLOCK(mmio_read->lock, flags);

	return ret;
}
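/* The "readless" protocol above exists because reads over the PCIe
 * register BAR can be prohibitively slow on some platforms. A minimal
 * caller sketch, with the return value checked for the sentinel
 * (illustrative only):
 *
 *	u32 val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
 *	if (val == ENA_MMIO_READ_TIMEOUT)
 *		return ENA_COM_TIMER_EXPIRED;	// device never answered
 *
 * The 0xDEAD added to req_id just guarantees the scratch value differs
 * from the sequence number the device will write back, so the poll loop
 * cannot match a stale value.
 */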
/* There are two modes of waiting for completion.
 * Polling mode - wait until the completion is available.
 * Async mode - wait on wait queue until the completion is ready
 * (or the timeout expired).
 * It is expected that the IRQ handler calls ena_com_handle_admin_completion
 * to mark the completions.
 */
static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
					     struct ena_com_admin_queue *admin_queue)
{
	if (admin_queue->polling)
		return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
								 admin_queue);

	return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
							    admin_queue);
}

static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
				 struct ena_com_io_sq *io_sq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
	u8 direction;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	destroy_cmd.sq.sq_identity |= (direction <<
		ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_SQ_SQ_DIRECTION_MASK;

	destroy_cmd.sq.sq_idx = io_sq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
		ena_trc_err("failed to destroy io sq error: %d\n", ret);

	return ret;
}

static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
				  struct ena_com_io_sq *io_sq,
				  struct ena_com_io_cq *io_cq)
{
	size_t size;

	if (io_cq->cdesc_addr.virt_addr) {
		size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      size,
				      io_cq->cdesc_addr.virt_addr,
				      io_cq->cdesc_addr.phys_addr,
				      io_cq->cdesc_addr.mem_handle);

		io_cq->cdesc_addr.virt_addr = NULL;
	}

	if (io_sq->desc_addr.virt_addr) {
		size = io_sq->desc_entry_size * io_sq->q_depth;

		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      size,
				      io_sq->desc_addr.virt_addr,
				      io_sq->desc_addr.phys_addr,
				      io_sq->desc_addr.mem_handle);

		io_sq->desc_addr.virt_addr = NULL;
	}

	if (io_sq->bounce_buf_ctrl.base_buffer) {
		size = io_sq->llq_info.desc_list_entry_size * ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
		ENA_MEM_FREE(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer, size);
		io_sq->bounce_buf_ctrl.base_buffer = NULL;
	}
}

static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
				u16 exp_state)
{
	u32 val, i;

	for (i = 0; i < timeout; i++) {
		val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

		if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
			ena_trc_err("Reg read timeout occurred\n");
			return ENA_COM_TIMER_EXPIRED;
		}

		if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
		    exp_state)
			return 0;

		/* The resolution of the timeout is 100ms */
		ENA_MSLEEP(100);
	}

	return ENA_COM_TIMER_EXPIRED;
}

static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
					       enum ena_admin_aq_feature_id feature_id)
{
	u32 feature_mask = 1 << feature_id;

	/* Device attributes are always supported */
	if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
	    !(ena_dev->supported_features & feature_mask))
		return false;

	return true;
}
static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
				  struct ena_admin_get_feat_resp *get_resp,
				  enum ena_admin_aq_feature_id feature_id,
				  dma_addr_t control_buf_dma_addr,
				  u32 control_buff_size)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_get_feat_cmd get_cmd;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
		ena_trc_dbg("Feature %d isn't supported\n", feature_id);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&get_cmd, 0x0, sizeof(get_cmd));
	admin_queue = &ena_dev->admin_queue;

	get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;

	if (control_buff_size)
		get_cmd.aq_common_descriptor.flags =
			ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	else
		get_cmd.aq_common_descriptor.flags = 0;

	ret = ena_com_mem_addr_set(ena_dev,
				   &get_cmd.control_buffer.address,
				   control_buf_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	get_cmd.control_buffer.length = control_buff_size;

	get_cmd.feat_common.feature_id = feature_id;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)
					    &get_cmd,
					    sizeof(get_cmd),
					    (struct ena_admin_acq_entry *)
					    get_resp,
					    sizeof(*get_resp));

	if (unlikely(ret))
		ena_trc_err("Failed to submit get_feature command %d error: %d\n",
			    feature_id, ret);

	return ret;
}

static int ena_com_get_feature(struct ena_com_dev *ena_dev,
			       struct ena_admin_get_feat_resp *get_resp,
			       enum ena_admin_aq_feature_id feature_id)
{
	return ena_com_get_feature_ex(ena_dev,
				      get_resp,
				      feature_id,
				      0,
				      0);
}

static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       sizeof(*rss->hash_key),
			       rss->hash_key,
			       rss->hash_key_dma_addr,
			       rss->hash_key_mem_handle);

	if (unlikely(!rss->hash_key))
		return ENA_COM_NO_MEM;

	return 0;
}

static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_key)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      sizeof(*rss->hash_key),
				      rss->hash_key,
				      rss->hash_key_dma_addr,
				      rss->hash_key_mem_handle);
	rss->hash_key = NULL;
}

static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       sizeof(*rss->hash_ctrl),
			       rss->hash_ctrl,
			       rss->hash_ctrl_dma_addr,
			       rss->hash_ctrl_mem_handle);

	if (unlikely(!rss->hash_ctrl))
		return ENA_COM_NO_MEM;

	return 0;
}

static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_ctrl)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      sizeof(*rss->hash_ctrl),
				      rss->hash_ctrl,
				      rss->hash_ctrl_dma_addr,
				      rss->hash_ctrl_mem_handle);
	rss->hash_ctrl = NULL;
}
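/* The RSS indirection table below is sized by a log2 parameter. For
 * example, log_size = 7 yields 1 << 7 = 128 entries of
 * struct ena_admin_rss_ind_table_entry, plus a parallel host-side u16
 * table mapping each entry back to a host queue id. The device reports
 * the supported [min_size, max_size] range (also in log2 units) through
 * the ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG feature.
 */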
static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
					   u16 log_size)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	size_t tbl_size;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
	if (unlikely(ret))
		return ret;

	if ((get_resp.u.ind_table.min_size > log_size) ||
	    (get_resp.u.ind_table.max_size < log_size)) {
		ena_trc_err("indirect table size doesn't fit. requested size: %d while min is: %d and max is: %d\n",
			    1 << log_size,
			    1 << get_resp.u.ind_table.min_size,
			    1 << get_resp.u.ind_table.max_size);
		return ENA_COM_INVAL;
	}

	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       tbl_size,
			       rss->rss_ind_tbl,
			       rss->rss_ind_tbl_dma_addr,
			       rss->rss_ind_tbl_mem_handle);
	if (unlikely(!rss->rss_ind_tbl))
		goto mem_err1;

	rss->host_rss_ind_tbl_size = (1ULL << log_size) * sizeof(u16);
	rss->host_rss_ind_tbl =
		ENA_MEM_ALLOC(ena_dev->dmadev, rss->host_rss_ind_tbl_size);
	if (unlikely(!rss->host_rss_ind_tbl))
		goto mem_err2;

	rss->tbl_log_size = log_size;

	return 0;

mem_err2:
	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
			      tbl_size,
			      rss->rss_ind_tbl,
			      rss->rss_ind_tbl_dma_addr,
			      rss->rss_ind_tbl_mem_handle);
	rss->rss_ind_tbl = NULL;
mem_err1:
	rss->tbl_log_size = 0;
	return ENA_COM_NO_MEM;
}

static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	size_t tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	if (rss->rss_ind_tbl)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      tbl_size,
				      rss->rss_ind_tbl,
				      rss->rss_ind_tbl_dma_addr,
				      rss->rss_ind_tbl_mem_handle);
	rss->rss_ind_tbl = NULL;

	if (rss->host_rss_ind_tbl)
		ENA_MEM_FREE(ena_dev->dmadev, rss->host_rss_ind_tbl,
			     rss->host_rss_ind_tbl_size);
	rss->host_rss_ind_tbl = NULL;
}
static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
				struct ena_com_io_sq *io_sq, u16 cq_idx)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_sq_cmd create_cmd;
	struct ena_admin_acq_create_sq_resp_desc cmd_completion;
	u8 direction;
	int ret;

	memset(&create_cmd, 0x0, sizeof(create_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	create_cmd.sq_identity |= (direction <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;

	create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;

	create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;

	create_cmd.sq_caps_3 |=
		ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;

	create_cmd.cq_idx = cq_idx;
	create_cmd.sq_depth = io_sq->q_depth;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		ret = ena_com_mem_addr_set(ena_dev,
					   &create_cmd.sq_ba,
					   io_sq->desc_addr.phys_addr);
		if (unlikely(ret)) {
			ena_trc_err("memory address set failed\n");
			return ret;
		}
	}

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		ena_trc_err("Failed to create IO SQ. error: %d\n", ret);
		return ret;
	}

	io_sq->idx = cmd_completion.sq_idx;

	io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		(uintptr_t)cmd_completion.sq_doorbell_offset);

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
				+ cmd_completion.llq_headers_offset);

		io_sq->desc_addr.pbuf_dev_addr =
			(u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
			cmd_completion.llq_descriptors_offset);
	}

	ena_trc_dbg("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);

	return ret;
}

static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_com_io_sq *io_sq;
	u16 qid;
	int i;

	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
		qid = rss->host_rss_ind_tbl[i];
		if (qid >= ENA_TOTAL_NUM_QUEUES)
			return ENA_COM_INVAL;

		io_sq = &ena_dev->io_sq_queues[qid];

		if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
			return ENA_COM_INVAL;

		rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
	}

	return 0;
}

static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
{
	u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
	struct ena_rss *rss = &ena_dev->rss;
	u8 idx;
	u16 i;

	for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
		dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;

	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
		if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES)
			return ENA_COM_INVAL;
		idx = (u8)rss->rss_ind_tbl[i].cq_idx;

		if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES)
			return ENA_COM_INVAL;

		rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
	}

	return 0;
}

static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev)
{
	size_t size;

	size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS;

	ena_dev->intr_moder_tbl = ENA_MEM_ALLOC(ena_dev->dmadev, size);
	if (!ena_dev->intr_moder_tbl)
		return ENA_COM_NO_MEM;

	ena_com_config_default_interrupt_moderation_table(ena_dev);

	return 0;
}

static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
						 u16 intr_delay_resolution)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
	unsigned int i;

	if (!intr_delay_resolution) {
		ena_trc_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
		intr_delay_resolution = 1;
	}
	ena_dev->intr_delay_resolution = intr_delay_resolution;

	/* update Rx */
	for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++)
		intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution;

	/* update Tx */
	ena_dev->intr_moder_tx_interval /= intr_delay_resolution;
}
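/* A worked example for the conversion above: the moderation tables are
 * populated in microseconds, while the device counts in units of
 * intr_delay_resolution. If the device reports a resolution of 2 usec,
 * a table entry of 64 usec becomes 64 / 2 = 32 device units, and the
 * same division is applied to the Tx interval.
 */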
/*****************************************************************************/
/*******************************      API      *******************************/
/*****************************************************************************/

int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
				  struct ena_admin_aq_entry *cmd,
				  size_t cmd_size,
				  struct ena_admin_acq_entry *comp,
				  size_t comp_size)
{
	struct ena_comp_ctx *comp_ctx;
	int ret;

	comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
					    comp, comp_size);
	if (unlikely(IS_ERR(comp_ctx))) {
		if (comp_ctx == ERR_PTR(ENA_COM_NO_DEVICE))
			ena_trc_dbg("Failed to submit command [%ld]\n",
				    PTR_ERR(comp_ctx));
		else
			ena_trc_err("Failed to submit command [%ld]\n",
				    PTR_ERR(comp_ctx));

		return PTR_ERR(comp_ctx);
	}

	ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
	if (unlikely(ret)) {
		if (admin_queue->running_state)
			ena_trc_err("Failed to process command. ret = %d\n",
				    ret);
		else
			ena_trc_dbg("Failed to process command. ret = %d\n",
				    ret);
	}
	return ret;
}
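/* ena_com_execute_admin_command() is the single entry point the rest of
 * this file funnels admin traffic through. A minimal caller sketch,
 * following the pattern of the create/destroy helpers below (illustrative
 * only; real callers also fill command-specific fields):
 *
 *	struct ena_admin_aq_destroy_cq_cmd cmd = { 0 };
 *	struct ena_admin_acq_destroy_cq_resp_desc resp;
 *	int ret;
 *
 *	cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;
 *	ret = ena_com_execute_admin_command(admin_queue,
 *					    (struct ena_admin_aq_entry *)&cmd,
 *					    sizeof(cmd),
 *					    (struct ena_admin_acq_entry *)&resp,
 *					    sizeof(resp));
 */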
error: %d\n", ret); 1331 return ret; 1332 } 1333 1334 io_cq->idx = cmd_completion.cq_idx; 1335 1336 io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + 1337 cmd_completion.cq_interrupt_unmask_register_offset); 1338 1339 if (cmd_completion.cq_head_db_register_offset) 1340 io_cq->cq_head_db_reg = 1341 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + 1342 cmd_completion.cq_head_db_register_offset); 1343 1344 if (cmd_completion.numa_node_register_offset) 1345 io_cq->numa_node_cfg_reg = 1346 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + 1347 cmd_completion.numa_node_register_offset); 1348 1349 ena_trc_dbg("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth); 1350 1351 return ret; 1352} 1353 1354int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid, 1355 struct ena_com_io_sq **io_sq, 1356 struct ena_com_io_cq **io_cq) 1357{ 1358 if (qid >= ENA_TOTAL_NUM_QUEUES) { 1359 ena_trc_err("Invalid queue number %d but the max is %d\n", 1360 qid, ENA_TOTAL_NUM_QUEUES); 1361 return ENA_COM_INVAL; 1362 } 1363 1364 *io_sq = &ena_dev->io_sq_queues[qid]; 1365 *io_cq = &ena_dev->io_cq_queues[qid]; 1366 1367 return 0; 1368} 1369 1370void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev) 1371{ 1372 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1373 struct ena_comp_ctx *comp_ctx; 1374 u16 i; 1375 1376 if (!admin_queue->comp_ctx) 1377 return; 1378 1379 for (i = 0; i < admin_queue->q_depth; i++) { 1380 comp_ctx = get_comp_ctxt(admin_queue, i, false); 1381 if (unlikely(!comp_ctx)) 1382 break; 1383 1384 comp_ctx->status = ENA_CMD_ABORTED; 1385 1386 ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event); 1387 } 1388} 1389 1390void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev) 1391{ 1392 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1393 unsigned long flags; 1394 1395 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); 1396 while (ATOMIC32_READ(&admin_queue->outstanding_cmds) != 0) { 1397 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); 1398 ENA_MSLEEP(20); 1399 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); 1400 } 1401 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); 1402} 1403 1404int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev, 1405 struct ena_com_io_cq *io_cq) 1406{ 1407 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1408 struct ena_admin_aq_destroy_cq_cmd destroy_cmd; 1409 struct ena_admin_acq_destroy_cq_resp_desc destroy_resp; 1410 int ret; 1411 1412 memset(&destroy_cmd, 0x0, sizeof(destroy_cmd)); 1413 1414 destroy_cmd.cq_idx = io_cq->idx; 1415 destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ; 1416 1417 ret = ena_com_execute_admin_command(admin_queue, 1418 (struct ena_admin_aq_entry *)&destroy_cmd, 1419 sizeof(destroy_cmd), 1420 (struct ena_admin_acq_entry *)&destroy_resp, 1421 sizeof(destroy_resp)); 1422 1423 if (unlikely(ret && (ret != ENA_COM_NO_DEVICE))) 1424 ena_trc_err("Failed to destroy IO CQ. 
error: %d\n", ret); 1425 1426 return ret; 1427} 1428 1429bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev) 1430{ 1431 return ena_dev->admin_queue.running_state; 1432} 1433 1434void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state) 1435{ 1436 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1437 unsigned long flags; 1438 1439 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); 1440 ena_dev->admin_queue.running_state = state; 1441 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); 1442} 1443 1444void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev) 1445{ 1446 u16 depth = ena_dev->aenq.q_depth; 1447 1448 ENA_WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n"); 1449 1450 /* Init head_db to mark that all entries in the queue 1451 * are initially available 1452 */ 1453 ENA_REG_WRITE32(ena_dev->bus, depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF); 1454} 1455 1456int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag) 1457{ 1458 struct ena_com_admin_queue *admin_queue; 1459 struct ena_admin_set_feat_cmd cmd; 1460 struct ena_admin_set_feat_resp resp; 1461 struct ena_admin_get_feat_resp get_resp; 1462 int ret; 1463 1464 ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG); 1465 if (ret) { 1466 ena_trc_info("Can't get aenq configuration\n"); 1467 return ret; 1468 } 1469 1470 if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) { 1471 ena_trc_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n", 1472 get_resp.u.aenq.supported_groups, 1473 groups_flag); 1474 return ENA_COM_UNSUPPORTED; 1475 } 1476 1477 memset(&cmd, 0x0, sizeof(cmd)); 1478 admin_queue = &ena_dev->admin_queue; 1479 1480 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; 1481 cmd.aq_common_descriptor.flags = 0; 1482 cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG; 1483 cmd.u.aenq.enabled_groups = groups_flag; 1484 1485 ret = ena_com_execute_admin_command(admin_queue, 1486 (struct ena_admin_aq_entry *)&cmd, 1487 sizeof(cmd), 1488 (struct ena_admin_acq_entry *)&resp, 1489 sizeof(resp)); 1490 1491 if (unlikely(ret)) 1492 ena_trc_err("Failed to config AENQ ret: %d\n", ret); 1493 1494 return ret; 1495} 1496 1497int ena_com_get_dma_width(struct ena_com_dev *ena_dev) 1498{ 1499 u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF); 1500 int width; 1501 1502 if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) { 1503 ena_trc_err("Reg read timeout occurred\n"); 1504 return ENA_COM_TIMER_EXPIRED; 1505 } 1506 1507 width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >> 1508 ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT; 1509 1510 ena_trc_dbg("ENA dma width: %d\n", width); 1511 1512 if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) { 1513 ena_trc_err("DMA width illegal value: %d\n", width); 1514 return ENA_COM_INVAL; 1515 } 1516 1517 ena_dev->dma_addr_bits = width; 1518 1519 return width; 1520} 1521 1522int ena_com_validate_version(struct ena_com_dev *ena_dev) 1523{ 1524 u32 ver; 1525 u32 ctrl_ver; 1526 u32 ctrl_ver_masked; 1527 1528 /* Make sure the ENA version and the controller version are at least 1529 * as the driver expects 1530 */ 1531 ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF); 1532 ctrl_ver = ena_com_reg_bar_read32(ena_dev, 1533 ENA_REGS_CONTROLLER_VERSION_OFF); 1534 1535 if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) || 1536 (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) { 1537 ena_trc_err("Reg read timeout occurred\n"); 1538 return ENA_COM_TIMER_EXPIRED; 1539 } 1540 1541 ena_trc_info("ena 
int ena_com_validate_version(struct ena_com_dev *ena_dev)
{
	u32 ver;
	u32 ctrl_ver;
	u32 ctrl_ver_masked;

	/* Make sure the ENA version and the controller version are at least
	 * the versions the driver expects
	 */
	ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
	ctrl_ver = ena_com_reg_bar_read32(ena_dev,
					  ENA_REGS_CONTROLLER_VERSION_OFF);

	if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
		     (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
		ena_trc_err("Reg read timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	ena_trc_info("ena device version: %d.%d\n",
		     (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
		     ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
		     ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);

	if (ver < MIN_ENA_VER) {
		ena_trc_err("ENA version is lower than the minimal version the driver supports\n");
		return -1;
	}

	ena_trc_info("ena controller version: %d.%d.%d implementation version %d\n",
		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK)
		     >> ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK)
		     >> ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
		     ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);

	ctrl_ver_masked =
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);

	/* Validate the ctrl version without the implementation ID */
	if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
		ena_trc_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
		return -1;
	}

	return 0;
}

void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_com_admin_cq *cq = &admin_queue->cq;
	struct ena_com_admin_sq *sq = &admin_queue->sq;
	struct ena_com_aenq *aenq = &ena_dev->aenq;
	u16 size;

	ENA_SPINLOCK_DESTROY(admin_queue->q_lock);

	if (admin_queue->comp_ctx) {
		size_t s = admin_queue->q_depth * sizeof(struct ena_comp_ctx);

		/* Destroy the wait event only when comp_ctx was actually
		 * allocated, to avoid a NULL dereference.
		 */
		ENA_WAIT_EVENT_DESTROY(admin_queue->comp_ctx->wait_event);
		ENA_MEM_FREE(ena_dev->dmadev, admin_queue->comp_ctx, s);
	}
	admin_queue->comp_ctx = NULL;

	size = ADMIN_SQ_SIZE(admin_queue->q_depth);
	if (sq->entries)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, sq->entries,
				      sq->dma_addr, sq->mem_handle);
	sq->entries = NULL;

	size = ADMIN_CQ_SIZE(admin_queue->q_depth);
	if (cq->entries)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, cq->entries,
				      cq->dma_addr, cq->mem_handle);
	cq->entries = NULL;

	size = ADMIN_AENQ_SIZE(aenq->q_depth);
	if (ena_dev->aenq.entries)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, aenq->entries,
				      aenq->dma_addr, aenq->mem_handle);
	aenq->entries = NULL;
}

void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
{
	u32 mask_value = 0;

	if (polling)
		mask_value = ENA_REGS_ADMIN_INTR_MASK;

	ENA_REG_WRITE32(ena_dev->bus, mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
	ena_dev->admin_queue.polling = polling;
}

int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	ENA_SPINLOCK_INIT(mmio_read->lock);
	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       sizeof(*mmio_read->read_resp),
			       mmio_read->read_resp,
			       mmio_read->read_resp_dma_addr,
			       mmio_read->read_resp_mem_handle);
	if (unlikely(!mmio_read->read_resp))
		return ENA_COM_NO_MEM;

	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	mmio_read->read_resp->req_id = 0x0;
	mmio_read->seq_num = 0x0;
	mmio_read->readless_supported = true;
	return 0;
}

void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	mmio_read->readless_supported = readless_supported;
}

void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);

	ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
			      sizeof(*mmio_read->read_resp),
			      mmio_read->read_resp,
			      mmio_read->read_resp_dma_addr,
			      mmio_read->read_resp_mem_handle);

	mmio_read->read_resp = NULL;

	ENA_SPINLOCK_DESTROY(mmio_read->lock);
}

void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	u32 addr_low, addr_high;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);

	ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
}
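/* A minimal bring-up sketch, assuming the caller has already mapped
 * reg_bar (illustrative only; error handling elided):
 *
 *	rc = ena_com_mmio_reg_read_request_init(ena_dev);
 *	rc = ena_com_validate_version(ena_dev);
 *	rc = ena_com_admin_init(ena_dev, &aenq_handlers, true);
 *
 * The MMIO read machinery must come first, because ena_com_admin_init()
 * below reads ENA_REGS_DEV_STS_OFF through ena_com_reg_bar_read32().
 */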
int ena_com_admin_init(struct ena_com_dev *ena_dev,
		       struct ena_aenq_handlers *aenq_handlers,
		       bool init_spinlock)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
	int ret;

#ifdef ENA_INTERNAL
	ena_trc_info("ena_defs : Version:[%s] Build date [%s]",
		     ENA_GEN_COMMIT, ENA_GEN_DATE);
#endif
	dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

	if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
		ena_trc_err("Reg read timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
		ena_trc_err("Device isn't ready, abort com init\n");
		return ENA_COM_NO_DEVICE;
	}

	admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;

	admin_queue->bus = ena_dev->bus;
	admin_queue->q_dmadev = ena_dev->dmadev;
	admin_queue->polling = false;
	admin_queue->curr_cmd_id = 0;

	ATOMIC32_SET(&admin_queue->outstanding_cmds, 0);

	if (init_spinlock)
		ENA_SPINLOCK_INIT(admin_queue->q_lock);

	ret = ena_com_init_comp_ctxt(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_sq(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_cq(admin_queue);
	if (ret)
		goto error;

	admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		ENA_REGS_AQ_DB_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);

	ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
	ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);

	ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
	ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);

	aq_caps = 0;
	aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
	aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
		ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
		ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;

	acq_caps = 0;
	acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
	acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;

	ENA_REG_WRITE32(ena_dev->bus, aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
	ENA_REG_WRITE32(ena_dev->bus, acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
	ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
	if (ret)
		goto error;

	admin_queue->running_state = true;

	return 0;
error:
	ena_com_admin_destroy(ena_dev);

	return ret;
}

int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
			    struct ena_com_create_io_ctx *ctx)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;
	int ret;

	if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
		ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
			    ctx->qid, ENA_TOTAL_NUM_QUEUES);
		return ENA_COM_INVAL;
	}

	io_sq = &ena_dev->io_sq_queues[ctx->qid];
	io_cq = &ena_dev->io_cq_queues[ctx->qid];

	memset(io_sq, 0x0, sizeof(*io_sq));
	memset(io_cq, 0x0, sizeof(*io_cq));

	/* Init CQ */
	io_cq->q_depth = ctx->queue_size;
	io_cq->direction = ctx->direction;
	io_cq->qid = ctx->qid;

	io_cq->msix_vector = ctx->msix_vector;

	io_sq->q_depth = ctx->queue_size;
	io_sq->direction = ctx->direction;
	io_sq->qid = ctx->qid;

	io_sq->mem_queue_type = ctx->mem_queue_type;

	if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		/* header length is limited to 8 bits */
		io_sq->tx_max_header_size =
			ENA_MIN32(ena_dev->tx_max_header_size, SZ_256);

	ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
	if (ret)
		goto error;
	ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_cq(ena_dev, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
	if (ret)
		goto destroy_io_cq;

	return 0;

destroy_io_cq:
	ena_com_destroy_io_cq(ena_dev, io_cq);
error:
	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
	return ret;
}

void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;

	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
			    qid, ENA_TOTAL_NUM_QUEUES);
		return;
	}

	io_sq = &ena_dev->io_sq_queues[qid];
	io_cq = &ena_dev->io_cq_queues[qid];

	ena_com_destroy_io_sq(ena_dev, io_sq);
	ena_com_destroy_io_cq(ena_dev, io_cq);

	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
}
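/* A minimal sketch of creating one Tx queue with host placement,
 * assuming qid 0, a depth of 1024, and an MSI-X vector / NUMA node chosen
 * by the caller (illustrative only; the fields follow
 * struct ena_com_create_io_ctx as used above):
 *
 *	struct ena_com_create_io_ctx ctx = { 0 };
 *
 *	ctx.qid = 0;
 *	ctx.queue_size = 1024;
 *	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
 *	ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
 *	ctx.msix_vector = vector;
 *	ctx.numa_node = node;
 *	rc = ena_com_create_io_queue(ena_dev, &ctx);
 *	...
 *	ena_com_destroy_io_queue(ena_dev, 0);
 */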
&get_resp, 1864 ENA_ADMIN_DEVICE_ATTRIBUTES); 1865 if (rc) 1866 return rc; 1867 1868 memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr, 1869 sizeof(get_resp.u.dev_attr)); 1870 ena_dev->supported_features = get_resp.u.dev_attr.supported_features; 1871 1872 rc = ena_com_get_feature(ena_dev, &get_resp, 1873 ENA_ADMIN_MAX_QUEUES_NUM); 1874 if (rc) 1875 return rc; 1876 1877 memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue, 1878 sizeof(get_resp.u.max_queue)); 1879 ena_dev->tx_max_header_size = get_resp.u.max_queue.max_header_size; 1880 1881 rc = ena_com_get_feature(ena_dev, &get_resp, 1882 ENA_ADMIN_AENQ_CONFIG); 1883 if (rc) 1884 return rc; 1885 1886 memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq, 1887 sizeof(get_resp.u.aenq)); 1888 1889 rc = ena_com_get_feature(ena_dev, &get_resp, 1890 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG); 1891 if (rc) 1892 return rc; 1893 1894 memcpy(&get_feat_ctx->offload, &get_resp.u.offload, 1895 sizeof(get_resp.u.offload)); 1896 1897 /* Driver hints aren't a mandatory admin command, so if the 1898 * command isn't supported, zero out the driver hints. 1899 */ 1900 rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS); 1901 1902 if (!rc) 1903 memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints, 1904 sizeof(get_resp.u.hw_hints)); 1905 else if (rc == ENA_COM_UNSUPPORTED) 1906 memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints)); 1907 else 1908 return rc; 1909 1910 rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ); 1911 if (!rc) 1912 memcpy(&get_feat_ctx->llq, &get_resp.u.llq, 1913 sizeof(get_resp.u.llq)); 1914 else if (rc == ENA_COM_UNSUPPORTED) 1915 memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq)); 1916 else 1917 return rc; 1918 1919 return 0; 1920} 1921 1922void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev) 1923{ 1924 ena_com_handle_admin_completion(&ena_dev->admin_queue); 1925} 1926 1927/* ena_com_get_specific_aenq_cb: 1928 * return the handler that is relevant to the specific event group 1929 */ 1930static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev, 1931 u16 group) 1932{ 1933 struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers; 1934 1935 if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group]) 1936 return aenq_handlers->handlers[group]; 1937 1938 return aenq_handlers->unimplemented_handler; 1939} 1940 
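/*
 * Illustrative sketch (not part of this file, compiled out with #if 0): how
 * a driver might populate the ena_aenq_handlers table that
 * ena_com_get_specific_aenq_cb() dispatches on. The example_* names are
 * hypothetical.
 */
#if 0
static void example_link_change_handler(void *adapter_data,
					struct ena_admin_aenq_entry *aenq_e)
{
	/* React to ENA_ADMIN_LINK_CHANGE notifications here. */
}

static void example_unimplemented_handler(void *adapter_data,
					  struct ena_admin_aenq_entry *aenq_e)
{
	/* Fallback invoked for any group without a registered handler. */
}

static struct ena_aenq_handlers example_aenq_handlers = {
	.handlers = {
		[ENA_ADMIN_LINK_CHANGE] = example_link_change_handler,
	},
	.unimplemented_handler = example_unimplemented_handler,
};
#endif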
1941/* ena_com_aenq_intr_handler: 1942 * handles the incoming AENQ events: 1943 * pops events from the queue and applies the matching handler 1944 */ 1945void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data) 1946{ 1947 struct ena_admin_aenq_entry *aenq_e; 1948 struct ena_admin_aenq_common_desc *aenq_common; 1949 struct ena_com_aenq *aenq = &dev->aenq; 1950 ena_aenq_handler handler_cb; 1951 unsigned long long timestamp; 1952 u16 masked_head, processed = 0; 1953 u8 phase; 1954 1955 masked_head = aenq->head & (aenq->q_depth - 1); 1956 phase = aenq->phase; 1957 aenq_e = &aenq->entries[masked_head]; /* Get first entry */ 1958 aenq_common = &aenq_e->aenq_common_desc; 1959 1960 /* Go over all the events */ 1961 while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == 1962 phase) { 1963 timestamp = (unsigned long long)aenq_common->timestamp_low | 1964 ((unsigned long long)aenq_common->timestamp_high << 32); 1965 ena_trc_dbg("AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n", 1966 aenq_common->group, 1967 aenq_common->syndrom, 1968 timestamp); 1969 1970 /* Handle the specific event */ 1971 handler_cb = ena_com_get_specific_aenq_cb(dev, 1972 aenq_common->group); 1973 handler_cb(data, aenq_e); /* call the actual event handler */ 1974 1975 /* Get next event entry */ 1976 masked_head++; 1977 processed++; 1978 1979 if (unlikely(masked_head == aenq->q_depth)) { 1980 masked_head = 0; 1981 phase = !phase; 1982 } 1983 aenq_e = &aenq->entries[masked_head]; 1984 aenq_common = &aenq_e->aenq_common_desc; 1985 } 1986 1987 aenq->head += processed; 1988 aenq->phase = phase; 1989 1990 /* Don't update aenq doorbell if there weren't any processed events */ 1991 if (!processed) 1992 return; 1993 1994 /* write the aenq doorbell after all AENQ descriptors were read */ 1995 mb(); 1996 ENA_REG_WRITE32(dev->bus, (u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF); 1997} 1998#ifdef ENA_EXTENDED_STATS 1999/* 2000 * Set the function index and queue index to be used by the 2001 * get-full-statistics (extended stats) feature. 2002 * 2003 */ 2004int ena_com_extended_stats_set_func_queue(struct ena_com_dev *ena_dev, 2005 u32 func_queue) 2006{ 2007 2008 /* The function and queue are received from the user in the 2009 * following format: bottom half-word holds the function index, 2010 * top half-word holds the queue index. 2011 */ 2012 ena_dev->stats_func = ENA_EXTENDED_STAT_GET_FUNCT(func_queue); 2013 ena_dev->stats_queue = ENA_EXTENDED_STAT_GET_QUEUE(func_queue); 2014 2015 return 0; 2016} 2017 2018#endif /* ENA_EXTENDED_STATS */ 2019 2020int ena_com_dev_reset(struct ena_com_dev *ena_dev, 2021 enum ena_regs_reset_reason_types reset_reason) 2022{ 2023 u32 stat, timeout, cap, reset_val; 2024 int rc; 2025 2026 stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF); 2027 cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF); 2028 2029 if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) || 2030 (cap == ENA_MMIO_READ_TIMEOUT))) { 2031 ena_trc_err("Reg read32 timeout occurred\n"); 2032 return ENA_COM_TIMER_EXPIRED; 2033 } 2034 2035 if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) { 2036 ena_trc_err("Device isn't ready, can't reset device\n"); 2037 return ENA_COM_INVAL; 2038 } 2039 2040 timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >> 2041 ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT; 2042 if (timeout == 0) { 2043 ena_trc_err("Invalid timeout value\n"); 2044 return ENA_COM_INVAL; 2045 } 2046 2047 /* start reset */ 2048 reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK; 2049 reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) & 2050 ENA_REGS_DEV_CTL_RESET_REASON_MASK; 2051 ENA_REG_WRITE32(ena_dev->bus, reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF); 2052 2053 /* Write again the MMIO read request address */ 2054 ena_com_mmio_reg_read_request_write_dev_addr(ena_dev); 2055 2056 rc = wait_for_reset_state(ena_dev, timeout, 2057 ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK); 2058 if (rc != 0) { 2059 ena_trc_err("Reset indication didn't turn on\n"); 2060 return rc; 2061 } 2062 2063 /* reset done */ 2064 ENA_REG_WRITE32(ena_dev->bus, 0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF); 2065 rc = wait_for_reset_state(ena_dev, timeout, 0); 2066 if (rc != 0) { 2067 ena_trc_err("Reset indication didn't turn off\n"); 2068 return rc; 2069 } 2070 2071 timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >> 2072 ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT; 2073 if (timeout) 2074 /* the resolution of timeout reg is 100ms */ 2075 ena_dev->admin_queue.completion_timeout = timeout * 100000; 2076 else 2077 ena_dev->admin_queue.completion_timeout = 
ADMIN_CMD_TIMEOUT_US; 2078 2079 return 0; 2080} 2081 2082static int ena_get_dev_stats(struct ena_com_dev *ena_dev, 2083 struct ena_com_stats_ctx *ctx, 2084 enum ena_admin_get_stats_type type) 2085{ 2086 struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd; 2087 struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp; 2088 struct ena_com_admin_queue *admin_queue; 2089 int ret; 2090 2091 admin_queue = &ena_dev->admin_queue; 2092 2093 get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS; 2094 get_cmd->aq_common_descriptor.flags = 0; 2095 get_cmd->type = type; 2096 2097 ret = ena_com_execute_admin_command(admin_queue, 2098 (struct ena_admin_aq_entry *)get_cmd, 2099 sizeof(*get_cmd), 2100 (struct ena_admin_acq_entry *)get_resp, 2101 sizeof(*get_resp)); 2102 2103 if (unlikely(ret)) 2104 ena_trc_err("Failed to get stats. error: %d\n", ret); 2105 2106 return ret; 2107} 2108 2109int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev, 2110 struct ena_admin_basic_stats *stats) 2111{ 2112 struct ena_com_stats_ctx ctx; 2113 int ret; 2114 2115 memset(&ctx, 0x0, sizeof(ctx)); 2116 ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC); 2117 if (likely(ret == 0)) 2118 memcpy(stats, &ctx.get_resp.basic_stats, 2119 sizeof(ctx.get_resp.basic_stats)); 2120 2121 return ret; 2122} 2123#ifdef ENA_EXTENDED_STATS 2124 2125int ena_com_get_dev_extended_stats(struct ena_com_dev *ena_dev, char *buff, 2126 u32 len) 2127{ 2128 struct ena_com_stats_ctx ctx; 2129 struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx.get_cmd; 2130 ena_mem_handle_t mem_handle; 2131 void *virt_addr; 2132 dma_addr_t phys_addr; 2133 int ret; 2134 2135 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, len, 2136 virt_addr, phys_addr, mem_handle); 2137 if (!virt_addr) { 2138 ret = ENA_COM_NO_MEM; 2139 goto done; 2140 } 2141 memset(&ctx, 0x0, sizeof(ctx)); 2142 ret = ena_com_mem_addr_set(ena_dev, 2143 &get_cmd->u.control_buffer.address, 2144 phys_addr); 2145 if (unlikely(ret)) { 2146 ena_trc_err("memory address set failed\n"); 2147 goto free_ext_stats_mem; /* don't leak the coherent buffer */ 2148 } 2149 get_cmd->u.control_buffer.length = len; 2150 2151 get_cmd->device_id = ena_dev->stats_func; 2152 get_cmd->queue_idx = ena_dev->stats_queue; 2153 2154 ret = ena_get_dev_stats(ena_dev, &ctx, 2155 ENA_ADMIN_GET_STATS_TYPE_EXTENDED); 2156 if (ret < 0) 2157 goto free_ext_stats_mem; 2158 2159 ret = snprintf(buff, len, "%s", (char *)virt_addr); 2160 2161free_ext_stats_mem: 2162 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, len, virt_addr, phys_addr, 2163 mem_handle); 2164done: 2165 return ret; 2166} 2167#endif 2168 
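/*
 * Illustrative sketch (not part of this file, compiled out with #if 0):
 * fetching the device's basic statistics with ena_com_get_dev_basic_stats().
 * The example_* name is hypothetical; 64-bit counters are reported as
 * low/high 32-bit halves.
 */
#if 0
static int example_read_basic_stats(struct ena_com_dev *ena_dev)
{
	struct ena_admin_basic_stats stats;
	int rc;

	rc = ena_com_get_dev_basic_stats(ena_dev, &stats);
	if (rc)
		return rc;

	/* Recombine the split 64-bit RX byte counter. */
	ena_trc_dbg("rx_bytes: %llu\n",
		    ((unsigned long long)stats.rx_bytes_high << 32) |
		    stats.rx_bytes_low);

	return 0;
}
#endif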
2169int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu) 2170{ 2171 struct ena_com_admin_queue *admin_queue; 2172 struct ena_admin_set_feat_cmd cmd; 2173 struct ena_admin_set_feat_resp resp; 2174 int ret; 2175 2176 if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) { 2177 ena_trc_dbg("Feature %d isn't supported\n", ENA_ADMIN_MTU); 2178 return ENA_COM_UNSUPPORTED; 2179 } 2180 2181 memset(&cmd, 0x0, sizeof(cmd)); 2182 admin_queue = &ena_dev->admin_queue; 2183 2184 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; 2185 cmd.aq_common_descriptor.flags = 0; 2186 cmd.feat_common.feature_id = ENA_ADMIN_MTU; 2187 cmd.u.mtu.mtu = mtu; 2188 2189 ret = ena_com_execute_admin_command(admin_queue, 2190 (struct ena_admin_aq_entry *)&cmd, 2191 sizeof(cmd), 2192 (struct ena_admin_acq_entry *)&resp, 2193 sizeof(resp)); 2194 2195 if (unlikely(ret)) 2196 ena_trc_err("Failed to set mtu %d. error: %d\n", mtu, ret); 2197 2198 return ret; 2199} 2200 2201int ena_com_get_offload_settings(struct ena_com_dev *ena_dev, 2202 struct ena_admin_feature_offload_desc *offload) 2203{ 2204 int ret; 2205 struct ena_admin_get_feat_resp resp; 2206 2207 ret = ena_com_get_feature(ena_dev, &resp, 2208 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG); 2209 if (unlikely(ret)) { 2210 ena_trc_err("Failed to get offload capabilities %d\n", ret); 2211 return ret; 2212 } 2213 2214 memcpy(offload, &resp.u.offload, sizeof(resp.u.offload)); 2215 2216 return 0; 2217} 2218 2219int ena_com_set_hash_function(struct ena_com_dev *ena_dev) 2220{ 2221 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 2222 struct ena_rss *rss = &ena_dev->rss; 2223 struct ena_admin_set_feat_cmd cmd; 2224 struct ena_admin_set_feat_resp resp; 2225 struct ena_admin_get_feat_resp get_resp; 2226 int ret; 2227 2228 if (!ena_com_check_supported_feature_id(ena_dev, 2229 ENA_ADMIN_RSS_HASH_FUNCTION)) { 2230 ena_trc_dbg("Feature %d isn't supported\n", 2231 ENA_ADMIN_RSS_HASH_FUNCTION); 2232 return ENA_COM_UNSUPPORTED; 2233 } 2234 2235 /* Validate hash function is supported */ 2236 ret = ena_com_get_feature(ena_dev, &get_resp, 2237 ENA_ADMIN_RSS_HASH_FUNCTION); 2238 if (unlikely(ret)) 2239 return ret; 2240 2241 if (!(get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func))) { 2242 ena_trc_err("Func hash %d isn't supported by device, abort\n", 2243 rss->hash_func); 2244 return ENA_COM_UNSUPPORTED; 2245 } 2246 2247 memset(&cmd, 0x0, sizeof(cmd)); 2248 2249 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; 2250 cmd.aq_common_descriptor.flags = 2251 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; 2252 cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION; 2253 cmd.u.flow_hash_func.init_val = rss->hash_init_val; 2254 cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func; 2255 2256 ret = ena_com_mem_addr_set(ena_dev, 2257 &cmd.control_buffer.address, 2258 rss->hash_key_dma_addr); 2259 if (unlikely(ret)) { 2260 ena_trc_err("memory address set failed\n"); 2261 return ret; 2262 } 2263 2264 cmd.control_buffer.length = sizeof(*rss->hash_key); 2265 2266 ret = ena_com_execute_admin_command(admin_queue, 2267 (struct ena_admin_aq_entry *)&cmd, 2268 sizeof(cmd), 2269 (struct ena_admin_acq_entry *)&resp, 2270 sizeof(resp)); 2271 if (unlikely(ret)) { 2272 ena_trc_err("Failed to set hash function %d. error: %d\n", 2273 rss->hash_func, ret); 2274 return ENA_COM_INVAL; 2275 } 2276 2277 return 0; 2278} 2279 
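/*
 * Illustrative sketch (not part of this file, compiled out with #if 0):
 * selecting Toeplitz hashing through ena_com_fill_hash_function(). The
 * example_* name, the zeroed key, and the init value are hypothetical; the
 * key length must be a multiple of 4 bytes and fit the device's key size.
 */
#if 0
static int example_enable_toeplitz(struct ena_com_dev *ena_dev)
{
	/* A zeroed 40-byte key; production code would use a random key. */
	static const u8 key[40];

	return ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
					  key, sizeof(key), 0xFFFFFFFF);
}
#endif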
2280int ena_com_fill_hash_function(struct ena_com_dev *ena_dev, 2281 enum ena_admin_hash_functions func, 2282 const u8 *key, u16 key_len, u32 init_val) 2283{ 2284 struct ena_rss *rss = &ena_dev->rss; 2285 struct ena_admin_get_feat_resp get_resp; 2286 struct ena_admin_feature_rss_flow_hash_control *hash_key = 2287 rss->hash_key; 2288 int rc; 2289 2290 /* Make sure the key size is a multiple of DWORDs */ 2291 if (unlikely(key_len & 0x3)) 2292 return ENA_COM_INVAL; 2293 2294 rc = ena_com_get_feature_ex(ena_dev, &get_resp, 2295 ENA_ADMIN_RSS_HASH_FUNCTION, 2296 rss->hash_key_dma_addr, 2297 sizeof(*rss->hash_key)); 2298 if (unlikely(rc)) 2299 return rc; 2300 2301 if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) { 2302 ena_trc_err("Flow hash function %d isn't supported\n", func); 2303 return ENA_COM_UNSUPPORTED; 2304 } 2305 2306 switch (func) { 2307 case ENA_ADMIN_TOEPLITZ: 2308 if (key_len > sizeof(hash_key->key)) { 2309 ena_trc_err("key len (%hu) is bigger than the max supported (%zu)\n", 2310 key_len, sizeof(hash_key->key)); 2311 return ENA_COM_INVAL; 2312 } 2313 2314 memcpy(hash_key->key, key, key_len); 2315 rss->hash_init_val = init_val; 2316 hash_key->keys_num = key_len >> 2; 2317 break; 2318 case ENA_ADMIN_CRC32: 2319 rss->hash_init_val = init_val; 2320 break; 2321 default: 2322 ena_trc_err("Invalid hash function (%d)\n", func); 2323 return ENA_COM_INVAL; 2324 } 2325 rss->hash_func = func; /* record the requested function for the set command */ 2326 rc = ena_com_set_hash_function(ena_dev); 2327 2328 /* Restore the old function */ 2329 if (unlikely(rc)) 2330 ena_com_get_hash_function(ena_dev, NULL, NULL); 2331 2332 return rc; 2333} 2334 2335int ena_com_get_hash_function(struct ena_com_dev *ena_dev, 2336 enum ena_admin_hash_functions *func, 2337 u8 *key) 2338{ 2339 struct ena_rss *rss = &ena_dev->rss; 2340 struct ena_admin_get_feat_resp get_resp; 2341 struct ena_admin_feature_rss_flow_hash_control *hash_key = 2342 rss->hash_key; 2343 int rc; 2344 2345 rc = ena_com_get_feature_ex(ena_dev, &get_resp, 2346 ENA_ADMIN_RSS_HASH_FUNCTION, 2347 rss->hash_key_dma_addr, 2348 sizeof(*rss->hash_key)); 2349 if (unlikely(rc)) 2350 return rc; 2351 2352 rss->hash_func = get_resp.u.flow_hash_func.selected_func; 2353 if (func) 2354 *func = rss->hash_func; 2355 2356 if (key) 2357 memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2); 2358 2359 return 0; 2360} 2361 2362int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev, 2363 enum ena_admin_flow_hash_proto proto, 2364 u16 *fields) 2365{ 2366 struct ena_rss *rss = &ena_dev->rss; 2367 struct ena_admin_get_feat_resp get_resp; 2368 int rc; 2369 2370 rc = ena_com_get_feature_ex(ena_dev, &get_resp, 2371 ENA_ADMIN_RSS_HASH_INPUT, 2372 rss->hash_ctrl_dma_addr, 2373 sizeof(*rss->hash_ctrl)); 2374 if (unlikely(rc)) 2375 return rc; 2376 2377 if (fields) 2378 *fields = rss->hash_ctrl->selected_fields[proto].fields; 2379 2380 return 0; 2381} 2382 
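/*
 * Illustrative sketch (not part of this file, compiled out with #if 0):
 * querying which packet fields currently feed the RSS hash for TCP/IPv4 via
 * ena_com_get_hash_ctrl(). The example_* name is hypothetical.
 */
#if 0
static void example_dump_tcp4_hash_fields(struct ena_com_dev *ena_dev)
{
	u16 fields = 0;

	if (!ena_com_get_hash_ctrl(ena_dev, ENA_ADMIN_RSS_TCP4, &fields))
		ena_trc_dbg("TCP4 RSS hash fields: 0x%x\n", fields);
}
#endif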
2383int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev) 2384{ 2385 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 2386 struct ena_rss *rss = &ena_dev->rss; 2387 struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl; 2388 struct ena_admin_set_feat_cmd cmd; 2389 struct ena_admin_set_feat_resp resp; 2390 int ret; 2391 2392 if (!ena_com_check_supported_feature_id(ena_dev, 2393 ENA_ADMIN_RSS_HASH_INPUT)) { 2394 ena_trc_dbg("Feature %d isn't supported\n", 2395 ENA_ADMIN_RSS_HASH_INPUT); 2396 return ENA_COM_UNSUPPORTED; 2397 } 2398 2399 memset(&cmd, 0x0, sizeof(cmd)); 2400 2401 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; 2402 cmd.aq_common_descriptor.flags = 2403 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; 2404 cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT; 2405 cmd.u.flow_hash_input.enabled_input_sort = 2406 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK | 2407 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK; 2408 2409 ret = ena_com_mem_addr_set(ena_dev, 2410 &cmd.control_buffer.address, 2411 rss->hash_ctrl_dma_addr); 2412 if (unlikely(ret)) { 2413 ena_trc_err("memory address set failed\n"); 2414 return ret; 2415 } 2416 cmd.control_buffer.length = sizeof(*hash_ctrl); 2417 2418 ret = ena_com_execute_admin_command(admin_queue, 2419 (struct ena_admin_aq_entry *)&cmd, 2420 sizeof(cmd), 2421 (struct ena_admin_acq_entry *)&resp, 2422 sizeof(resp)); 2423 if (unlikely(ret)) 2424 ena_trc_err("Failed to set hash input. error: %d\n", ret); 2425 2426 return ret; 2427} 2428 2429int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev) 2430{ 2431 struct ena_rss *rss = &ena_dev->rss; 2432 struct ena_admin_feature_rss_hash_control *hash_ctrl = 2433 rss->hash_ctrl; 2434 u16 available_fields = 0; 2435 int rc, i; 2436 2437 /* Get the supported hash input */ 2438 rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL); 2439 if (unlikely(rc)) 2440 return rc; 2441 2442 hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields = 2443 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA | 2444 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP; 2445 2446 hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields = 2447 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA | 2448 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP; 2449 2450 hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields = 2451 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA | 2452 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP; 2453 2454 hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields = 2455 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA | 2456 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP; 2457 2458 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields = 2459 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA; 2460 2461 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields = 2462 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA; 2463 2464 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields = 2465 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA; 2466 2467 hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields = 2468 ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA; 2469 2470 for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) { 2471 available_fields = hash_ctrl->selected_fields[i].fields & 2472 hash_ctrl->supported_fields[i].fields; 2473 if (available_fields != hash_ctrl->selected_fields[i].fields) { 2474 ena_trc_err("hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n", 2475 i, hash_ctrl->supported_fields[i].fields, 2476 hash_ctrl->selected_fields[i].fields); 2477 return ENA_COM_UNSUPPORTED; 2478 } 2479 } 2480 2481 rc = ena_com_set_hash_ctrl(ena_dev); 2482 2483 /* In case of failure, restore the old hash ctrl */ 2484 if (unlikely(rc)) 2485 ena_com_get_hash_ctrl(ena_dev, 0, NULL); 2486 2487 return rc; 2488} 2489 
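/*
 * Illustrative sketch (not part of this file, compiled out with #if 0):
 * overriding the hash input for a single protocol with
 * ena_com_fill_hash_ctrl(), here hashing UDP/IPv4 flows on the L3 addresses
 * only. The example_* name is hypothetical.
 */
#if 0
static int example_udp4_hash_on_l3_only(struct ena_com_dev *ena_dev)
{
	return ena_com_fill_hash_ctrl(ena_dev, ENA_ADMIN_RSS_UDP4,
				      ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA);
}
#endif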
2490int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev, 2491 enum ena_admin_flow_hash_proto proto, 2492 u16 hash_fields) 2493{ 2494 struct ena_rss *rss = &ena_dev->rss; 2495 struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl; 2496 u16 supported_fields; 2497 int rc; 2498 2499 if (proto >= ENA_ADMIN_RSS_PROTO_NUM) { 2500 ena_trc_err("Invalid proto num (%u)\n", proto); 2501 return ENA_COM_INVAL; 2502 } 2503 2504 /* Get the ctrl table */ 2505 rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL); 2506 if (unlikely(rc)) 2507 return rc; 2508 2509 /* Make sure all the fields are supported */ 2510 supported_fields = hash_ctrl->supported_fields[proto].fields; 2511 if ((hash_fields & supported_fields) != hash_fields) { 2512 ena_trc_err("proto %d doesn't support the required fields %x. supports only: %x\n", 2513 proto, hash_fields, supported_fields); 2514 } 2515 2516 hash_ctrl->selected_fields[proto].fields = hash_fields; 2517 2518 rc = ena_com_set_hash_ctrl(ena_dev); 2519 2520 /* In case of failure, restore the old hash ctrl */ 2521 if (unlikely(rc)) 2522 ena_com_get_hash_ctrl(ena_dev, 0, NULL); 2523 2524 return rc; 2525} 2526 2527int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev, 2528 u16 entry_idx, u16 entry_value) 2529{ 2530 struct ena_rss *rss = &ena_dev->rss; 2531 2532 if (unlikely(entry_idx >= (1 << rss->tbl_log_size))) 2533 return ENA_COM_INVAL; 2534 2535 if (unlikely((entry_value > ENA_TOTAL_NUM_QUEUES))) 2536 return ENA_COM_INVAL; 2537 2538 rss->host_rss_ind_tbl[entry_idx] = entry_value; 2539 2540 return 0; 2541} 2542 2543int ena_com_indirect_table_set(struct ena_com_dev *ena_dev) 2544{ 2545 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 2546 struct ena_rss *rss = &ena_dev->rss; 2547 struct ena_admin_set_feat_cmd cmd; 2548 struct ena_admin_set_feat_resp resp; 2549 int ret; 2550 2551 if (!ena_com_check_supported_feature_id(ena_dev, 2552 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) { 2553 ena_trc_dbg("Feature %d isn't supported\n", 2554 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG); 2555 return ENA_COM_UNSUPPORTED; 2556 } 2557 2558 ret = ena_com_ind_tbl_convert_to_device(ena_dev); 2559 if (ret) { 2560 ena_trc_err("Failed to convert host indirection table to device table\n"); 2561 return ret; 2562 } 2563 2564 memset(&cmd, 0x0, sizeof(cmd)); 2565 2566 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; 2567 cmd.aq_common_descriptor.flags = 2568 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; 2569 cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG; 2570 cmd.u.ind_table.size = rss->tbl_log_size; 2571 cmd.u.ind_table.inline_index = 0xFFFFFFFF; 2572 2573 ret = ena_com_mem_addr_set(ena_dev, 2574 &cmd.control_buffer.address, 2575 rss->rss_ind_tbl_dma_addr); 2576 if (unlikely(ret)) { 2577 ena_trc_err("memory address set failed\n"); 2578 return ret; 2579 } 2580 2581 cmd.control_buffer.length = (1ULL << rss->tbl_log_size) * 2582 sizeof(struct ena_admin_rss_ind_table_entry); 2583 2584 ret = ena_com_execute_admin_command(admin_queue, 2585 (struct ena_admin_aq_entry *)&cmd, 2586 sizeof(cmd), 2587 (struct ena_admin_acq_entry *)&resp, 2588 sizeof(resp)); 2589 2590 if (unlikely(ret)) 2591 ena_trc_err("Failed to set indirect table. error: %d\n", ret); 2592 2593 return ret; 2594} 2595 
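/*
 * Illustrative sketch (not part of this file, compiled out with #if 0):
 * spreading RSS traffic evenly across RX queues by filling the host
 * indirection table and flushing it to the device. The example_* name is
 * hypothetical; entries hold host queue ids, which
 * ena_com_indirect_table_set() converts to device queue indexes.
 */
#if 0
static int example_spread_rss(struct ena_com_dev *ena_dev,
			      u16 num_queues, u16 tbl_log_size)
{
	u16 i;
	int rc;

	for (i = 0; i < (1 << tbl_log_size); i++) {
		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
						       i % num_queues);
		if (rc)
			return rc;
	}

	return ena_com_indirect_table_set(ena_dev);
}
#endif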
2596int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl) 2597{ 2598 struct ena_rss *rss = &ena_dev->rss; 2599 struct ena_admin_get_feat_resp get_resp; 2600 u32 tbl_size; 2601 int i, rc; 2602 2603 tbl_size = (1ULL << rss->tbl_log_size) * 2604 sizeof(struct ena_admin_rss_ind_table_entry); 2605 2606 rc = ena_com_get_feature_ex(ena_dev, &get_resp, 2607 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 2608 rss->rss_ind_tbl_dma_addr, 2609 tbl_size); 2610 if (unlikely(rc)) 2611 return rc; 2612 2613 if (!ind_tbl) 2614 return 0; 2615 2616 rc = ena_com_ind_tbl_convert_from_device(ena_dev); 2617 if (unlikely(rc)) 2618 return rc; 2619 2620 for (i = 0; i < (1 << rss->tbl_log_size); i++) 2621 ind_tbl[i] = rss->host_rss_ind_tbl[i]; 2622 2623 return 0; 2624} 2625 2626int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size) 2627{ 2628 int rc; 2629 2630 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss)); 2631 2632 rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size); 2633 if (unlikely(rc)) 2634 goto err_indr_tbl; 2635 2636 rc = ena_com_hash_key_allocate(ena_dev); 2637 if (unlikely(rc)) 2638 goto err_hash_key; 2639 2640 rc = ena_com_hash_ctrl_init(ena_dev); 2641 if (unlikely(rc)) 2642 goto err_hash_ctrl; 2643 2644 return 0; 2645 2646err_hash_ctrl: 2647 ena_com_hash_key_destroy(ena_dev); 2648err_hash_key: 2649 ena_com_indirect_table_destroy(ena_dev); 2650err_indr_tbl: 2651 2652 return rc; 2653} 2654 2655void ena_com_rss_destroy(struct ena_com_dev *ena_dev) 2656{ 2657 ena_com_indirect_table_destroy(ena_dev); 2658 ena_com_hash_key_destroy(ena_dev); 2659 ena_com_hash_ctrl_destroy(ena_dev); 2660 2661 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss)); 2662} 2663 2664int ena_com_allocate_host_info(struct ena_com_dev *ena_dev) 2665{ 2666 struct ena_host_attribute *host_attr = &ena_dev->host_attr; 2667 2668 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, 2669 SZ_4K, 2670 host_attr->host_info, 2671 host_attr->host_info_dma_addr, 2672 host_attr->host_info_dma_handle); 2673 if (unlikely(!host_attr->host_info)) 2674 return ENA_COM_NO_MEM; 2675 2676 return 0; 2677} 2678 2679int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev, 2680 u32 debug_area_size) 2681{ 2682 struct ena_host_attribute *host_attr = &ena_dev->host_attr; 2683 2684 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, 2685 debug_area_size, 2686 host_attr->debug_area_virt_addr, 2687 host_attr->debug_area_dma_addr, 2688 host_attr->debug_area_dma_handle); 2689 if (unlikely(!host_attr->debug_area_virt_addr)) { 2690 host_attr->debug_area_size = 0; 2691 return ENA_COM_NO_MEM; 2692 } 2693 2694 host_attr->debug_area_size = debug_area_size; 2695 2696 return 0; 2697} 2698 2699void ena_com_delete_host_info(struct ena_com_dev *ena_dev) 2700{ 2701 struct ena_host_attribute *host_attr = &ena_dev->host_attr; 2702 2703 if (host_attr->host_info) { 2704 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, 2705 SZ_4K, 2706 host_attr->host_info, 2707 host_attr->host_info_dma_addr, 2708 host_attr->host_info_dma_handle); 2709 host_attr->host_info = NULL; 2710 } 2711} 2712 2713void ena_com_delete_debug_area(struct ena_com_dev *ena_dev) 2714{ 2715 struct ena_host_attribute *host_attr = &ena_dev->host_attr; 2716 2717 if (host_attr->debug_area_virt_addr) { 2718 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, 2719 host_attr->debug_area_size, 2720 host_attr->debug_area_virt_addr, 2721 host_attr->debug_area_dma_addr, 2722 host_attr->debug_area_dma_handle); 2723 
host_attr->debug_area_virt_addr = NULL; 2724 } 2725} 2726 2727int ena_com_set_host_attributes(struct ena_com_dev *ena_dev) 2728{ 2729 struct ena_host_attribute *host_attr = &ena_dev->host_attr; 2730 struct ena_com_admin_queue *admin_queue; 2731 struct ena_admin_set_feat_cmd cmd; 2732 struct ena_admin_set_feat_resp resp; 2733 2734 int ret; 2735 2736 /* Host attribute config is called before ena_com_get_dev_attr_feat 2737 * so ena_com can't check if the feature is supported. 2738 */ 2739 2740 memset(&cmd, 0x0, sizeof(cmd)); 2741 admin_queue = &ena_dev->admin_queue; 2742 2743 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; 2744 cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG; 2745 2746 ret = ena_com_mem_addr_set(ena_dev, 2747 &cmd.u.host_attr.debug_ba, 2748 host_attr->debug_area_dma_addr); 2749 if (unlikely(ret)) { 2750 ena_trc_err("memory address set failed\n"); 2751 return ret; 2752 } 2753 2754 ret = ena_com_mem_addr_set(ena_dev, 2755 &cmd.u.host_attr.os_info_ba, 2756 host_attr->host_info_dma_addr); 2757 if (unlikely(ret)) { 2758 ena_trc_err("memory address set failed\n"); 2759 return ret; 2760 } 2761 2762 cmd.u.host_attr.debug_area_size = host_attr->debug_area_size; 2763 2764 ret = ena_com_execute_admin_command(admin_queue, 2765 (struct ena_admin_aq_entry *)&cmd, 2766 sizeof(cmd), 2767 (struct ena_admin_acq_entry *)&resp, 2768 sizeof(resp)); 2769 2770 if (unlikely(ret)) 2771 ena_trc_err("Failed to set host attributes: %d\n", ret); 2772 2773 return ret; 2774} 2775 2776/* Interrupt moderation */ 2777bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev) 2778{ 2779 return ena_com_check_supported_feature_id(ena_dev, 2780 ENA_ADMIN_INTERRUPT_MODERATION); 2781} 2782 2783int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev, 2784 u32 tx_coalesce_usecs) 2785{ 2786 if (!ena_dev->intr_delay_resolution) { 2787 ena_trc_err("Illegal interrupt delay granularity value\n"); 2788 return ENA_COM_FAULT; 2789 } 2790 2791 ena_dev->intr_moder_tx_interval = tx_coalesce_usecs / 2792 ena_dev->intr_delay_resolution; 2793 2794 return 0; 2795} 2796 2797int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev, 2798 u32 rx_coalesce_usecs) 2799{ 2800 if (!ena_dev->intr_delay_resolution) { 2801 ena_trc_err("Illegal interrupt delay granularity value\n"); 2802 return ENA_COM_FAULT; 2803 } 2804 2805 /* We use LOWEST entry of moderation table for storing 2806 * nonadaptive interrupt coalescing values 2807 */ 2808 ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval = 2809 rx_coalesce_usecs / ena_dev->intr_delay_resolution; 2810 2811 return 0; 2812} 2813 2814void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev) 2815{ 2816 size_t size; 2817 2818 size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS; 2819 if (ena_dev->intr_moder_tbl) 2820 ENA_MEM_FREE(ena_dev->dmadev, ena_dev->intr_moder_tbl, size); 2821 ena_dev->intr_moder_tbl = NULL; 2822} 2823 2824int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev) 2825{ 2826 struct ena_admin_get_feat_resp get_resp; 2827 u16 delay_resolution; 2828 int rc; 2829 2830 rc = ena_com_get_feature(ena_dev, &get_resp, 2831 ENA_ADMIN_INTERRUPT_MODERATION); 2832 2833 if (rc) { 2834 if (rc == ENA_COM_UNSUPPORTED) { 2835 ena_trc_dbg("Feature %d isn't supported\n", 2836 ENA_ADMIN_INTERRUPT_MODERATION); 2837 rc = 0; 2838 } else { 2839 ena_trc_err("Failed to get interrupt moderation admin cmd. 
rc: %d\n", 2840 rc); 2841 } 2842 2843 /* no moderation supported, disable adaptive support */ 2844 ena_com_disable_adaptive_moderation(ena_dev); 2845 return rc; 2846 } 2847 2848 rc = ena_com_init_interrupt_moderation_table(ena_dev); 2849 if (rc) 2850 goto err; 2851 2852 /* if moderation is supported by device we set adaptive moderation */ 2853 delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution; 2854 ena_com_update_intr_delay_resolution(ena_dev, delay_resolution); 2855 ena_com_enable_adaptive_moderation(ena_dev); 2856 2857 return 0; 2858err: 2859 ena_com_destroy_interrupt_moderation(ena_dev); 2860 return rc; 2861} 2862 2863void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev) 2864{ 2865 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; 2866 2867 if (!intr_moder_tbl) 2868 return; 2869 2870 intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval = 2871 ENA_INTR_LOWEST_USECS; 2872 intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval = 2873 ENA_INTR_LOWEST_PKTS; 2874 intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval = 2875 ENA_INTR_LOWEST_BYTES; 2876 2877 intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval = 2878 ENA_INTR_LOW_USECS; 2879 intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval = 2880 ENA_INTR_LOW_PKTS; 2881 intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval = 2882 ENA_INTR_LOW_BYTES; 2883 2884 intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval = 2885 ENA_INTR_MID_USECS; 2886 intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval = 2887 ENA_INTR_MID_PKTS; 2888 intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval = 2889 ENA_INTR_MID_BYTES; 2890 2891 intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval = 2892 ENA_INTR_HIGH_USECS; 2893 intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval = 2894 ENA_INTR_HIGH_PKTS; 2895 intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval = 2896 ENA_INTR_HIGH_BYTES; 2897 2898 intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval = 2899 ENA_INTR_HIGHEST_USECS; 2900 intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval = 2901 ENA_INTR_HIGHEST_PKTS; 2902 intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval = 2903 ENA_INTR_HIGHEST_BYTES; 2904} 2905 2906unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev) 2907{ 2908 return ena_dev->intr_moder_tx_interval; 2909} 2910 2911unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev) 2912{ 2913 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; 2914 2915 if (intr_moder_tbl) 2916 return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval; 2917 2918 return 0; 2919} 2920 2921void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev, 2922 enum ena_intr_moder_level level, 2923 struct ena_intr_moder_entry *entry) 2924{ 2925 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; 2926 2927 if (level >= ENA_INTR_MAX_NUM_OF_LEVELS) 2928 return; 2929 2930 intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval; 2931 if (ena_dev->intr_delay_resolution) 2932 intr_moder_tbl[level].intr_moder_interval /= 2933 ena_dev->intr_delay_resolution; 2934 intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval; 2935 2936 /* use hardcoded value until ethtool supports bytecount parameter */ 2937 if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED) 2938 intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval; 2939} 2940 2941void 
ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev, 2942 enum ena_intr_moder_level level, 2943 struct ena_intr_moder_entry *entry) 2944{ 2945 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; 2946 2947 if (level >= ENA_INTR_MAX_NUM_OF_LEVELS) 2948 return; 2949 2950 entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval; 2951 if (ena_dev->intr_delay_resolution) 2952 entry->intr_moder_interval *= ena_dev->intr_delay_resolution; 2953 entry->pkts_per_interval = 2954 intr_moder_tbl[level].pkts_per_interval; 2955 entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval; 2956} 2957 2958int ena_com_config_dev_mode(struct ena_com_dev *ena_dev, 2959 struct ena_admin_feature_llq_desc *llq) 2960{ 2961 int rc; 2962 int size; 2963 2964 if (llq->max_llq_num == 0) { 2965 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 2966 return 0; 2967 } 2968 2969 rc = ena_com_config_llq_info(ena_dev, llq); 2970 if (rc) 2971 return rc; 2972 2973 /* Validate that the header and descriptors fit in an LLQ entry */ 2974 size = ena_dev->tx_max_header_size; 2975 size += ena_dev->llq_info.descs_num_before_header * 2976 sizeof(struct ena_eth_io_tx_desc); 2977 2978 if (unlikely(ena_dev->llq_info.desc_list_entry_size < size)) { 2979 ena_trc_err("the size of the LLQ entry is smaller than needed\n"); 2980 return ENA_COM_INVAL; 2981 } 2982 2983 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV; 2984 2985 return 0; 2986} 2987
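/*
 * Illustrative sketch (not part of this file, compiled out with #if 0):
 * choosing the TX placement policy after feature discovery, using
 * ena_com_config_dev_mode(). The example_* name is hypothetical; feat would
 * come from ena_com_get_dev_attr_feat().
 */
#if 0
static int example_choose_tx_placement(struct ena_com_dev *ena_dev,
				       struct ena_com_dev_get_features_ctx *feat)
{
	int rc;

	/* Falls back to host placement when the device exposes no LLQs. */
	rc = ena_com_config_dev_mode(ena_dev, &feat->llq);
	if (rc)
		return rc;

	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		ena_trc_dbg("using LLQ (device memory) TX placement\n");

	return 0;
}
#endif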