/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of copyright holder nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "ena_eth_com.h"

static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
	struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 expected_phase, head_masked;
	u16 desc_phase;

	head_masked = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
			+ (head_masked * io_cq->cdesc_entry_size_in_bytes));

	desc_phase = (READ_ONCE32(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;

	if (desc_phase != expected_phase)
		return NULL;

	/* Make sure we read the rest of the descriptor after the phase bit
	 * has been read
	 */
	dma_rmb();

	return cdesc;
}

static void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
{
	u16 tail_masked;
	u32 offset;

	tail_masked = io_sq->tail & (io_sq->q_depth - 1);

	offset = tail_masked * io_sq->desc_entry_size;

	return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
}

static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
					      u8 *bounce_buffer)
{
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;

	u16 dst_tail_mask;
	u32 dst_offset;

	dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
	dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;

	if (is_llq_max_tx_burst_exists(io_sq)) {
		if (unlikely(!io_sq->entries_in_tx_burst_left)) {
			ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
				    "Error: trying to send more packets than tx burst allows\n");
			return ENA_COM_NO_SPACE;
		}

		io_sq->entries_in_tx_burst_left--;
		ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
			    "Decreasing entries_in_tx_burst_left of queue %d to %d\n",
			    io_sq->qid, io_sq->entries_in_tx_burst_left);
	}

	/* Make sure everything was written into the bounce buffer before
	 * writing the bounce buffer to the device
	 */
	wmb();

	/* The line is completed. Copy it to dev */
	ENA_MEMCPY_TO_DEVICE_64(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
				bounce_buffer,
				llq_info->desc_list_entry_size);

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return ENA_COM_OK;
}

static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
					  u8 *header_src,
					  u16 header_len)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;
	u16 header_offset;

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
		return 0;

	header_offset =
		llq_info->descs_num_before_header * io_sq->desc_entry_size;

	if (unlikely((header_offset + header_len) > llq_info->desc_list_entry_size)) {
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Trying to write header larger than llq entry can accommodate\n");
		return ENA_COM_FAULT;
	}

	if (unlikely(!bounce_buffer)) {
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Bounce buffer is NULL\n");
		return ENA_COM_FAULT;
	}

	memcpy(bounce_buffer + header_offset, header_src, header_len);

	return 0;
}

static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	u8 *bounce_buffer;
	void *sq_desc;

	bounce_buffer = pkt_ctrl->curr_bounce_buf;

	if (unlikely(!bounce_buffer)) {
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Bounce buffer is NULL\n");
		return NULL;
	}

	sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
	pkt_ctrl->idx++;
	pkt_ctrl->descs_left_in_line--;

	return sq_desc;
}

static int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
		return ENA_COM_OK;

	/* bounce buffer was used, so write it and get a new one */
	if (pkt_ctrl->idx) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc)) {
			ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
				    "Failed to write bounce buffer to device\n");
			return rc;
		}

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);
	}

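	/* Reset the LLQ line bookkeeping so the next packet starts a fresh line */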
	pkt_ctrl->idx = 0;
	pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header;
	return ENA_COM_OK;
}

static void *get_sq_desc(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return get_sq_desc_llq(io_sq);

	return get_sq_desc_regular_queue(io_sq);
}

static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (!pkt_ctrl->descs_left_in_line) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc)) {
			ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
				    "Failed to write bounce buffer to device\n");
			return rc;
		}

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);

		pkt_ctrl->idx = 0;
		if (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY))
			pkt_ctrl->descs_left_in_line = 1;
		else
			pkt_ctrl->descs_left_in_line =
				llq_info->desc_list_entry_size / io_sq->desc_entry_size;
	}

	return ENA_COM_OK;
}

static int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return ena_com_sq_update_llq_tail(io_sq);

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return ENA_COM_OK;
}

static struct ena_eth_io_rx_cdesc_base *
	ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
{
	idx &= (io_cq->q_depth - 1);
	return (struct ena_eth_io_rx_cdesc_base *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
			idx * io_cq->cdesc_entry_size_in_bytes);
}

static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
				    u16 *first_cdesc_idx)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 count = 0, head_masked;
	u32 last = 0;

	do {
		cdesc = ena_com_get_next_rx_cdesc(io_cq);
		if (!cdesc)
			break;

		ena_com_cq_inc_head(io_cq);
		count++;
		last = (READ_ONCE32(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
			ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
	} while (!last);

	if (last) {
		*first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;
		count += io_cq->cur_rx_pkt_cdesc_count;

		head_masked = io_cq->head & (io_cq->q_depth - 1);

		io_cq->cur_rx_pkt_cdesc_count = 0;
		io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;

		ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq),
			    "ENA q_id: %d packets were completed. first desc idx %u descs# %d\n",
			    io_cq->qid, *first_cdesc_idx, count);
	} else {
		io_cq->cur_rx_pkt_cdesc_count += count;
		count = 0;
	}

	return count;
}

static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
			       struct ena_com_tx_meta *ena_meta)
{
	struct ena_eth_io_tx_meta_desc *meta_desc = NULL;

	meta_desc = get_sq_desc(io_sq);
	if (unlikely(!meta_desc))
		return ENA_COM_FAULT;

	memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;

	/* bits 0-9 of the mss */
	meta_desc->word2 |= ((u32)ena_meta->mss <<
		ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
	/* bits 10-13 of the mss */
	meta_desc->len_ctrl |= ((ena_meta->mss >> 10) <<
		ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;

	/* Extended meta desc */
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
	meta_desc->len_ctrl |= ((u32)io_sq->phase <<
		ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_PHASE_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;

	meta_desc->word2 |= ena_meta->l3_hdr_len &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
	meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;

	meta_desc->word2 |= ((u32)ena_meta->l4_hdr_len <<
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;

	return ena_com_sq_update_tail(io_sq);
}

static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
						 struct ena_com_tx_ctx *ena_tx_ctx,
						 bool *have_meta)
{
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

	/* When disable meta caching is set, don't bother to save the meta and
	 * compare it to the stored version, just create the meta
	 */
	if (io_sq->disable_meta_caching) {
		if (unlikely(!ena_tx_ctx->meta_valid))
			return ENA_COM_INVAL;

		*have_meta = true;
		return ena_com_create_meta(io_sq, ena_meta);
	}

	if (ena_com_meta_desc_changed(io_sq, ena_tx_ctx)) {
		*have_meta = true;
		/* Cache the meta desc */
		memcpy(&io_sq->cached_tx_meta, ena_meta,
		       sizeof(struct ena_com_tx_meta));
		return ena_com_create_meta(io_sq, ena_meta);
	}
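
	/* The cached meta desc matches this packet; no new meta descriptor is needed */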
	*have_meta = false;
	return ENA_COM_OK;
}

static void ena_com_rx_set_flags(struct ena_com_io_cq *io_cq,
				 struct ena_com_rx_ctx *ena_rx_ctx,
				 struct ena_eth_io_rx_cdesc_base *cdesc)
{
	ena_rx_ctx->l3_proto = cdesc->status &
		ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
	ena_rx_ctx->l4_proto =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
	ena_rx_ctx->l3_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_checked =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT);
	ena_rx_ctx->hash = cdesc->hash;
	ena_rx_ctx->frag =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;

	ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq),
		    "l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n",
		    ena_rx_ctx->l3_proto,
		    ena_rx_ctx->l4_proto,
		    ena_rx_ctx->l3_csum_err,
		    ena_rx_ctx->l4_csum_err,
		    ena_rx_ctx->hash,
		    ena_rx_ctx->frag,
		    cdesc->status);
}

/*****************************************************************************/
/*****************************    API    **********************************/
/*****************************************************************************/

int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc)
{
	struct ena_eth_io_tx_desc *desc = NULL;
	struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
	void *buffer_to_push = ena_tx_ctx->push_header;
	u16 header_len = ena_tx_ctx->header_len;
	u16 num_bufs = ena_tx_ctx->num_bufs;
	u16 start_tail = io_sq->tail;
	int i, rc;
	bool have_meta;
	u64 addr_hi;

	ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX,
		 ena_com_io_sq_to_ena_dev(io_sq), "wrong Q type");

	/* num_bufs +1 for potential meta desc */
	if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) {
		ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
			    "Not enough space in the tx queue\n");
		return ENA_COM_NO_MEM;
	}

	if (unlikely(header_len > io_sq->tx_max_header_size)) {
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Header size is too large %d max header: %d\n",
			    header_len, io_sq->tx_max_header_size);
		return ENA_COM_INVAL;
	}

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV
		     && !buffer_to_push)) {
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Push header wasn't provided on LLQ mode\n");
		return ENA_COM_INVAL;
	}

	rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
	if (unlikely(rc))
		return rc;

	rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta);
	if (unlikely(rc)) {
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Failed to create and store tx meta desc\n");
		return rc;
	}

	/* If the caller doesn't want to send packets */
	if (unlikely(!num_bufs && !header_len)) {
		rc = ena_com_close_bounce_buffer(io_sq);
		if (rc)
			ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
				    "Failed to write buffers to LLQ\n");
		*nb_hw_desc = io_sq->tail - start_tail;
		return rc;
	}

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return ENA_COM_FAULT;
	memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

	/* Set first desc when we don't have meta descriptor */
	if (!have_meta)
		desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;

	desc->buff_addr_hi_hdr_sz |= ((u32)header_len <<
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
	desc->len_ctrl |= ((u32)io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_DESC_PHASE_MASK;

	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;

	/* Bits 0-9 */
	desc->meta_ctrl |= ((u32)ena_tx_ctx->req_id <<
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;

	desc->meta_ctrl |= (ena_tx_ctx->df <<
		ENA_ETH_IO_TX_DESC_DF_SHIFT) &
		ENA_ETH_IO_TX_DESC_DF_MASK;

	/* Bits 10-15 */
	desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) <<
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;

	if (ena_tx_ctx->meta_valid) {
		desc->meta_ctrl |= (ena_tx_ctx->tso_enable <<
			ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
		desc->meta_ctrl |= ena_tx_ctx->l3_proto &
			ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_proto <<
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable <<
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
	}

	for (i = 0; i < num_bufs; i++) {
		/* The first desc shares the same desc as the header */
		if (likely(i != 0)) {
			rc = ena_com_sq_update_tail(io_sq);
			if (unlikely(rc)) {
				ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
					    "Failed to update sq tail\n");
				return rc;
			}

			desc = get_sq_desc(io_sq);
			if (unlikely(!desc))
				return ENA_COM_FAULT;

			memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

			desc->len_ctrl |= ((u32)io_sq->phase <<
				ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
				ENA_ETH_IO_TX_DESC_PHASE_MASK;
		}

		desc->len_ctrl |= ena_bufs->len &
			ENA_ETH_IO_TX_DESC_LENGTH_MASK;

		addr_hi = ((ena_bufs->paddr &
			GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

		desc->buff_addr_lo = (u32)ena_bufs->paddr;
		desc->buff_addr_hi_hdr_sz |= addr_hi &
			ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
		ena_bufs++;
	}

	/* set the last desc indicator */
	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;

	rc = ena_com_sq_update_tail(io_sq);
	if (unlikely(rc)) {
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Failed to update sq tail of the last descriptor\n");
		return rc;
	}

	rc = ena_com_close_bounce_buffer(io_sq);
	if (rc)
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Failed when closing bounce buffer\n");

	*nb_hw_desc = io_sq->tail - start_tail;
	return rc;
}

int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx)
{
	struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
	struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
	u16 q_depth = io_cq->q_depth;
	u16 cdesc_idx = 0;
	u16 nb_hw_desc;
	u16 i = 0;

	ENA_WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
		 ena_com_io_cq_to_ena_dev(io_cq), "wrong Q type");

	nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
	if (nb_hw_desc == 0) {
		ena_rx_ctx->descs = nb_hw_desc;
		return 0;
	}

	ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq),
		    "Fetch rx packet: queue %d completed desc: %d\n",
		    io_cq->qid, nb_hw_desc);

	if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
		ena_trc_err(ena_com_io_cq_to_ena_dev(io_cq),
			    "Too many RX cdescs (%d) > MAX(%d)\n",
			    nb_hw_desc, ena_rx_ctx->max_bufs);
		return ENA_COM_NO_SPACE;
	}

	cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx);
	ena_rx_ctx->pkt_offset = cdesc->offset;

	do {
		ena_buf[i].len = cdesc->length;
		ena_buf[i].req_id = cdesc->req_id;
		if (unlikely(ena_buf[i].req_id >= q_depth))
			return ENA_COM_EIO;

		if (++i >= nb_hw_desc)
			break;

		cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);

	} while (1);

	/* Update SQ head ptr */
	io_sq->next_to_comp += nb_hw_desc;

	ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq),
		    "[%s][QID#%d] Updating SQ head to: %d\n", __func__,
		    io_sq->qid, io_sq->next_to_comp);

	/* Get rx flags from the last pkt */
	ena_com_rx_set_flags(io_cq, ena_rx_ctx, cdesc);

	ena_rx_ctx->descs = nb_hw_desc;

	return 0;
}

int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id)
{
	struct ena_eth_io_rx_desc *desc;

	ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
		 ena_com_io_sq_to_ena_dev(io_sq), "wrong Q type");

	if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
		return ENA_COM_NO_SPACE;

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return ENA_COM_FAULT;

	memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));

	desc->length = ena_buf->len;

	desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK |
		     ENA_ETH_IO_RX_DESC_LAST_MASK |
		     ENA_ETH_IO_RX_DESC_COMP_REQ_MASK |
		     (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK);

	desc->req_id = req_id;

	ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
		    "[%s] Adding single RX desc, Queue: %u, req_id: %u\n",
		    __func__, io_sq->qid, req_id);

	desc->buff_addr_lo = (u32)ena_buf->paddr;
	desc->buff_addr_hi =
		((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

	return ena_com_sq_update_tail(io_sq);
}

bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;

	cdesc = ena_com_get_next_rx_cdesc(io_cq);
	if (cdesc)
		return false;
	else
		return true;
}