/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of copyright holder nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef ENA_ETH_COM_H_
#define ENA_ETH_COM_H_

#if defined(__cplusplus)
extern "C" {
#endif
#include "ena_com.h"

/* head update threshold in units of (queue size / ENA_COMP_HEAD_THRESH) */
#define ENA_COMP_HEAD_THRESH 4

struct ena_com_tx_ctx {
        struct ena_com_tx_meta ena_meta;
        struct ena_com_buf *ena_bufs;
        /* For LLQ, header buffer - pushed to the device mem space */
        void *push_header;

        enum ena_eth_io_l3_proto_index l3_proto;
        enum ena_eth_io_l4_proto_index l4_proto;
        u16 num_bufs;
        u16 req_id;
        /* For a regular queue, indicates the size of the header.
         * For LLQ, indicates the size of the pushed buffer.
         */
        u16 header_len;

        u8 meta_valid;
        u8 tso_enable;
        u8 l3_csum_enable;
        u8 l4_csum_enable;
        u8 l4_csum_partial;
        u8 df; /* Don't fragment */
};

struct ena_com_rx_ctx {
        struct ena_com_rx_buf_info *ena_bufs;
        enum ena_eth_io_l3_proto_index l3_proto;
        enum ena_eth_io_l4_proto_index l4_proto;
        bool l3_csum_err;
        bool l4_csum_err;
        u8 l4_csum_checked;
        /* fragmented packet */
        bool frag;
        u32 hash;
        u16 descs;
        int max_bufs;
        u8 pkt_offset;
};

int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
                       struct ena_com_tx_ctx *ena_tx_ctx,
                       int *nb_hw_desc);

int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
                   struct ena_com_io_sq *io_sq,
                   struct ena_com_rx_ctx *ena_rx_ctx);

int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
                               struct ena_com_buf *ena_buf,
                               u16 req_id);

bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);

static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
                                       struct ena_eth_io_intr_reg *intr_reg)
{
        ENA_REG_WRITE32(io_cq->bus, intr_reg->intr_control, io_cq->unmask_reg);
}

static inline int ena_com_free_q_entries(struct ena_com_io_sq *io_sq)
{
        u16 tail, next_to_comp, cnt;

        next_to_comp = io_sq->next_to_comp;
        tail = io_sq->tail;
        cnt = tail - next_to_comp;

        return io_sq->q_depth - 1 - cnt;
}

/* Check if the submission queue has enough space to hold required_buffers */
static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
                                                u16 required_buffers)
{
        int temp;

        if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
                return ena_com_free_q_entries(io_sq) >= required_buffers;

        /* This calculation doesn't need to be 100% accurate. To reduce
         * the calculation overhead, just subtract 2 lines from the free
         * descriptors (one for the header line and one to compensate for
         * the division rounding down).
         */
        temp = required_buffers / io_sq->llq_info.descs_per_entry + 2;

        return ena_com_free_q_entries(io_sq) > temp;
}

static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
                                             struct ena_com_tx_ctx *ena_tx_ctx)
{
        if (!ena_tx_ctx->meta_valid)
                return false;

        return !!memcmp(&io_sq->cached_tx_meta,
                        &ena_tx_ctx->ena_meta,
                        sizeof(struct ena_com_tx_meta));
}

static inline bool is_llq_max_tx_burst_exists(struct ena_com_io_sq *io_sq)
{
        return (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) &&
               io_sq->llq_info.max_entries_in_tx_burst > 0;
}

static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
                                              struct ena_com_tx_ctx *ena_tx_ctx)
{
        struct ena_com_llq_info *llq_info;
        int descs_after_first_entry;
        int num_entries_needed = 1;
        u16 num_descs;

        if (!is_llq_max_tx_burst_exists(io_sq))
                return false;

        llq_info = &io_sq->llq_info;
        num_descs = ena_tx_ctx->num_bufs;

        if (llq_info->disable_meta_caching ||
            unlikely(ena_com_meta_desc_changed(io_sq, ena_tx_ctx)))
                ++num_descs;

        if (num_descs > llq_info->descs_num_before_header) {
                descs_after_first_entry = num_descs - llq_info->descs_num_before_header;
                num_entries_needed += DIV_ROUND_UP(descs_after_first_entry,
                                                   llq_info->descs_per_entry);
        }

        ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
                    "Queue: %d num_descs: %d num_entries_needed: %d\n",
                    io_sq->qid, num_descs, num_entries_needed);

        return num_entries_needed > io_sq->entries_in_tx_burst_left;
}

static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
{
        u16 max_entries_in_tx_burst = io_sq->llq_info.max_entries_in_tx_burst;
        u16 tail = io_sq->tail;

        ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
                    "Write submission queue doorbell for queue: %d tail: %d\n",
                    io_sq->qid, tail);

        ENA_REG_WRITE32(io_sq->bus, tail, io_sq->db_addr);

        if (is_llq_max_tx_burst_exists(io_sq)) {
                ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
                            "Reset available entries in tx burst for queue %d to %d\n",
                            io_sq->qid, max_entries_in_tx_burst);
                io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
        }

        return 0;
}
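
/*
 * Illustrative sketch (not part of the original header): one way the TX
 * helpers above are typically combined on the transmit path.  The function
 * name, the single extra descriptor reserved for metadata, and the choice
 * of ENA_COM_TRY_AGAIN as the "queue full" error code are assumptions made
 * for this example only; real drivers usually also batch the final doorbell
 * across several packets.
 */
static inline int ena_com_tx_submit_sketch(struct ena_com_io_sq *io_sq,
                                           struct ena_com_tx_ctx *tx_ctx)
{
        int nb_hw_desc = 0;
        int rc;

        /* Reserve room for the data descriptors plus one metadata descriptor */
        if (unlikely(!ena_com_sq_have_enough_space(io_sq, tx_ctx->num_bufs + 1)))
                return ENA_COM_TRY_AGAIN;

        /* For LLQ, ring the doorbell first if the current burst is exhausted */
        if (ena_com_is_doorbell_needed(io_sq, tx_ctx))
                ena_com_write_sq_doorbell(io_sq);

        rc = ena_com_prepare_tx(io_sq, tx_ctx, &nb_hw_desc);
        if (unlikely(rc))
                return rc;

        /* Make the newly posted descriptors visible to the device */
        ena_com_write_sq_doorbell(io_sq);

        return 0;
}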
static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq)
{
        u16 unreported_comp, head;
        bool need_update;

        if (unlikely(io_cq->cq_head_db_reg)) {
                head = io_cq->head;
                unreported_comp = head - io_cq->last_head_update;
                need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);

                if (unlikely(need_update)) {
                        ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq),
                                    "Write completion queue doorbell for queue %d: head: %d\n",
                                    io_cq->qid, head);
                        ENA_REG_WRITE32(io_cq->bus, head, io_cq->cq_head_db_reg);
                        io_cq->last_head_update = head;
                }
        }

        return 0;
}

static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq,
                                            u8 numa_node)
{
        struct ena_eth_io_numa_node_cfg_reg numa_cfg;

        if (!io_cq->numa_node_cfg_reg)
                return;

        numa_cfg.numa_cfg = (numa_node & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK)
                | ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK;

        ENA_REG_WRITE32(io_cq->bus, numa_cfg.numa_cfg, io_cq->numa_node_cfg_reg);
}

static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem)
{
        io_sq->next_to_comp += elem;
}

static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
{
        io_cq->head++;

        /* Switch phase bit in case of wrap around */
        if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
                io_cq->phase ^= 1;
}

static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
                                             u16 *req_id)
{
        u8 expected_phase, cdesc_phase;
        struct ena_eth_io_tx_cdesc *cdesc;
        u16 masked_head;

        masked_head = io_cq->head & (io_cq->q_depth - 1);
        expected_phase = io_cq->phase;

        cdesc = (struct ena_eth_io_tx_cdesc *)
                ((uintptr_t)io_cq->cdesc_addr.virt_addr +
                (masked_head * io_cq->cdesc_entry_size_in_bytes));

        /* When the current completion descriptor phase isn't the same as the
         * expected phase, it means that the device hasn't updated this
         * completion yet.
         */
        cdesc_phase = READ_ONCE16(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
        if (cdesc_phase != expected_phase)
                return ENA_COM_TRY_AGAIN;

        dma_rmb();

        *req_id = READ_ONCE16(cdesc->req_id);
        if (unlikely(*req_id >= io_cq->q_depth)) {
                ena_trc_err(ena_com_io_cq_to_ena_dev(io_cq),
                            "Invalid req id %d\n", cdesc->req_id);
                return ENA_COM_INVAL;
        }

        ena_com_cq_inc_head(io_cq);

        return 0;
}
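
/*
 * Illustrative sketch (not part of the original header): a minimal TX
 * completion reaping loop built from the helpers above.  A real driver
 * translates each req_id back to its own TX bookkeeping, frees the mapped
 * buffers, and acknowledges the exact number of descriptors that request
 * consumed; one descriptor per request is assumed here purely for the
 * example, and the function name is hypothetical.
 */
static inline int ena_com_tx_reap_sketch(struct ena_com_io_cq *io_cq,
                                         struct ena_com_io_sq *io_sq)
{
        u16 req_id;
        int reaped = 0;

        /* Drain every completion the device has posted so far */
        while (ena_com_tx_comp_req_id_get(io_cq, &req_id) == 0) {
                /* The driver would release the buffers mapped for req_id here */
                ena_com_comp_ack(io_sq, 1);
                reaped++;
        }

        /* Report the new head to the device if enough completions piled up */
        ena_com_update_dev_comp_head(io_cq);

        return reaped;
}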

#if defined(__cplusplus)
}
#endif
#endif /* ENA_ETH_COM_H_ */