/*-
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
24290650Shselasky * 25290650Shselasky * $FreeBSD: releng/10.3/sys/dev/mlx5/mlx5_en/mlx5_en_rx.c 294919 2016-01-27 14:59:22Z hselasky $ 26290650Shselasky */ 27290650Shselasky 28290650Shselasky#include "en.h" 29290650Shselasky#include <machine/in_cksum.h> 30290650Shselasky 31290650Shselaskystatic inline int 32290650Shselaskymlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, 33290650Shselasky struct mlx5e_rx_wqe *wqe, u16 ix) 34290650Shselasky{ 35290650Shselasky bus_dma_segment_t segs[1]; 36290650Shselasky struct mbuf *mb; 37290650Shselasky int nsegs; 38290650Shselasky int err; 39290650Shselasky 40290650Shselasky if (rq->mbuf[ix].mbuf != NULL) 41290650Shselasky return (0); 42290650Shselasky 43290650Shselasky mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rq->wqe_sz); 44290650Shselasky if (unlikely(!mb)) 45290650Shselasky return (-ENOMEM); 46290650Shselasky 47290650Shselasky /* set initial mbuf length */ 48290650Shselasky mb->m_pkthdr.len = mb->m_len = rq->wqe_sz; 49290650Shselasky 50290650Shselasky /* get IP header aligned */ 51290650Shselasky m_adj(mb, MLX5E_NET_IP_ALIGN); 52290650Shselasky 53290650Shselasky err = -bus_dmamap_load_mbuf_sg(rq->dma_tag, rq->mbuf[ix].dma_map, 54290650Shselasky mb, segs, &nsegs, BUS_DMA_NOWAIT); 55290650Shselasky if (err != 0) 56290650Shselasky goto err_free_mbuf; 57290650Shselasky if (unlikely(nsegs != 1)) { 58290650Shselasky bus_dmamap_unload(rq->dma_tag, rq->mbuf[ix].dma_map); 59290650Shselasky err = -ENOMEM; 60290650Shselasky goto err_free_mbuf; 61290650Shselasky } 62290650Shselasky wqe->data.addr = cpu_to_be64(segs[0].ds_addr); 63290650Shselasky 64290650Shselasky rq->mbuf[ix].mbuf = mb; 65290650Shselasky rq->mbuf[ix].data = mb->m_data; 66290650Shselasky 67290650Shselasky bus_dmamap_sync(rq->dma_tag, rq->mbuf[ix].dma_map, 68290650Shselasky BUS_DMASYNC_PREREAD); 69290650Shselasky return (0); 70290650Shselasky 71290650Shselaskyerr_free_mbuf: 72290650Shselasky m_freem(mb); 73290650Shselasky return (err); 74290650Shselasky} 75290650Shselasky 
76290650Shselaskystatic void 77290650Shselaskymlx5e_post_rx_wqes(struct mlx5e_rq *rq) 78290650Shselasky{ 79290650Shselasky if (unlikely(rq->enabled == 0)) 80290650Shselasky return; 81290650Shselasky 82290650Shselasky while (!mlx5_wq_ll_is_full(&rq->wq)) { 83290650Shselasky struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, rq->wq.head); 84290650Shselasky 85290650Shselasky if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, rq->wq.head))) 86290650Shselasky break; 87290650Shselasky 88290650Shselasky mlx5_wq_ll_push(&rq->wq, be16_to_cpu(wqe->next.next_wqe_index)); 89290650Shselasky } 90290650Shselasky 91290650Shselasky /* ensure wqes are visible to device before updating doorbell record */ 92290650Shselasky wmb(); 93290650Shselasky 94290650Shselasky mlx5_wq_ll_update_db_record(&rq->wq); 95290650Shselasky} 96290650Shselasky 97290650Shselaskystatic void 98291184Shselaskymlx5e_lro_update_hdr(struct mbuf *mb, struct mlx5_cqe64 *cqe) 99290650Shselasky{ 100290650Shselasky /* TODO: consider vlans, ip options, ... 
*/ 101290650Shselasky struct ether_header *eh; 102290650Shselasky uint16_t eh_type; 103294918Shselasky uint16_t tot_len; 104290650Shselasky struct ip6_hdr *ip6 = NULL; 105290650Shselasky struct ip *ip4 = NULL; 106290650Shselasky struct tcphdr *th; 107290650Shselasky uint32_t *ts_ptr; 108294918Shselasky uint8_t l4_hdr_type; 109294918Shselasky int tcp_ack; 110290650Shselasky 111290650Shselasky eh = mtod(mb, struct ether_header *); 112290650Shselasky eh_type = ntohs(eh->ether_type); 113290650Shselasky 114294918Shselasky l4_hdr_type = get_cqe_l4_hdr_type(cqe); 115294918Shselasky tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA == l4_hdr_type) || 116291184Shselasky (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type)); 117290650Shselasky 118290650Shselasky /* TODO: consider vlan */ 119294918Shselasky tot_len = be32_to_cpu(cqe->byte_cnt) - ETHER_HDR_LEN; 120290650Shselasky 121290650Shselasky switch (eh_type) { 122290650Shselasky case ETHERTYPE_IP: 123290650Shselasky ip4 = (struct ip *)(eh + 1); 124290650Shselasky th = (struct tcphdr *)(ip4 + 1); 125290650Shselasky break; 126290650Shselasky case ETHERTYPE_IPV6: 127290650Shselasky ip6 = (struct ip6_hdr *)(eh + 1); 128290650Shselasky th = (struct tcphdr *)(ip6 + 1); 129290650Shselasky break; 130290650Shselasky default: 131290650Shselasky return; 132290650Shselasky } 133290650Shselasky 134290650Shselasky ts_ptr = (uint32_t *)(th + 1); 135290650Shselasky 136290650Shselasky if (get_cqe_lro_tcppsh(cqe)) 137291184Shselasky th->th_flags |= TH_PUSH; 138290650Shselasky 139290650Shselasky if (tcp_ack) { 140291184Shselasky th->th_flags |= TH_ACK; 141291184Shselasky th->th_ack = cqe->lro_ack_seq_num; 142291184Shselasky th->th_win = cqe->lro_tcp_win; 143290650Shselasky 144291184Shselasky /* 145291184Shselasky * FreeBSD handles only 32bit aligned timestamp right after 146291184Shselasky * the TCP hdr 147290650Shselasky * +--------+--------+--------+--------+ 148290650Shselasky * | NOP | NOP | TSopt | 10 | 149290650Shselasky * 
+--------+--------+--------+--------+ 150290650Shselasky * | TSval timestamp | 151290650Shselasky * +--------+--------+--------+--------+ 152290650Shselasky * | TSecr timestamp | 153290650Shselasky * +--------+--------+--------+--------+ 154290650Shselasky */ 155290650Shselasky if (get_cqe_lro_timestamp_valid(cqe) && 156290650Shselasky (__predict_true(*ts_ptr) == ntohl(TCPOPT_NOP << 24 | 157290650Shselasky TCPOPT_NOP << 16 | TCPOPT_TIMESTAMP << 8 | 158290650Shselasky TCPOLEN_TIMESTAMP))) { 159291184Shselasky /* 160291184Shselasky * cqe->timestamp is 64bit long. 161290650Shselasky * [0-31] - timestamp. 162290650Shselasky * [32-64] - timestamp echo replay. 163290650Shselasky */ 164290650Shselasky ts_ptr[1] = *(uint32_t *)&cqe->timestamp; 165290650Shselasky ts_ptr[2] = *((uint32_t *)&cqe->timestamp + 1); 166290650Shselasky } 167290650Shselasky } 168290650Shselasky if (ip4) { 169291184Shselasky ip4->ip_ttl = cqe->lro_min_ttl; 170291184Shselasky ip4->ip_len = cpu_to_be16(tot_len); 171291184Shselasky ip4->ip_sum = 0; 172291184Shselasky ip4->ip_sum = in_cksum(mb, ip4->ip_hl << 2); 173290650Shselasky } else { 174291184Shselasky ip6->ip6_hlim = cqe->lro_min_ttl; 175291184Shselasky ip6->ip6_plen = cpu_to_be16(tot_len - 176290650Shselasky sizeof(struct ip6_hdr)); 177290650Shselasky } 178290650Shselasky /* TODO: handle tcp checksum */ 179290650Shselasky} 180290650Shselasky 181290650Shselaskystatic inline void 182290650Shselaskymlx5e_build_rx_mbuf(struct mlx5_cqe64 *cqe, 183290650Shselasky struct mlx5e_rq *rq, struct mbuf *mb, 184290650Shselasky u32 cqe_bcnt) 185290650Shselasky{ 186290650Shselasky struct ifnet *ifp = rq->ifp; 187291184Shselasky int lro_num_seg; /* HW LRO session aggregated packets counter */ 188290650Shselasky 189290650Shselasky lro_num_seg = be32_to_cpu(cqe->srqn) >> 24; 190290650Shselasky if (lro_num_seg > 1) { 191290650Shselasky mlx5e_lro_update_hdr(mb, cqe); 192290650Shselasky rq->stats.lro_packets++; 193290650Shselasky rq->stats.lro_bytes += cqe_bcnt; 
194290650Shselasky } 195290650Shselasky 196290650Shselasky mb->m_pkthdr.len = mb->m_len = cqe_bcnt; 197290650Shselasky /* check if a Toeplitz hash was computed */ 198292195Shselasky if (cqe->rss_hash_type != 0) { 199290650Shselasky mb->m_pkthdr.flowid = be32_to_cpu(cqe->rss_hash_result); 200292195Shselasky#ifdef RSS 201292195Shselasky /* decode the RSS hash type */ 202292195Shselasky switch (cqe->rss_hash_type & 203292195Shselasky (CQE_RSS_DST_HTYPE_L4 | CQE_RSS_DST_HTYPE_IP)) { 204292195Shselasky /* IPv4 */ 205292195Shselasky case (CQE_RSS_DST_HTYPE_TCP | CQE_RSS_DST_HTYPE_IPV4): 206292195Shselasky M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_TCP_IPV4); 207292195Shselasky break; 208292195Shselasky case (CQE_RSS_DST_HTYPE_UDP | CQE_RSS_DST_HTYPE_IPV4): 209292195Shselasky M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_UDP_IPV4); 210292195Shselasky break; 211292195Shselasky case CQE_RSS_DST_HTYPE_IPV4: 212292195Shselasky M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_IPV4); 213292195Shselasky break; 214292195Shselasky /* IPv6 */ 215292195Shselasky case (CQE_RSS_DST_HTYPE_TCP | CQE_RSS_DST_HTYPE_IPV6): 216292195Shselasky M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_TCP_IPV6); 217292195Shselasky break; 218292195Shselasky case (CQE_RSS_DST_HTYPE_UDP | CQE_RSS_DST_HTYPE_IPV6): 219292195Shselasky M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_UDP_IPV6); 220292195Shselasky break; 221292195Shselasky case CQE_RSS_DST_HTYPE_IPV6: 222292195Shselasky M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_IPV6); 223292195Shselasky break; 224292195Shselasky default: /* Other */ 225292195Shselasky M_HASHTYPE_SET(mb, M_HASHTYPE_OPAQUE); 226292195Shselasky break; 227292195Shselasky } 228292195Shselasky#else 229292195Shselasky M_HASHTYPE_SET(mb, M_HASHTYPE_OPAQUE); 230292195Shselasky#endif 231292195Shselasky } else { 232290650Shselasky mb->m_pkthdr.flowid = rq->ix; 233292195Shselasky M_HASHTYPE_SET(mb, M_HASHTYPE_OPAQUE); 234292195Shselasky } 235290650Shselasky mb->m_pkthdr.rcvif = ifp; 236290650Shselasky 237290650Shselasky if (likely(ifp->if_capenable & 
(IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) && 238290650Shselasky ((cqe->hds_ip_ext & (CQE_L2_OK | CQE_L3_OK | CQE_L4_OK)) == 239290650Shselasky (CQE_L2_OK | CQE_L3_OK | CQE_L4_OK))) { 240290650Shselasky mb->m_pkthdr.csum_flags = 241290650Shselasky CSUM_IP_CHECKED | CSUM_IP_VALID | 242290650Shselasky CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 243290650Shselasky mb->m_pkthdr.csum_data = htons(0xffff); 244290650Shselasky } else { 245290650Shselasky rq->stats.csum_none++; 246290650Shselasky } 247290650Shselasky 248290650Shselasky if (cqe_has_vlan(cqe)) { 249290650Shselasky mb->m_pkthdr.ether_vtag = be16_to_cpu(cqe->vlan_info); 250290650Shselasky mb->m_flags |= M_VLANTAG; 251290650Shselasky } 252290650Shselasky} 253290650Shselasky 254293155Shselaskystatic inline void 255293155Shselaskymlx5e_read_cqe_slot(struct mlx5e_cq *cq, u32 cc, void *data) 256293155Shselasky{ 257293155Shselasky memcpy(data, mlx5_cqwq_get_wqe(&cq->wq, (cc & cq->wq.sz_m1)), 258293155Shselasky sizeof(struct mlx5_cqe64)); 259293155Shselasky} 260293155Shselasky 261293155Shselaskystatic inline void 262293155Shselaskymlx5e_write_cqe_slot(struct mlx5e_cq *cq, u32 cc, void *data) 263293155Shselasky{ 264293155Shselasky memcpy(mlx5_cqwq_get_wqe(&cq->wq, cc & cq->wq.sz_m1), 265293155Shselasky data, sizeof(struct mlx5_cqe64)); 266293155Shselasky} 267293155Shselasky 268293155Shselaskystatic inline void 269293155Shselaskymlx5e_decompress_cqe(struct mlx5e_cq *cq, struct mlx5_cqe64 *title, 270293155Shselasky struct mlx5_mini_cqe8 *mini, 271293155Shselasky u16 wqe_counter, int i) 272293155Shselasky{ 273294919Shselasky /* 274294919Shselasky * NOTE: The fields which are not set here are copied from the 275294919Shselasky * initial and common title. See memcpy() in 276294919Shselasky * mlx5e_write_cqe_slot(). 
277294919Shselasky */ 278293155Shselasky title->byte_cnt = mini->byte_cnt; 279293155Shselasky title->wqe_counter = cpu_to_be16((wqe_counter + i) & cq->wq.sz_m1); 280293155Shselasky title->check_sum = mini->checksum; 281293155Shselasky title->op_own = (title->op_own & 0xf0) | 282293155Shselasky (((cq->wq.cc + i) >> cq->wq.log_sz) & 1); 283293155Shselasky} 284293155Shselasky 285293155Shselasky#define MLX5E_MINI_ARRAY_SZ 8 286293155Shselasky/* Make sure structs are not packet differently */ 287293155ShselaskyCTASSERT(sizeof(struct mlx5_cqe64) == 288293155Shselasky sizeof(struct mlx5_mini_cqe8) * MLX5E_MINI_ARRAY_SZ); 289293155Shselaskystatic void 290293155Shselaskymlx5e_decompress_cqes(struct mlx5e_cq *cq) 291293155Shselasky{ 292293155Shselasky struct mlx5_mini_cqe8 mini_array[MLX5E_MINI_ARRAY_SZ]; 293293155Shselasky struct mlx5_cqe64 title; 294293155Shselasky u32 cqe_count; 295293155Shselasky u32 i = 0; 296293155Shselasky u16 title_wqe_counter; 297293155Shselasky 298293155Shselasky mlx5e_read_cqe_slot(cq, cq->wq.cc, &title); 299293155Shselasky title_wqe_counter = be16_to_cpu(title.wqe_counter); 300293155Shselasky cqe_count = be32_to_cpu(title.byte_cnt); 301293155Shselasky 302293155Shselasky /* Make sure we won't overflow */ 303293155Shselasky KASSERT(cqe_count <= cq->wq.sz_m1, 304293155Shselasky ("%s: cqe_count %u > cq->wq.sz_m1 %u", __func__, 305293155Shselasky cqe_count, cq->wq.sz_m1)); 306293155Shselasky 307293155Shselasky mlx5e_read_cqe_slot(cq, cq->wq.cc + 1, mini_array); 308293155Shselasky while (true) { 309293155Shselasky mlx5e_decompress_cqe(cq, &title, 310293155Shselasky &mini_array[i % MLX5E_MINI_ARRAY_SZ], 311293155Shselasky title_wqe_counter, i); 312293155Shselasky mlx5e_write_cqe_slot(cq, cq->wq.cc + i, &title); 313293155Shselasky i++; 314293155Shselasky 315293155Shselasky if (i == cqe_count) 316293155Shselasky break; 317293155Shselasky if (i % MLX5E_MINI_ARRAY_SZ == 0) 318293155Shselasky mlx5e_read_cqe_slot(cq, cq->wq.cc + i, mini_array); 
319293155Shselasky } 320293155Shselasky} 321293155Shselasky 322290650Shselaskystatic int 323290650Shselaskymlx5e_poll_rx_cq(struct mlx5e_rq *rq, int budget) 324290650Shselasky{ 325290650Shselasky#ifndef HAVE_TURBO_LRO 326290650Shselasky struct lro_entry *queued; 327290650Shselasky#endif 328290650Shselasky int i; 329290650Shselasky 330290650Shselasky for (i = 0; i < budget; i++) { 331290650Shselasky struct mlx5e_rx_wqe *wqe; 332290650Shselasky struct mlx5_cqe64 *cqe; 333290650Shselasky struct mbuf *mb; 334290650Shselasky __be16 wqe_counter_be; 335290650Shselasky u16 wqe_counter; 336290650Shselasky u32 byte_cnt; 337290650Shselasky 338290650Shselasky cqe = mlx5e_get_cqe(&rq->cq); 339290650Shselasky if (!cqe) 340290650Shselasky break; 341290650Shselasky 342293155Shselasky if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) 343293155Shselasky mlx5e_decompress_cqes(&rq->cq); 344293155Shselasky 345293155Shselasky mlx5_cqwq_pop(&rq->cq.wq); 346293155Shselasky 347290650Shselasky wqe_counter_be = cqe->wqe_counter; 348290650Shselasky wqe_counter = be16_to_cpu(wqe_counter_be); 349290650Shselasky wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter); 350290650Shselasky byte_cnt = be32_to_cpu(cqe->byte_cnt); 351290650Shselasky 352290650Shselasky bus_dmamap_sync(rq->dma_tag, 353290650Shselasky rq->mbuf[wqe_counter].dma_map, 354290650Shselasky BUS_DMASYNC_POSTREAD); 355290650Shselasky 356290650Shselasky if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) { 357290650Shselasky rq->stats.wqe_err++; 358290650Shselasky goto wq_ll_pop; 359290650Shselasky } 360290650Shselasky 361290650Shselasky if (MHLEN >= byte_cnt && 362290650Shselasky (mb = m_gethdr(M_NOWAIT, MT_DATA)) != NULL) { 363290650Shselasky bcopy(rq->mbuf[wqe_counter].data, mtod(mb, caddr_t), 364290650Shselasky byte_cnt); 365290650Shselasky } else { 366290650Shselasky mb = rq->mbuf[wqe_counter].mbuf; 367290650Shselasky rq->mbuf[wqe_counter].mbuf = NULL; /* safety clear */ 368290650Shselasky 369290650Shselasky 
bus_dmamap_unload(rq->dma_tag, 370290650Shselasky rq->mbuf[wqe_counter].dma_map); 371290650Shselasky } 372290650Shselasky 373290650Shselasky mlx5e_build_rx_mbuf(cqe, rq, mb, byte_cnt); 374290650Shselasky rq->stats.packets++; 375290650Shselasky#ifdef HAVE_TURBO_LRO 376290650Shselasky if (mb->m_pkthdr.csum_flags == 0 || 377290650Shselasky (rq->ifp->if_capenable & IFCAP_LRO) == 0 || 378290650Shselasky rq->lro.mbuf == NULL) { 379290650Shselasky /* normal input */ 380290650Shselasky rq->ifp->if_input(rq->ifp, mb); 381290650Shselasky } else { 382290650Shselasky tcp_tlro_rx(&rq->lro, mb); 383290650Shselasky } 384290650Shselasky#else 385290650Shselasky if (mb->m_pkthdr.csum_flags == 0 || 386290650Shselasky (rq->ifp->if_capenable & IFCAP_LRO) == 0 || 387290650Shselasky rq->lro.lro_cnt == 0 || 388290650Shselasky tcp_lro_rx(&rq->lro, mb, 0) != 0) { 389290650Shselasky rq->ifp->if_input(rq->ifp, mb); 390290650Shselasky } 391290650Shselasky#endif 392290650Shselaskywq_ll_pop: 393290650Shselasky mlx5_wq_ll_pop(&rq->wq, wqe_counter_be, 394290650Shselasky &wqe->next.next_wqe_index); 395290650Shselasky } 396290650Shselasky 397290650Shselasky mlx5_cqwq_update_db_record(&rq->cq.wq); 398290650Shselasky 399290650Shselasky /* ensure cq space is freed before enabling more cqes */ 400290650Shselasky wmb(); 401290650Shselasky#ifndef HAVE_TURBO_LRO 402290650Shselasky while ((queued = SLIST_FIRST(&rq->lro.lro_active)) != NULL) { 403290650Shselasky SLIST_REMOVE_HEAD(&rq->lro.lro_active, next); 404290650Shselasky tcp_lro_flush(&rq->lro, queued); 405290650Shselasky } 406290650Shselasky#endif 407290650Shselasky return (i); 408290650Shselasky} 409290650Shselasky 410290650Shselaskyvoid 411290650Shselaskymlx5e_rx_cq_comp(struct mlx5_core_cq *mcq) 412290650Shselasky{ 413290650Shselasky struct mlx5e_rq *rq = container_of(mcq, struct mlx5e_rq, cq.mcq); 414290650Shselasky int i = 0; 415290650Shselasky 416290650Shselasky#ifdef HAVE_PER_CQ_EVENT_PACKET 417290650Shselasky struct mbuf *mb = 
m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rq->wqe_sz); 418291184Shselasky 419290650Shselasky if (mb != NULL) { 420290650Shselasky /* this code is used for debugging purpose only */ 421290650Shselasky mb->m_pkthdr.len = mb->m_len = 15; 422290650Shselasky memset(mb->m_data, 255, 14); 423290650Shselasky mb->m_data[14] = rq->ix; 424290650Shselasky mb->m_pkthdr.rcvif = rq->ifp; 425290650Shselasky rq->ifp->if_input(rq->ifp, mb); 426290650Shselasky } 427290650Shselasky#endif 428290650Shselasky 429290650Shselasky mtx_lock(&rq->mtx); 430290650Shselasky 431290650Shselasky /* 432290650Shselasky * Polling the entire CQ without posting new WQEs results in 433290650Shselasky * lack of receive WQEs during heavy traffic scenarios. 434290650Shselasky */ 435290650Shselasky while (1) { 436290650Shselasky if (mlx5e_poll_rx_cq(rq, MLX5E_RX_BUDGET_MAX) != 437290650Shselasky MLX5E_RX_BUDGET_MAX) 438290650Shselasky break; 439290650Shselasky i += MLX5E_RX_BUDGET_MAX; 440290650Shselasky if (i >= MLX5E_BUDGET_MAX) 441290650Shselasky break; 442290650Shselasky mlx5e_post_rx_wqes(rq); 443290650Shselasky } 444290650Shselasky mlx5e_post_rx_wqes(rq); 445290650Shselasky mlx5e_cq_arm(&rq->cq); 446290650Shselasky#ifdef HAVE_TURBO_LRO 447290650Shselasky tcp_tlro_flush(&rq->lro, 1); 448290650Shselasky#endif 449290650Shselasky mtx_unlock(&rq->mtx); 450290650Shselasky} 451