/*-
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_en/mlx5_en_rx.c 337741 2018-08-14 11:15:05Z hselasky $
 */

#include "en.h"
#include <machine/in_cksum.h>

static inline int
mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq,
    struct mlx5e_rx_wqe *wqe, u16 ix)
{
        bus_dma_segment_t segs[rq->nsegs];
        struct mbuf *mb;
        int nsegs;
        int err;
#if (MLX5E_MAX_RX_SEGS != 1)
        struct mbuf *mb_head;
        int i;
#endif
        if (rq->mbuf[ix].mbuf != NULL)
                return (0);

#if (MLX5E_MAX_RX_SEGS == 1)
        mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rq->wqe_sz);
        if (unlikely(!mb))
                return (-ENOMEM);

        mb->m_pkthdr.len = mb->m_len = rq->wqe_sz;
#else
        mb_head = mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
            MLX5E_MAX_RX_BYTES);
        if (unlikely(mb == NULL))
                return (-ENOMEM);

        mb->m_len = MLX5E_MAX_RX_BYTES;
        mb->m_pkthdr.len = MLX5E_MAX_RX_BYTES;

        for (i = 1; i < rq->nsegs; i++) {
                if (mb_head->m_pkthdr.len >= rq->wqe_sz)
                        break;
                mb = mb->m_next = m_getjcl(M_NOWAIT, MT_DATA, 0,
                    MLX5E_MAX_RX_BYTES);
                if (unlikely(mb == NULL)) {
                        m_freem(mb_head);
                        return (-ENOMEM);
                }
                mb->m_len = MLX5E_MAX_RX_BYTES;
                mb_head->m_pkthdr.len += MLX5E_MAX_RX_BYTES;
        }
        /* rewind to first mbuf in chain */
        mb = mb_head;
#endif
        /* get IP header aligned */
        m_adj(mb, MLX5E_NET_IP_ALIGN);

        err = -bus_dmamap_load_mbuf_sg(rq->dma_tag, rq->mbuf[ix].dma_map,
            mb, segs, &nsegs, BUS_DMA_NOWAIT);
        if (err != 0)
                goto err_free_mbuf;
        if (unlikely(nsegs == 0)) {
                bus_dmamap_unload(rq->dma_tag, rq->mbuf[ix].dma_map);
                err = -ENOMEM;
                goto err_free_mbuf;
        }
#if (MLX5E_MAX_RX_SEGS == 1)
        wqe->data[0].addr = cpu_to_be64(segs[0].ds_addr);
#else
        wqe->data[0].addr = cpu_to_be64(segs[0].ds_addr);
        wqe->data[0].byte_count = cpu_to_be32(segs[0].ds_len |
            MLX5_HW_START_PADDING);
        for (i = 1; i != nsegs; i++) {
                wqe->data[i].addr = cpu_to_be64(segs[i].ds_addr);
                wqe->data[i].byte_count = cpu_to_be32(segs[i].ds_len);
        }
        for (; i < rq->nsegs; i++) {
                wqe->data[i].addr = 0;
                wqe->data[i].byte_count = 0;
        }
#endif

        rq->mbuf[ix].mbuf = mb;
        rq->mbuf[ix].data = mb->m_data;

        bus_dmamap_sync(rq->dma_tag, rq->mbuf[ix].dma_map,
            BUS_DMASYNC_PREREAD);
        return (0);

err_free_mbuf:
        m_freem(mb);
        return (err);
}
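
/*
 * Illustrative arithmetic for the multi-segment path above; the
 * concrete values are assumptions for the example, not taken from
 * en.h. With a hypothetical rq->wqe_sz of 9018 bytes (a 9K jumbo
 * frame) and MLX5E_MAX_RX_BYTES of 4096, the loop links
 * howmany(9018, 4096) = 3 mbufs and mb_head->m_pkthdr.len grows to
 * 3 * 4096 = 12288 bytes. The chain is trimmed down to the real
 * completion byte count later, in mlx5e_build_rx_mbuf().
 */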

static void
mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
{
        if (unlikely(rq->enabled == 0))
                return;

        while (!mlx5_wq_ll_is_full(&rq->wq)) {
                struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, rq->wq.head);

                if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, rq->wq.head))) {
                        callout_reset_curcpu(&rq->watchdog, 1,
                            (void *)&mlx5e_post_rx_wqes, rq);
                        break;
                }
                mlx5_wq_ll_push(&rq->wq, be16_to_cpu(wqe->next.next_wqe_index));
        }

        /* ensure wqes are visible to device before updating doorbell record */
        wmb();

        mlx5_wq_ll_update_db_record(&rq->wq);
}

static void
mlx5e_lro_update_hdr(struct mbuf *mb, struct mlx5_cqe64 *cqe)
{
        /* TODO: consider vlans, ip options, ... */
        struct ether_header *eh;
        uint16_t eh_type;
        uint16_t tot_len;
        struct ip6_hdr *ip6 = NULL;
        struct ip *ip4 = NULL;
        struct tcphdr *th;
        uint32_t *ts_ptr;
        uint8_t l4_hdr_type;
        int tcp_ack;

        eh = mtod(mb, struct ether_header *);
        eh_type = ntohs(eh->ether_type);

        l4_hdr_type = get_cqe_l4_hdr_type(cqe);
        tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA == l4_hdr_type) ||
            (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));

        /* TODO: consider vlan */
        tot_len = be32_to_cpu(cqe->byte_cnt) - ETHER_HDR_LEN;

        switch (eh_type) {
        case ETHERTYPE_IP:
                ip4 = (struct ip *)(eh + 1);
                th = (struct tcphdr *)(ip4 + 1);
                break;
        case ETHERTYPE_IPV6:
                ip6 = (struct ip6_hdr *)(eh + 1);
                th = (struct tcphdr *)(ip6 + 1);
                break;
        default:
                return;
        }

        ts_ptr = (uint32_t *)(th + 1);

        if (get_cqe_lro_tcppsh(cqe))
                th->th_flags |= TH_PUSH;

        if (tcp_ack) {
                th->th_flags |= TH_ACK;
                th->th_ack = cqe->lro_ack_seq_num;
                th->th_win = cqe->lro_tcp_win;

                /*
                 * FreeBSD handles only 32bit aligned timestamp right after
                 * the TCP hdr
                 * +--------+--------+--------+--------+
                 * |   NOP  |  NOP   |  TSopt |   10   |
                 * +--------+--------+--------+--------+
                 * |          TSval   timestamp        |
                 * +--------+--------+--------+--------+
                 * |          TSecr   timestamp        |
                 * +--------+--------+--------+--------+
                 */
                if (get_cqe_lro_timestamp_valid(cqe) &&
                    __predict_true(*ts_ptr == ntohl(TCPOPT_NOP << 24 |
                    TCPOPT_NOP << 16 | TCPOPT_TIMESTAMP << 8 |
                    TCPOLEN_TIMESTAMP))) {
                        /*
                         * cqe->timestamp is 64bit long.
                         * [0-31] - timestamp.
                         * [32-63] - timestamp echo reply.
                         */
                        ts_ptr[1] = *(uint32_t *)&cqe->timestamp;
                        ts_ptr[2] = *((uint32_t *)&cqe->timestamp + 1);
                }
        }
        if (ip4) {
                ip4->ip_ttl = cqe->lro_min_ttl;
                ip4->ip_len = cpu_to_be16(tot_len);
                ip4->ip_sum = 0;
                ip4->ip_sum = in_cksum(mb, ip4->ip_hl << 2);
        } else {
                ip6->ip6_hlim = cqe->lro_min_ttl;
                ip6->ip6_plen = cpu_to_be16(tot_len -
                    sizeof(struct ip6_hdr));
        }
        /* TODO: handle tcp checksum */
}
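
/*
 * The timestamp guard in mlx5e_lro_update_hdr() matches the canonical
 * padded TCP timestamp option, which on the wire reads as the bytes
 * 01 01 08 0a: TCPOPT_NOP (1), TCPOPT_NOP (1), TCPOPT_TIMESTAMP (8)
 * and TCPOLEN_TIMESTAMP (10). Shifted together that is the host
 * constant 0x0101080a, and the ntohl() (a byte swap on little-endian
 * hosts) puts it in wire order so it can be compared directly against
 * the raw 32-bit word at *ts_ptr. Any other option layout fails the
 * test and the timestamps are left untouched.
 */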

static inline void
mlx5e_build_rx_mbuf(struct mlx5_cqe64 *cqe,
    struct mlx5e_rq *rq, struct mbuf *mb,
    u32 cqe_bcnt)
{
        struct ifnet *ifp = rq->ifp;
#if (MLX5E_MAX_RX_SEGS != 1)
        struct mbuf *mb_head;
#endif
        int lro_num_seg;        /* HW LRO session aggregated packets counter */

        lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
        if (lro_num_seg > 1) {
                mlx5e_lro_update_hdr(mb, cqe);
                rq->stats.lro_packets++;
                rq->stats.lro_bytes += cqe_bcnt;
        }

#if (MLX5E_MAX_RX_SEGS == 1)
        mb->m_pkthdr.len = mb->m_len = cqe_bcnt;
#else
        mb->m_pkthdr.len = cqe_bcnt;
        for (mb_head = mb; mb != NULL; mb = mb->m_next) {
                if (mb->m_len > cqe_bcnt)
                        mb->m_len = cqe_bcnt;
                cqe_bcnt -= mb->m_len;
                if (likely(cqe_bcnt == 0)) {
                        if (likely(mb->m_next != NULL)) {
                                /* trim off empty mbufs */
                                m_freem(mb->m_next);
                                mb->m_next = NULL;
                        }
                        break;
                }
        }
        /* rewind to first mbuf in chain */
        mb = mb_head;
#endif
        /* check if a Toeplitz hash was computed */
        if (cqe->rss_hash_type != 0) {
                mb->m_pkthdr.flowid = be32_to_cpu(cqe->rss_hash_result);
#ifdef RSS
                /* decode the RSS hash type */
                switch (cqe->rss_hash_type &
                    (CQE_RSS_DST_HTYPE_L4 | CQE_RSS_DST_HTYPE_IP)) {
                /* IPv4 */
                case (CQE_RSS_DST_HTYPE_TCP | CQE_RSS_DST_HTYPE_IPV4):
                        M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_TCP_IPV4);
                        break;
                case (CQE_RSS_DST_HTYPE_UDP | CQE_RSS_DST_HTYPE_IPV4):
                        M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_UDP_IPV4);
                        break;
                case CQE_RSS_DST_HTYPE_IPV4:
                        M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_IPV4);
                        break;
                /* IPv6 */
                case (CQE_RSS_DST_HTYPE_TCP | CQE_RSS_DST_HTYPE_IPV6):
                        M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_TCP_IPV6);
                        break;
                case (CQE_RSS_DST_HTYPE_UDP | CQE_RSS_DST_HTYPE_IPV6):
                        M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_UDP_IPV6);
                        break;
                case CQE_RSS_DST_HTYPE_IPV6:
                        M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_IPV6);
                        break;
                default:        /* Other */
                        M_HASHTYPE_SET(mb, M_HASHTYPE_OPAQUE);
                        break;
                }
#else
                M_HASHTYPE_SET(mb, M_HASHTYPE_OPAQUE);
#endif
        } else {
                mb->m_pkthdr.flowid = rq->ix;
                M_HASHTYPE_SET(mb, M_HASHTYPE_OPAQUE);
        }
        mb->m_pkthdr.rcvif = ifp;

        if (likely(ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) &&
            ((cqe->hds_ip_ext & (CQE_L2_OK | CQE_L3_OK | CQE_L4_OK)) ==
            (CQE_L2_OK | CQE_L3_OK | CQE_L4_OK))) {
                mb->m_pkthdr.csum_flags =
                    CSUM_IP_CHECKED | CSUM_IP_VALID |
                    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
                mb->m_pkthdr.csum_data = htons(0xffff);
        } else {
                rq->stats.csum_none++;
        }

        if (cqe_has_vlan(cqe)) {
                mb->m_pkthdr.ether_vtag = be16_to_cpu(cqe->vlan_info);
                mb->m_flags |= M_VLANTAG;
        }
}

static inline void
mlx5e_read_cqe_slot(struct mlx5e_cq *cq, u32 cc, void *data)
{
        memcpy(data, mlx5_cqwq_get_wqe(&cq->wq, (cc & cq->wq.sz_m1)),
            sizeof(struct mlx5_cqe64));
}

static inline void
mlx5e_write_cqe_slot(struct mlx5e_cq *cq, u32 cc, void *data)
{
        memcpy(mlx5_cqwq_get_wqe(&cq->wq, cc & cq->wq.sz_m1),
            data, sizeof(struct mlx5_cqe64));
}

static inline void
mlx5e_decompress_cqe(struct mlx5e_cq *cq, struct mlx5_cqe64 *title,
    struct mlx5_mini_cqe8 *mini,
    u16 wqe_counter, int i)
{
        /*
         * NOTE: The fields which are not set here are copied from the
         * initial and common title. See memcpy() in
         * mlx5e_write_cqe_slot().
         */
        title->byte_cnt = mini->byte_cnt;
        title->wqe_counter = cpu_to_be16((wqe_counter + i) & cq->wq.sz_m1);
        title->check_sum = mini->checksum;
        title->op_own = (title->op_own & 0xf0) |
            (((cq->wq.cc + i) >> cq->wq.log_sz) & 1);
}

#define MLX5E_MINI_ARRAY_SZ 8
/* Make sure structs are not packed differently */
CTASSERT(sizeof(struct mlx5_cqe64) ==
    sizeof(struct mlx5_mini_cqe8) * MLX5E_MINI_ARRAY_SZ);

static void
mlx5e_decompress_cqes(struct mlx5e_cq *cq)
{
        struct mlx5_mini_cqe8 mini_array[MLX5E_MINI_ARRAY_SZ];
        struct mlx5_cqe64 title;
        u32 cqe_count;
        u32 i = 0;
        u16 title_wqe_counter;

        mlx5e_read_cqe_slot(cq, cq->wq.cc, &title);
        title_wqe_counter = be16_to_cpu(title.wqe_counter);
        cqe_count = be32_to_cpu(title.byte_cnt);

        /* Make sure we won't overflow */
        KASSERT(cqe_count <= cq->wq.sz_m1,
            ("%s: cqe_count %u > cq->wq.sz_m1 %u", __func__,
            cqe_count, cq->wq.sz_m1));

        mlx5e_read_cqe_slot(cq, cq->wq.cc + 1, mini_array);
        while (true) {
                mlx5e_decompress_cqe(cq, &title,
                    &mini_array[i % MLX5E_MINI_ARRAY_SZ],
                    title_wqe_counter, i);
                mlx5e_write_cqe_slot(cq, cq->wq.cc + i, &title);
                i++;

                if (i == cqe_count)
                        break;
                if (i % MLX5E_MINI_ARRAY_SZ == 0)
                        mlx5e_read_cqe_slot(cq, cq->wq.cc + i, mini_array);
        }
}
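
/*
 * How the expansion above walks the ring, worked through for an
 * assumed cqe_count of 20: the title CQE sits at slot cc, and the
 * loop pulls batches of eight mini CQEs from slots cc + 1, cc + 8
 * and cc + 16 (a fresh mlx5e_read_cqe_slot() every time i wraps
 * modulo MLX5E_MINI_ARRAY_SZ), while writing the reconstructed
 * full-size CQEs back over slots cc + 0 .. cc + 19. Because a slot
 * about to be overwritten can still hold unconsumed mini CQEs, each
 * batch is copied into the local mini_array before the in-place
 * writes reach it.
 */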

static int
mlx5e_poll_rx_cq(struct mlx5e_rq *rq, int budget)
{
#ifndef HAVE_TURBO_LRO
        struct lro_entry *queued;
#endif
        int i;

        for (i = 0; i < budget; i++) {
                struct mlx5e_rx_wqe *wqe;
                struct mlx5_cqe64 *cqe;
                struct mbuf *mb;
                __be16 wqe_counter_be;
                u16 wqe_counter;
                u32 byte_cnt;

                cqe = mlx5e_get_cqe(&rq->cq);
                if (!cqe)
                        break;

                if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED)
                        mlx5e_decompress_cqes(&rq->cq);

                mlx5_cqwq_pop(&rq->cq.wq);

                wqe_counter_be = cqe->wqe_counter;
                wqe_counter = be16_to_cpu(wqe_counter_be);
                wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
                byte_cnt = be32_to_cpu(cqe->byte_cnt);

                bus_dmamap_sync(rq->dma_tag,
                    rq->mbuf[wqe_counter].dma_map,
                    BUS_DMASYNC_POSTREAD);

                if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
                        rq->stats.wqe_err++;
                        goto wq_ll_pop;
                }
                if ((MHLEN - MLX5E_NET_IP_ALIGN) >= byte_cnt &&
                    (mb = m_gethdr(M_NOWAIT, MT_DATA)) != NULL) {
#if (MLX5E_MAX_RX_SEGS != 1)
                        /* set maximum mbuf length */
                        mb->m_len = MHLEN - MLX5E_NET_IP_ALIGN;
#endif
                        /* get IP header aligned */
                        mb->m_data += MLX5E_NET_IP_ALIGN;

                        bcopy(rq->mbuf[wqe_counter].data, mtod(mb, caddr_t),
                            byte_cnt);
                } else {
                        mb = rq->mbuf[wqe_counter].mbuf;
                        rq->mbuf[wqe_counter].mbuf = NULL;      /* safety clear */

                        bus_dmamap_unload(rq->dma_tag,
                            rq->mbuf[wqe_counter].dma_map);
                }

                mlx5e_build_rx_mbuf(cqe, rq, mb, byte_cnt);
                rq->stats.packets++;
#ifdef HAVE_TURBO_LRO
                if (mb->m_pkthdr.csum_flags == 0 ||
                    (rq->ifp->if_capenable & IFCAP_LRO) == 0 ||
                    rq->lro.mbuf == NULL) {
                        /* normal input */
                        rq->ifp->if_input(rq->ifp, mb);
                } else {
                        tcp_tlro_rx(&rq->lro, mb);
                }
#else
                if (mb->m_pkthdr.csum_flags == 0 ||
                    (rq->ifp->if_capenable & IFCAP_LRO) == 0 ||
                    rq->lro.lro_cnt == 0 ||
                    tcp_lro_rx(&rq->lro, mb, 0) != 0) {
                        rq->ifp->if_input(rq->ifp, mb);
                }
#endif
wq_ll_pop:
                mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
                    &wqe->next.next_wqe_index);
        }

        mlx5_cqwq_update_db_record(&rq->cq.wq);

        /* ensure cq space is freed before enabling more cqes */
        wmb();
#ifndef HAVE_TURBO_LRO
        while ((queued = SLIST_FIRST(&rq->lro.lro_active)) != NULL) {
                SLIST_REMOVE_HEAD(&rq->lro.lro_active, next);
                tcp_lro_flush(&rq->lro, queued);
        }
#endif
        return (i);
}
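
/*
 * A note on the small-packet path above: completions no larger than
 * MHLEN - MLX5E_NET_IP_ALIGN bytes are copied into a freshly
 * allocated header mbuf instead of handing up the posted receive
 * buffer. The original mbuf then stays in rq->mbuf[] with its DMA
 * map still loaded, so mlx5e_alloc_rx_wqe() sees a non-NULL entry on
 * the next mlx5e_post_rx_wqes() pass and recycles it without another
 * allocation or bus_dmamap_load round trip.
 */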

void
mlx5e_rx_cq_comp(struct mlx5_core_cq *mcq)
{
        struct mlx5e_rq *rq = container_of(mcq, struct mlx5e_rq, cq.mcq);
        int i = 0;

#ifdef HAVE_PER_CQ_EVENT_PACKET
        struct mbuf *mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rq->wqe_sz);

        if (mb != NULL) {
                /* this code is used for debugging purpose only */
                mb->m_pkthdr.len = mb->m_len = 15;
                memset(mb->m_data, 255, 14);
                mb->m_data[14] = rq->ix;
                mb->m_pkthdr.rcvif = rq->ifp;
                rq->ifp->if_input(rq->ifp, mb);
        }
#endif

        mtx_lock(&rq->mtx);

        /*
         * Polling the entire CQ without posting new WQEs results in
         * lack of receive WQEs during heavy traffic scenarios.
         */
        while (1) {
                if (mlx5e_poll_rx_cq(rq, MLX5E_RX_BUDGET_MAX) !=
                    MLX5E_RX_BUDGET_MAX)
                        break;
                i += MLX5E_RX_BUDGET_MAX;
                if (i >= MLX5E_BUDGET_MAX)
                        break;
                mlx5e_post_rx_wqes(rq);
        }
        mlx5e_post_rx_wqes(rq);
        mlx5e_cq_arm(&rq->cq, MLX5_GET_DOORBELL_LOCK(&rq->channel->priv->doorbell_lock));
#ifdef HAVE_TURBO_LRO
        tcp_tlro_flush(&rq->lro, 1);
#endif
        mtx_unlock(&rq->mtx);
}
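
/*
 * Budget arithmetic for mlx5e_rx_cq_comp() above, using assumed
 * values for illustration only (the real constants are defined in
 * en.h): if MLX5E_RX_BUDGET_MAX were 256 and MLX5E_BUDGET_MAX were
 * 4096, a single completion event would poll at most 16 batches of
 * 256 CQEs, calling mlx5e_post_rx_wqes() between batches so the
 * receive ring is refilled while the CQ is still being drained.
 */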