1/*
2 * Copyright (c) 2017-2018 Cavium, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *

--- 18 unchanged lines hidden (view full) ---

27
28
29/*
30 * File: qlnx_os.c
31 * Author : David C Somayajulu, Cavium, Inc., San Jose, CA 95131.
32 */
33
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: stable/11/sys/dev/qlnx/qlnxe/qlnx_os.c 318659 2017-05-22 19:28:38Z davidcs $");
36
37#include "qlnx_os.h"
38#include "bcm_osal.h"
39#include "reg_addr.h"
40#include "ecore_gtt_reg_addr.h"
41#include "ecore.h"
42#include "ecore_chain.h"
43#include "ecore_status.h"

--- 254 unchanged lines hidden (view full) ---

298
299 if (p_hwfn == NULL) {
300 printf("%s: spurious slowpath intr\n", __func__);
301 return;
302 }
303
304 ha = (qlnx_host_t *)p_hwfn->p_dev;
305
306 QL_DPRINT2(ha, "enter\n");
307
308 for (i = 0; i < ha->cdev.num_hwfns; i++) {
309 if (&ha->cdev.hwfns[i] == p_hwfn) {
310 taskqueue_enqueue(ha->sp_taskqueue[i], &ha->sp_task[i]);
311 break;
312 }
313 }
314 QL_DPRINT2(ha, "exit\n");
315
316 return;
317}
318
319static void
320qlnx_sp_taskqueue(void *context, int pending)
321{
322 struct ecore_hwfn *p_hwfn;

--- 25 unchanged lines hidden (view full) ---

348 taskqueue_thread_enqueue, &ha->sp_taskqueue[i]);
349
350 if (ha->sp_taskqueue[i] == NULL)
351 return (-1);
352
353 taskqueue_start_threads(&ha->sp_taskqueue[i], 1, PI_NET, "%s",
354 tq_name);
355
356 QL_DPRINT1(ha, "%p\n", ha->sp_taskqueue[i]);
357 }
358
359 return (0);
360}
361
362static void
363qlnx_destroy_sp_taskqueues(qlnx_host_t *ha)
364{

--- 147 unchanged lines hidden (view full) ---

512 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
513 } else {
514 if (fp->tx_ring_full) {
515 qlnx_mdelay(__func__, 100);
516 }
517 ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
518 }
519
520 QL_DPRINT2(ha, "exit ret = %d\n", ret);
521 return;
522}
523
524static int
525qlnx_create_fp_taskqueues(qlnx_host_t *ha)
526{
527 int i;
528 uint8_t tq_name[32];

--- 13 unchanged lines hidden (view full) ---

542 &fp->fp_taskqueue);
543
544 if (fp->fp_taskqueue == NULL)
545 return (-1);
546
547 taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s",
548 tq_name);
549
550 		QL_DPRINT1(ha, "%p\n", fp->fp_taskqueue);
551 }
552
553 return (0);
554}
555
556static void
557qlnx_destroy_fp_taskqueues(qlnx_host_t *ha)
558{

--- 145 unchanged lines hidden (view full) ---

704 goto qlnx_pci_attach_err;
705 }
706
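	/*
	 * MSI-X budget: one fast path vector per RSS queue plus one slow
	 * path vector per hardware function.  Clamp whichever side is
	 * oversized so the two stay consistent.
	 */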
707 	if (ha->msix_count > (ha->num_rss + ha->cdev.num_hwfns))
708 ha->msix_count = ha->num_rss + ha->cdev.num_hwfns;
709 else
710 ha->num_rss = ha->msix_count - ha->cdev.num_hwfns;
711
712 QL_DPRINT1(ha, "\n\t\t\tpci_reg [%p, 0x%08x 0x%08x]"
713 "\n\t\t\tdbells [%p, 0x%08x 0x%08x]"
714 "\n\t\t\tmsix [%p, 0x%08x 0x%08x 0x%x 0x%x]"
715 "\n\t\t\t[ncpus = %d][num_rss = 0x%x] [num_tc = 0x%x]\n",
716 ha->pci_reg, rsrc_len_reg,
717 ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
718 ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
719 ha->msix_count, mp_ncpus, ha->num_rss, ha->num_tc);
720 if (pci_alloc_msix(dev, &ha->msix_count)) {
721 device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
722 ha->msix_count);
723 ha->msix_count = 0;
724 goto qlnx_pci_attach_err;
725 }
726
727 /*

--- 19 unchanged lines hidden (view full) ---

747 if (bus_setup_intr(dev, ha->sp_irq[i],
748 (INTR_TYPE_NET | INTR_MPSAFE), NULL,
749 qlnx_sp_intr, p_hwfn, &ha->sp_handle[i])) {
750 device_printf(dev,
751 "could not setup slow path interrupt\n");
752 goto qlnx_pci_attach_err;
753 }
754
755 QL_DPRINT1(ha, "p_hwfn [%p] sp_irq_rid %d"
756 " sp_irq %p sp_handle %p\n", p_hwfn,
757 ha->sp_irq_rid[i], ha->sp_irq[i], ha->sp_handle[i]);
758
759 }
760
761 /*
762 * initialize fast path interrupt
763 */
764 if (qlnx_create_fp_taskqueues(ha) != 0)
765 goto qlnx_pci_attach_err;

--- 26 unchanged lines hidden (view full) ---

792 for (i = 0; i < ha->cdev.num_hwfns; i++) {
793
794 if (qlnx_grc_dumpsize(ha, &ha->grcdump_size[i], i) != 0)
795 goto qlnx_pci_attach_err;
796 if (ha->grcdump_size[i] == 0)
797 goto qlnx_pci_attach_err;
798
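	/* ecore reports the GRC dump size in dwords; convert it to bytes. */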
799 ha->grcdump_size[i] = ha->grcdump_size[i] << 2;
800 QL_DPRINT1(ha, "grcdump_size[%d] = 0x%08x\n",
801 i, ha->grcdump_size[i]);
802
803 ha->grcdump[i] = qlnx_zalloc(ha->grcdump_size[i]);
804 if (ha->grcdump[i] == NULL) {
805 device_printf(dev, "grcdump alloc[%d] failed\n", i);
806 goto qlnx_pci_attach_err;
807 }
808
809 if (qlnx_idle_chk_size(ha, &ha->idle_chk_size[i], i) != 0)
810 goto qlnx_pci_attach_err;
811 if (ha->idle_chk_size[i] == 0)
812 goto qlnx_pci_attach_err;
813
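	/* The idle-check size is also returned in dwords; convert to bytes. */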
814 ha->idle_chk_size[i] = ha->idle_chk_size[i] << 2;
815 QL_DPRINT1(ha, "idle_chk_size[%d] = 0x%08x\n",
816 i, ha->idle_chk_size[i]);
817
818 ha->idle_chk[i] = qlnx_zalloc(ha->idle_chk_size[i]);
819
820 if (ha->idle_chk[i] == NULL) {
821 device_printf(dev, "idle_chk alloc failed\n");
822 goto qlnx_pci_attach_err;
823 }
824 }

--- 22 unchanged lines hidden (view full) ---

847 }
848 snprintf(ha->mfw_ver, sizeof(ha->mfw_ver), "%d.%d.%d.%d",
849 ((mfw_ver >> 24) & 0xFF), ((mfw_ver >> 16) & 0xFF),
850 ((mfw_ver >> 8) & 0xFF), (mfw_ver & 0xFF));
851 snprintf(ha->stormfw_ver, sizeof(ha->stormfw_ver), "%d.%d.%d.%d",
852 FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
853 FW_ENGINEERING_VERSION);
854
855 QL_DPRINT1(ha, "STORM_FW version %s MFW version %s\n",
856 ha->stormfw_ver, ha->mfw_ver);
857
858 qlnx_init_ifnet(dev, ha);
859
860 /*
861 * add sysctls
862 */
863 qlnx_add_sysctls(ha);
864
865qlnx_pci_attach_err0:
866 /*
867 * create ioctl device interface
868 */
869 if (qlnx_make_cdev(ha)) {
870 		device_printf(dev, "%s: qlnx_make_cdev failed\n", __func__);
871 goto qlnx_pci_attach_err;
872 }
873
874 QL_DPRINT2(ha, "success\n");
875
876 return (0);
877
878qlnx_pci_attach_err:
879
880 qlnx_release(ha);
881
882 return (ENXIO);

--- 67 unchanged lines hidden (view full) ---

950static void
951qlnx_release(qlnx_host_t *ha)
952{
953 device_t dev;
954 int i;
955
956 dev = ha->pci_dev;
957
958 QL_DPRINT2(ha, "enter\n");
959
960 for (i = 0; i < QLNX_MAX_HW_FUNCS; i++) {
961 if (ha->idle_chk[i] != NULL) {
962 free(ha->idle_chk[i], M_QLNXBUF);
963 ha->idle_chk[i] = NULL;
964 }
965
966 if (ha->grcdump[i] != NULL) {

--- 66 unchanged lines hidden (view full) ---

1033 if (ha->pci_dbells)
1034 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->dbells_rid,
1035 ha->pci_dbells);
1036
1037 if (ha->msix_bar)
1038 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->msix_rid,
1039 ha->msix_bar);
1040
1041 QL_DPRINT2(ha, "exit\n");
1042 return;
1043}
1044
1045static void
1046qlnx_trigger_dump(qlnx_host_t *ha)
1047{
1048 int i;
1049
1050 if (ha->ifp != NULL)
1051 ha->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
1052
1053 QL_DPRINT2(ha, "enter\n");
1054
1055 for (i = 0; i < ha->cdev.num_hwfns; i++) {
1056 qlnx_grc_dump(ha, &ha->grcdump_dwords[i], i);
1057 qlnx_idle_chk(ha, &ha->idle_chk_dwords[i], i);
1058 }
1059
1060 QL_DPRINT2(ha, "exit\n");
1061
1062 return;
1063}
1064
1065static int
1066qlnx_trigger_dump_sysctl(SYSCTL_HANDLER_ARGS)
1067{
1068 int err, ret = 0;

--- 702 unchanged lines hidden (view full) ---

1771 OID_AUTO, "personality", CTLFLAG_RD,
1772 &ha->personality, ha->personality,
1773 "\tpersonality = 0 => Ethernet Only\n"
1774 "\tpersonality = 3 => Ethernet and RoCE\n"
1775 "\tpersonality = 4 => Ethernet and iWARP\n"
1776 "\tpersonality = 6 => Default in Shared Memory\n");
1777
1778 ha->dbg_level = 0;
1779 SYSCTL_ADD_UINT(ctx, children,
1780 OID_AUTO, "debug", CTLFLAG_RW,
1781 &ha->dbg_level, ha->dbg_level, "Debug Level");
1782
1783 ha->dp_level = 0x01;
1784 SYSCTL_ADD_UINT(ctx, children,
1785 OID_AUTO, "dp_level", CTLFLAG_RW,
1786 &ha->dp_level, ha->dp_level, "DP Level");
1787
1788 ha->dbg_trace_lro_cnt = 0;
1789 SYSCTL_ADD_UINT(ctx, children,
1790 OID_AUTO, "dbg_trace_lro_cnt", CTLFLAG_RW,
1791 &ha->dbg_trace_lro_cnt, ha->dbg_trace_lro_cnt,

--- 198 unchanged lines hidden (view full) ---

1990 }
1991
1992 ifmedia_add(&ha->media, (IFM_ETHER | IFM_FDX), 0, NULL);
1993 ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);
1994
1995
1996 ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));
1997
1998 QL_DPRINT2(ha, "exit\n");
1999
2000 return;
2001}
2002
2003static void
2004qlnx_init_locked(qlnx_host_t *ha)
2005{
2006 struct ifnet *ifp = ha->ifp;
2007
2008 QL_DPRINT1(ha, "Driver Initialization start \n");
2009
2010 qlnx_stop(ha);
2011
2012 if (qlnx_load(ha) == 0) {
2013 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2014 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2015 }
2016
2017 return;
2018}
2019
2020static void
2021qlnx_init(void *arg)
2022{
2023 qlnx_host_t *ha;
2024
2025 ha = (qlnx_host_t *)arg;
2026
2027 QL_DPRINT2(ha, "enter\n");
2028
2029 QLNX_LOCK(ha);
2030 qlnx_init_locked(ha);
2031 QLNX_UNLOCK(ha);
2032
2033 QL_DPRINT2(ha, "exit\n");
2034
2035 return;
2036}
2037
2038static int
2039qlnx_config_mcast_mac_addr(qlnx_host_t *ha, uint8_t *mac_addr, uint32_t add_mac)
2040{
2041 struct ecore_filter_mcast *mcast;

--- 172 unchanged lines hidden (view full) ---

2214 struct ifreq *ifr = (struct ifreq *)data;
2215 struct ifaddr *ifa = (struct ifaddr *)data;
2216 qlnx_host_t *ha;
2217
2218 ha = (qlnx_host_t *)ifp->if_softc;
2219
2220 switch (cmd) {
2221 case SIOCSIFADDR:
2222 QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx)\n", cmd);
2223
2224 if (ifa->ifa_addr->sa_family == AF_INET) {
2225 ifp->if_flags |= IFF_UP;
2226 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2227 QLNX_LOCK(ha);
2228 qlnx_init_locked(ha);
2229 QLNX_UNLOCK(ha);
2230 }
2231 QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
2232 cmd, ntohl(IA_SIN(ifa)->sin_addr.s_addr));
2233
2234 arp_ifinit(ifp, ifa);
2235 } else {
2236 ether_ioctl(ifp, cmd, data);
2237 }
2238 break;
2239
2240 case SIOCSIFMTU:
2241 QL_DPRINT4(ha, "SIOCSIFMTU (0x%lx)\n", cmd);
2242
2243 if (ifr->ifr_mtu > QLNX_MAX_MTU) {
2244 ret = EINVAL;
2245 } else {
2246 QLNX_LOCK(ha);
2247 ifp->if_mtu = ifr->ifr_mtu;
2248 ha->max_frame_size =
2249 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2250 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2251 qlnx_init_locked(ha);
2252 }
2253
2254 QLNX_UNLOCK(ha);
2255 }
2256
2257 break;
2258
2259 case SIOCSIFFLAGS:
2260 QL_DPRINT4(ha, "SIOCSIFFLAGS (0x%lx)\n", cmd);
2261
2262 QLNX_LOCK(ha);
2263
2264 if (ifp->if_flags & IFF_UP) {
2265 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2266 if ((ifp->if_flags ^ ha->if_flags) &
2267 IFF_PROMISC) {
2268 ret = qlnx_set_promisc(ha);

--- 11 unchanged lines hidden (view full) ---

2280 qlnx_stop(ha);
2281 ha->if_flags = ifp->if_flags;
2282 }
2283
2284 QLNX_UNLOCK(ha);
2285 break;
2286
2287 case SIOCADDMULTI:
2288 QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCADDMULTI", cmd);
2289
2290 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2291 if (qlnx_set_multi(ha, 1))
2292 ret = EINVAL;
2293 }
2294 break;
2295
2296 case SIOCDELMULTI:
2297 QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCDELMULTI", cmd);
2298
2299 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2300 if (qlnx_set_multi(ha, 0))
2301 ret = EINVAL;
2302 }
2303 break;
2304
2305 case SIOCSIFMEDIA:
2306 case SIOCGIFMEDIA:
2307 QL_DPRINT4(ha, "SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", cmd);
2308
2309 ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
2310 break;
2311
2312 case SIOCSIFCAP:
2313
2314 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2315
2316 QL_DPRINT4(ha, "SIOCSIFCAP (0x%lx)\n", cmd);
2317
2318 if (mask & IFCAP_HWCSUM)
2319 ifp->if_capenable ^= IFCAP_HWCSUM;
2320 if (mask & IFCAP_TSO4)
2321 ifp->if_capenable ^= IFCAP_TSO4;
2322 if (mask & IFCAP_TSO6)
2323 ifp->if_capenable ^= IFCAP_TSO6;
2324 if (mask & IFCAP_VLAN_HWTAGGING)

--- 26 unchanged lines hidden (view full) ---

2351 (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2)) {
2352 ret = EINVAL;
2353 break;
2354 }
2355
2356 p_ptt = ecore_ptt_acquire(p_hwfn);
2357
2358 if (!p_ptt) {
2359 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
2360 ret = -1;
2361 break;
2362 }
2363
2364 ret = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt,
2365 (ha->pci_func & 0x1), i2c.dev_addr, i2c.offset,
2366 i2c.len, &i2c.data[0]);
2367
2368 ecore_ptt_release(p_hwfn, p_ptt);
2369
2370 if (ret) {
2371 ret = -1;
2372 break;
2373 }
2374
2375 ret = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
2376
2377 		QL_DPRINT8(ha, "SIOCGI2C copyout ret = %d"
2378 		    " len = %d addr = 0x%02x offset = 0x%04x"
2379 		    " data[0..7]=0x%02x 0x%02x 0x%02x 0x%02x 0x%02x"
2380 		    " 0x%02x 0x%02x 0x%02x\n",
2381 ret, i2c.len, i2c.dev_addr, i2c.offset,
2382 i2c.data[0], i2c.data[1], i2c.data[2], i2c.data[3],
2383 i2c.data[4], i2c.data[5], i2c.data[6], i2c.data[7]);
2384 break;
2385 }
2386#endif /* #if (__FreeBSD_version >= 1100101) */
2387
2388 default:
2389 QL_DPRINT4(ha, "default (0x%lx)\n", cmd);
2390 ret = ether_ioctl(ifp, cmd, data);
2391 break;
2392 }
2393
2394 return (ret);
2395}
2396
2397static int
2398qlnx_media_change(struct ifnet *ifp)
2399{
2400 qlnx_host_t *ha;
2401 struct ifmedia *ifm;
2402 int ret = 0;
2403
2404 ha = (qlnx_host_t *)ifp->if_softc;
2405
2406 QL_DPRINT2(ha, "enter\n");
2407
2408 ifm = &ha->media;
2409
2410 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2411 ret = EINVAL;
2412
2413 QL_DPRINT2(ha, "exit\n");
2414
2415 return (ret);
2416}
2417
2418static void
2419qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2420{
2421 qlnx_host_t *ha;
2422
2423 ha = (qlnx_host_t *)ifp->if_softc;
2424
2425 QL_DPRINT2(ha, "enter\n");
2426
2427 ifmr->ifm_status = IFM_AVALID;
2428 ifmr->ifm_active = IFM_ETHER;
2429
2430 if (ha->link_up) {
2431 ifmr->ifm_status |= IFM_ACTIVE;
2432 ifmr->ifm_active |=
2433 (IFM_FDX | qlnx_get_optics(ha, &ha->if_link));
2434
2435 if (ha->if_link.link_partner_caps &
2436 (QLNX_LINK_CAP_Pause | QLNX_LINK_CAP_Asym_Pause))
2437 ifmr->ifm_active |=
2438 (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE);
2439 }
2440
2441 QL_DPRINT2(ha, "exit (%s)\n", (ha->link_up ? "link_up" : "link_down"));
2442
2443 return;
2444}
2445
2446
2447static void
2448qlnx_free_tx_pkt(qlnx_host_t *ha, struct qlnx_fastpath *fp,
2449 struct qlnx_tx_queue *txq)

--- 9 unchanged lines hidden (view full) ---

2459 idx = txq->sw_tx_cons;
2460 mp = txq->sw_tx_ring[idx].mp;
2461 map = txq->sw_tx_ring[idx].map;
2462
2463 if ((mp == NULL) || QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL)){
2464
2465 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL);
2466
2467 QL_DPRINT1(ha, "(mp == NULL) "
2468 " tx_idx = 0x%x"
2469 " ecore_prod_idx = 0x%x"
2470 " ecore_cons_idx = 0x%x"
2471 " hw_bd_cons = 0x%x"
2472 " txq_db_last = 0x%x"
2473 " elem_left = 0x%x\n",
2474 fp->rss_id,
2475 ecore_chain_get_prod_idx(&txq->tx_pbl),
2476 ecore_chain_get_cons_idx(&txq->tx_pbl),
2477 le16toh(*txq->hw_cons_ptr),
2478 txq->tx_db.raw,
2479 ecore_chain_get_elem_left(&txq->tx_pbl));
2480
2481 fp->err_tx_free_pkt_null++;
2482
2483 //DEBUG
2484 qlnx_trigger_dump(ha);
2485
2486 return;
2487 } else {

--- 44 unchanged lines hidden (view full) ---

2532 } else {
2533 diff = hw_bd_cons - ecore_cons_idx;
2534 }
2535 if ((diff > TX_RING_SIZE) ||
2536 QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF)){
2537
2538 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF);
2539
2540 QL_DPRINT1(ha, "(diff = 0x%x) "
2541 " tx_idx = 0x%x"
2542 " ecore_prod_idx = 0x%x"
2543 " ecore_cons_idx = 0x%x"
2544 " hw_bd_cons = 0x%x"
2545 " txq_db_last = 0x%x"
2546 " elem_left = 0x%x\n",
2547 diff,
2548 fp->rss_id,
2549 ecore_chain_get_prod_idx(&txq->tx_pbl),
2550 ecore_chain_get_cons_idx(&txq->tx_pbl),
2551 le16toh(*txq->hw_cons_ptr),
2552 txq->tx_db.raw,
2553 ecore_chain_get_elem_left(&txq->tx_pbl));
2554
2555 fp->err_tx_cons_idx_conflict++;
2556
2557 //DEBUG
2558 qlnx_trigger_dump(ha);
2559 }
2560
2561 qlnx_free_tx_pkt(ha, fp, txq);

--- 5 unchanged lines hidden (view full) ---

2567
2568static int
2569qlnx_transmit(struct ifnet *ifp, struct mbuf *mp)
2570{
2571 qlnx_host_t *ha = (qlnx_host_t *)ifp->if_softc;
2572 struct qlnx_fastpath *fp;
2573 int rss_id = 0, ret = 0;
2574
2575 QL_DPRINT2(ha, "enter\n");
2576
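	/*
	 * Pick a tx queue from the mbuf's flow hash: fold the flowid into
	 * the RSS indirection table space first, then into the number of
	 * RSS queues actually configured.
	 */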
2577#if __FreeBSD_version >= 1100000
2578 if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE)
2579#else
2580 if (mp->m_flags & M_FLOWID)
2581#endif
2582 rss_id = (mp->m_pkthdr.flowid % ECORE_RSS_IND_TABLE_SIZE) %
2583 ha->num_rss;

--- 11 unchanged lines hidden (view full) ---

2595
2596 if (fp->fp_taskqueue != NULL)
2597 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
2598
2599 ret = 0;
2600
2601qlnx_transmit_exit:
2602
2603 QL_DPRINT2(ha, "exit ret = %d\n", ret);
2604 return ret;
2605}
2606
2607static void
2608qlnx_qflush(struct ifnet *ifp)
2609{
2610 int rss_id;
2611 struct qlnx_fastpath *fp;
2612 struct mbuf *mp;
2613 qlnx_host_t *ha;
2614
2615 ha = (qlnx_host_t *)ifp->if_softc;
2616
2617 QL_DPRINT2(ha, "enter\n");
2618
2619 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
2620
2621 fp = &ha->fp_array[rss_id];
2622
2623 if (fp == NULL)
2624 continue;
2625
2626 if (fp->tx_br) {
2627 mtx_lock(&fp->tx_mtx);
2628
2629 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
2630 fp->tx_pkts_freed++;
2631 m_freem(mp);
2632 }
2633 mtx_unlock(&fp->tx_mtx);
2634 }
2635 }
2636 QL_DPRINT2(ha, "exit\n");
2637
2638 return;
2639}
2640
2641static void
2642qlnx_txq_doorbell_wr32(qlnx_host_t *ha, void *reg_addr, uint32_t value)
2643{
2644 struct ecore_dev *cdev;

--- 127 unchanged lines hidden (view full) ---

2772 struct eth_tx_2nd_bd *second_bd;
2773 struct eth_tx_3rd_bd *third_bd;
2774 struct eth_tx_bd *tx_data_bd;
2775
2776 int seg_idx = 0;
2777 uint32_t nbds_in_hdr = 0;
2778 uint32_t offset = 0;
2779
2780 QL_DPRINT8(ha, "enter\n");
2781
2782 if (!ha->link_up)
2783 return (-1);
2784
2785 first_bd = NULL;
2786 second_bd = NULL;
2787 third_bd = NULL;
2788 tx_data_bd = NULL;

--- 35 unchanged lines hidden (view full) ---

2824 if ((ret == EFBIG) ||
2825 ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && (
2826 (!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) ||
2827 ((m_head->m_pkthdr.csum_flags & CSUM_TSO) &&
2828 qlnx_tso_check(fp, segs, nsegs, offset))))) {
2829
2830 struct mbuf *m;
2831
2832 QL_DPRINT8(ha, "EFBIG [%d]\n", m_head->m_pkthdr.len);
2833
2834 fp->tx_defrag++;
2835
2836 m = m_defrag(m_head, M_NOWAIT);
2837 if (m == NULL) {
2838 fp->err_tx_defrag++;
2839 fp->tx_pkts_freed++;
2840 m_freem(m_head);
2841 *m_headp = NULL;
2842 QL_DPRINT1(ha, "m_defrag() = NULL [%d]\n", ret);
2843 return (ENOBUFS);
2844 }
2845
2846 m_head = m;
2847 *m_headp = m_head;
2848
2849 if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
2850 segs, &nsegs, BUS_DMA_NOWAIT))) {
2851
2852 fp->err_tx_defrag_dmamap_load++;
2853
2854 QL_DPRINT1(ha,
2855 "bus_dmamap_load_mbuf_sg failed0 [%d, %d]\n",
2856 ret, m_head->m_pkthdr.len);
2857
2858 fp->tx_pkts_freed++;
2859 m_freem(m_head);
2860 *m_headp = NULL;
2861
2862 return (ret);
2863 }
2864
2865 if ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) &&
2866 !(m_head->m_pkthdr.csum_flags & CSUM_TSO)) {
2867
2868 fp->err_tx_non_tso_max_seg++;
2869
2870 QL_DPRINT1(ha,
2871 "(%d) nsegs too many for non-TSO [%d, %d]\n",
2872 ret, nsegs, m_head->m_pkthdr.len);
2873
2874 fp->tx_pkts_freed++;
2875 m_freem(m_head);
2876 *m_headp = NULL;
2877
2878 return (ret);
2879 }
2880 if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
2881 offset = qlnx_tcp_offset(ha, m_head);
2882
2883 } else if (ret) {
2884
2885 fp->err_tx_dmamap_load++;
2886
2887 QL_DPRINT1(ha, "bus_dmamap_load_mbuf_sg failed1 [%d, %d]\n",
2888 ret, m_head->m_pkthdr.len);
2889 fp->tx_pkts_freed++;
2890 m_freem(m_head);
2891 *m_headp = NULL;
2892 return (ret);
2893 }
2894
2895 QL_ASSERT(ha, (nsegs != 0), ("qlnx_send: empty packet"));
2896
2897 if (ha->dbg_trace_tso_pkt_len) {
2898 if (nsegs < QLNX_FP_MAX_SEGS)
2899 fp->tx_pkts[(nsegs - 1)]++;
2900 else
2901 fp->tx_pkts[(QLNX_FP_MAX_SEGS - 1)]++;
2902 }
2903
2904 if ((nsegs + QLNX_TX_ELEM_RESERVE) >
2905 (int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))) {
2906
2907 		QL_DPRINT1(ha, "(%d, 0x%x) insufficient BDs"
2908 " in chain[%d] trying to free packets\n",
2909 nsegs, elem_left, fp->rss_id);
2910
2911 fp->tx_nsegs_gt_elem_left++;
2912
2913 (void)qlnx_tx_int(ha, fp, txq);
2914
2915 if ((nsegs + QLNX_TX_ELEM_RESERVE) > (int)(elem_left =
2916 ecore_chain_get_elem_left(&txq->tx_pbl))) {
2917
2918 QL_DPRINT1(ha,
2919 "(%d, 0x%x) insuffient BDs in chain[%d]\n",
2920 nsegs, elem_left, fp->rss_id);
2921
2922 fp->err_tx_nsegs_gt_elem_left++;
2923 fp->tx_ring_full = 1;
2924 ha->storm_stats_enable = 1;
2925 return (ENOBUFS);
2926 }
2927 }
2928

--- 211 unchanged lines hidden (view full) ---

3140 txq->sw_tx_ring[idx].nsegs = nsegs;
3141 txq->sw_tx_prod = (txq->sw_tx_prod + 1) & (TX_RING_SIZE - 1);
3142
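	/*
	 * Publish the new chain producer index to the NIC through the
	 * doorbell register.  (The software ring index above wraps with a
	 * mask, which assumes TX_RING_SIZE is a power of two.)
	 */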
3143 txq->tx_db.data.bd_prod =
3144 htole16(ecore_chain_get_prod_idx(&txq->tx_pbl));
3145
3146 qlnx_txq_doorbell_wr32(ha, txq->doorbell_addr, txq->tx_db.raw);
3147
3148 QL_DPRINT8(ha, "exit\n");
3149 return (0);
3150}
3151
3152static void
3153qlnx_stop(qlnx_host_t *ha)
3154{
3155 struct ifnet *ifp = ha->ifp;
3156 device_t dev;
3157 int i;
3158
3159 dev = ha->pci_dev;
3160
3161 ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
3162
3163 /*
3164 * We simply lock and unlock each fp->tx_mtx to
3165 * propagate the if_drv_flags
3166 * state to each tx thread
3167 */
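	/*
	 * In effect the lock/unlock pair is a barrier: once it completes,
	 * no tx thread can still be inside a critical section entered
	 * before IFF_DRV_RUNNING was cleared above.
	 */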
3168 	QL_DPRINT1(ha, "QLNX STATE = %d\n", ha->state);
3169
3170 if (ha->state == QLNX_STATE_OPEN) {
3171 for (i = 0; i < ha->num_rss; i++) {
3172 struct qlnx_fastpath *fp = &ha->fp_array[i];
3173
3174 mtx_lock(&fp->tx_mtx);
3175 mtx_unlock(&fp->tx_mtx);
3176
3177 if (fp->fp_taskqueue != NULL)

--- 76 unchanged lines hidden (view full) ---

3254 while (len) {
3255
3256 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3257
3258 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3259 mp = sw_rx_data->data;
3260
3261 if (mp == NULL) {
3262 QL_DPRINT1(ha, "mp = NULL\n");
3263 fp->err_rx_mp_null++;
3264 rxq->sw_rx_cons =
3265 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3266
3267 if (mpf != NULL)
3268 m_freem(mpf);
3269
3270 return (-1);
3271 }
3272 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3273 BUS_DMASYNC_POSTREAD);
3274
3275 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3276
3277 QL_DPRINT1(ha, "New buffer allocation failed, dropping"
3278 " incoming packet and reusing its buffer\n");
3279
3280 qlnx_reuse_rx_data(rxq);
3281 fp->err_rx_alloc_errors++;
3282
3283 if (mpf != NULL)
3284 m_freem(mpf);
3285
3286 return (-1);

--- 43 unchanged lines hidden (view full) ---

3330 device_t dev;
3331#if __FreeBSD_version >= 1100000
3332 uint8_t hash_type;
3333#endif /* #if __FreeBSD_version >= 1100000 */
3334
3335 dev = ha->pci_dev;
3336 agg_index = cqe->tpa_agg_index;
3337
3338 	QL_DPRINT7(ha, "[rss_id = %d]: enter\n"
3339 	    "\t type = 0x%x\n"
3340 	    "\t bitfields = 0x%x\n"
3341 	    "\t seg_len = 0x%x\n"
3342 	    "\t pars_flags = 0x%x\n"
3343 	    "\t vlan_tag = 0x%x\n"
3344 	    "\t rss_hash = 0x%x\n"
3345 	    "\t len_on_first_bd = 0x%x\n"
3346 	    "\t placement_offset = 0x%x\n"
3347 	    "\t tpa_agg_index = 0x%x\n"
3348 	    "\t header_len = 0x%x\n"
3349 	    "\t ext_bd_len_list[0] = 0x%x\n"
3350 	    "\t ext_bd_len_list[1] = 0x%x\n"
3351 	    "\t ext_bd_len_list[2] = 0x%x\n"
3352 	    "\t ext_bd_len_list[3] = 0x%x\n"
3353 	    "\t ext_bd_len_list[4] = 0x%x\n",
3354 fp->rss_id, cqe->type, cqe->bitfields, cqe->seg_len,
3355 cqe->pars_flags.flags, cqe->vlan_tag,
3356 cqe->rss_hash, cqe->len_on_first_bd, cqe->placement_offset,
3357 cqe->tpa_agg_index, cqe->header_len,
3358 cqe->ext_bd_len_list[0], cqe->ext_bd_len_list[1],
3359 cqe->ext_bd_len_list[2], cqe->ext_bd_len_list[3],
3360 cqe->ext_bd_len_list[4]);
3361
3362 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
3363 fp->err_rx_tpa_invalid_agg_num++;
3364 return;
3365 }
3366
3367 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3368 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_POSTREAD);
3369 mp = sw_rx_data->data;
3370
3371 QL_DPRINT7(ha, "[rss_id = %d]: mp = %p \n ", fp->rss_id, mp);
3372
3373 if (mp == NULL) {
3374 QL_DPRINT7(ha, "[%d]: mp = NULL\n", fp->rss_id);
3375 fp->err_rx_mp_null++;
3376 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3377
3378 return;
3379 }
3380
3381 if ((le16toh(cqe->pars_flags.flags)) & CQE_FLAGS_ERR) {
3382
3383 QL_DPRINT7(ha, "[%d]: CQE in CONS = %u has error,"
3384 " flags = %x, dropping incoming packet\n", fp->rss_id,
3385 rxq->sw_rx_cons, le16toh(cqe->pars_flags.flags));
3386
3387 fp->err_rx_hw_errors++;
3388
3389 qlnx_reuse_rx_data(rxq);
3390
3391 QLNX_INC_IERRORS(ifp);
3392
3393 return;
3394 }
3395
3396 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3397
3398 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
3399 " dropping incoming packet and reusing its buffer\n",
3400 fp->rss_id);
3401
3402 fp->err_rx_alloc_errors++;
3403 QLNX_INC_IQDROPS(ifp);
3404
3405 /*
3406 * Load the tpa mbuf into the rx ring and save the
3407 * posted mbuf
3408 */

--- 35 unchanged lines hidden (view full) ---

3444 }
3445
3446 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
3447 return;
3448 }
3449
3450 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
3451
3452 QL_DPRINT7(ha, "[%d]: invalid aggregation state,"
3453 " dropping incoming packet and reusing its buffer\n",
3454 fp->rss_id);
3455
3456 QLNX_INC_IQDROPS(ifp);
3457
3458 /* if we already have mbuf head in aggregation free it */
3459 if (rxq->tpa_info[agg_index].mpf) {
3460 m_freem(rxq->tpa_info[agg_index].mpf);
3461 rxq->tpa_info[agg_index].mpl = NULL;
3462 }

--- 20 unchanged lines hidden (view full) ---

3483 * first process the ext_bd_len_list
3484 * if this fails then we simply drop the packet
3485 */
3486 ecore_chain_consume(&rxq->rx_bd_ring);
3487 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3488
3489 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
3490
3491 QL_DPRINT7(ha, "[%d]: 4\n ", fp->rss_id);
3492
3493 if (cqe->ext_bd_len_list[i] == 0)
3494 break;
3495
3496 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3497 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3498 BUS_DMASYNC_POSTREAD);
3499
3500 mpc = sw_rx_data->data;
3501
3502 if (mpc == NULL) {
3503 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
3504 fp->err_rx_mp_null++;
3505 if (mpf != NULL)
3506 m_freem(mpf);
3507 mpf = mpl = NULL;
3508 rxq->tpa_info[agg_index].agg_state =
3509 QLNX_AGG_STATE_ERROR;
3510 ecore_chain_consume(&rxq->rx_bd_ring);
3511 rxq->sw_rx_cons =
3512 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3513 continue;
3514 }
3515
3516 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3517 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
3518 " dropping incoming packet and reusing its"
3519 " buffer\n", fp->rss_id);
3520
3521 qlnx_reuse_rx_data(rxq);
3522
3523 if (mpf != NULL)
3524 m_freem(mpf);
3525 mpf = mpl = NULL;
3526
3527 rxq->tpa_info[agg_index].agg_state =

--- 21 unchanged lines hidden (view full) ---

3549
3550 ecore_chain_consume(&rxq->rx_bd_ring);
3551 rxq->sw_rx_cons =
3552 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3553 }
3554
3555 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
3556
3557 QL_DPRINT7(ha, "[%d]: invalid aggregation state, dropping"
3558 " incoming packet and reusing its buffer\n",
3559 fp->rss_id);
3560
3561 QLNX_INC_IQDROPS(ifp);
3562
3563 rxq->tpa_info[agg_index].mpf = mp;
3564 rxq->tpa_info[agg_index].mpl = NULL;
3565
3566 return;
3567 }

--- 63 unchanged lines hidden (view full) ---

3631
3632 if (CQE_HAS_VLAN(cqe->pars_flags.flags)) {
3633 mp->m_pkthdr.ether_vtag = le16toh(cqe->vlan_tag);
3634 mp->m_flags |= M_VLANTAG;
3635 }
3636
3637 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_START;
3638
3639 QL_DPRINT7(ha, "[%d]: 5\n\tagg_state = %d\n\t mpf = %p mpl = %p\n",
3640 fp->rss_id, rxq->tpa_info[agg_index].agg_state,
3641 rxq->tpa_info[agg_index].mpf, rxq->tpa_info[agg_index].mpl);
3642
3643 return;
3644}
3645
3646static void
3647qlnx_tpa_cont(qlnx_host_t *ha, struct qlnx_fastpath *fp,
3648 struct qlnx_rx_queue *rxq,
3649 struct eth_fast_path_rx_tpa_cont_cqe *cqe)
3650{
3651 struct sw_rx_data *sw_rx_data;
3652 int i;
3653 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL;
3654 struct mbuf *mp;
3655 uint32_t agg_index;
3656 device_t dev;
3657
3658 dev = ha->pci_dev;
3659
3660 	QL_DPRINT7(ha, "[%d]: enter\n"
3661 	    "\t type = 0x%x\n"
3662 	    "\t tpa_agg_index = 0x%x\n"
3663 	    "\t len_list[0] = 0x%x\n"
3664 	    "\t len_list[1] = 0x%x\n"
3665 	    "\t len_list[2] = 0x%x\n"
3666 	    "\t len_list[3] = 0x%x\n"
3667 	    "\t len_list[4] = 0x%x\n"
3668 	    "\t len_list[5] = 0x%x\n",
3669 fp->rss_id, cqe->type, cqe->tpa_agg_index,
3670 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
3671 cqe->len_list[3], cqe->len_list[4], cqe->len_list[5]);
3672
3673 agg_index = cqe->tpa_agg_index;
3674
3675 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
3676 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
3677 fp->err_rx_tpa_invalid_agg_num++;
3678 return;
3679 }
3680
3681
3682 for (i = 0; i < ETH_TPA_CQE_CONT_LEN_LIST_SIZE; i++) {
3683
3684 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);
3685
3686 if (cqe->len_list[i] == 0)
3687 break;
3688
3689 if (rxq->tpa_info[agg_index].agg_state !=
3690 QLNX_AGG_STATE_START) {
3691 qlnx_reuse_rx_data(rxq);
3692 continue;
3693 }
3694
3695 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3696 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3697 BUS_DMASYNC_POSTREAD);
3698
3699 mpc = sw_rx_data->data;
3700
3701 if (mpc == NULL) {
3702
3703 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
3704
3705 fp->err_rx_mp_null++;
3706 if (mpf != NULL)
3707 m_freem(mpf);
3708 mpf = mpl = NULL;
3709 rxq->tpa_info[agg_index].agg_state =
3710 QLNX_AGG_STATE_ERROR;
3711 ecore_chain_consume(&rxq->rx_bd_ring);
3712 rxq->sw_rx_cons =
3713 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3714 continue;
3715 }
3716
3717 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3718
3719 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
3720 " dropping incoming packet and reusing its"
3721 " buffer\n", fp->rss_id);
3722
3723 qlnx_reuse_rx_data(rxq);
3724
3725 if (mpf != NULL)
3726 m_freem(mpf);
3727 mpf = mpl = NULL;
3728
3729 rxq->tpa_info[agg_index].agg_state =

--- 19 unchanged lines hidden (view full) ---

3749 mpl = mpc;
3750 }
3751
3752 ecore_chain_consume(&rxq->rx_bd_ring);
3753 rxq->sw_rx_cons =
3754 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3755 }
3756
3757 QL_DPRINT7(ha, "[%d]: 2\n" "\tmpf = %p mpl = %p\n",
3758 fp->rss_id, mpf, mpl);
3759
3760 if (mpf != NULL) {
3761 mp = rxq->tpa_info[agg_index].mpl;
3762 mp->m_len = ha->rx_buf_size;
3763 mp->m_next = mpf;
3764 rxq->tpa_info[agg_index].mpl = mpl;
3765 }
3766

--- 11 unchanged lines hidden (view full) ---

3778 struct mbuf *mp;
3779 uint32_t agg_index;
3780 uint32_t len = 0;
3781 struct ifnet *ifp = ha->ifp;
3782 device_t dev;
3783
3784 dev = ha->pci_dev;
3785
3786 	QL_DPRINT7(ha, "[%d]: enter\n"
3787 	    "\t type = 0x%x\n"
3788 	    "\t tpa_agg_index = 0x%x\n"
3789 	    "\t total_packet_len = 0x%x\n"
3790 	    "\t num_of_bds = 0x%x\n"
3791 	    "\t end_reason = 0x%x\n"
3792 	    "\t num_of_coalesced_segs = 0x%x\n"
3793 	    "\t ts_delta = 0x%x\n"
3794 	    "\t len_list[0] = 0x%x\n"
3795 	    "\t len_list[1] = 0x%x\n"
3796 	    "\t len_list[2] = 0x%x\n"
3797 	    "\t len_list[3] = 0x%x\n",
3798 fp->rss_id, cqe->type, cqe->tpa_agg_index,
3799 cqe->total_packet_len, cqe->num_of_bds,
3800 cqe->end_reason, cqe->num_of_coalesced_segs, cqe->ts_delta,
3801 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
3802 cqe->len_list[3]);
3803
3804 agg_index = cqe->tpa_agg_index;
3805
3806 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
3807
3808 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
3809
3810 fp->err_rx_tpa_invalid_agg_num++;
3811 return (0);
3812 }
3813
3814
3815 for (i = 0; i < ETH_TPA_CQE_END_LEN_LIST_SIZE; i++) {
3816
3817 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);
3818
3819 if (cqe->len_list[i] == 0)
3820 break;
3821
3822 if (rxq->tpa_info[agg_index].agg_state !=
3823 QLNX_AGG_STATE_START) {
3824
3825 QL_DPRINT7(ha, "[%d]: 2\n ", fp->rss_id);
3826
3827 qlnx_reuse_rx_data(rxq);
3828 continue;
3829 }
3830
3831 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3832 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3833 BUS_DMASYNC_POSTREAD);
3834
3835 mpc = sw_rx_data->data;
3836
3837 if (mpc == NULL) {
3838
3839 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
3840
3841 fp->err_rx_mp_null++;
3842 if (mpf != NULL)
3843 m_freem(mpf);
3844 mpf = mpl = NULL;
3845 rxq->tpa_info[agg_index].agg_state =
3846 QLNX_AGG_STATE_ERROR;
3847 ecore_chain_consume(&rxq->rx_bd_ring);
3848 rxq->sw_rx_cons =
3849 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3850 continue;
3851 }
3852
3853 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3854 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
3855 " dropping incoming packet and reusing its"
3856 " buffer\n", fp->rss_id);
3857
3858 qlnx_reuse_rx_data(rxq);
3859
3860 if (mpf != NULL)
3861 m_freem(mpf);
3862 mpf = mpl = NULL;
3863
3864 rxq->tpa_info[agg_index].agg_state =

--- 19 unchanged lines hidden (view full) ---

3884 mpl = mpc;
3885 }
3886
3887 ecore_chain_consume(&rxq->rx_bd_ring);
3888 rxq->sw_rx_cons =
3889 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3890 }
3891
3892 QL_DPRINT7(ha, "[%d]: 5\n ", fp->rss_id);
3893
3894 if (mpf != NULL) {
3895
3896 QL_DPRINT7(ha, "[%d]: 6\n ", fp->rss_id);
3897
3898 mp = rxq->tpa_info[agg_index].mpl;
3899 mp->m_len = ha->rx_buf_size;
3900 mp->m_next = mpf;
3901 }
3902
3903 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_START) {
3904
3905 QL_DPRINT7(ha, "[%d]: 7\n ", fp->rss_id);
3906
3907 if (rxq->tpa_info[agg_index].mpf != NULL)
3908 m_freem(rxq->tpa_info[agg_index].mpf);
3909 rxq->tpa_info[agg_index].mpf = NULL;
3910 rxq->tpa_info[agg_index].mpl = NULL;
3911 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
3912 return (0);
3913 }

--- 16 unchanged lines hidden (view full) ---

3930 mpl = rxq->tpa_info[agg_index].mpl;
3931 mpl->m_len += (cqe->total_packet_len - len);
3932 }
3933 }
3934
3935 QLNX_INC_IPACKETS(ifp);
3936 QLNX_INC_IBYTES(ifp, (cqe->total_packet_len));
3937
3938 	QL_DPRINT7(ha, "[%d]: 8 csum_data = 0x%x csum_flags = 0x%lx\n"
3939 	    "\tm_len = 0x%x m_pkthdr_len = 0x%x\n",
3940 fp->rss_id, mp->m_pkthdr.csum_data,
3941 mp->m_pkthdr.csum_flags, mp->m_len, mp->m_pkthdr.len);
3942
3943 (*ifp->if_input)(ifp, mp);
3944
3945 rxq->tpa_info[agg_index].mpf = NULL;
3946 rxq->tpa_info[agg_index].mpl = NULL;
3947 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
3948
3949 return (cqe->num_of_coalesced_segs);

--- 41 unchanged lines hidden (view full) ---

3991#endif /* #if __FreeBSD_version >= 1100000 */
3992
3993 /* Get the CQE from the completion ring */
3994 cqe = (union eth_rx_cqe *)
3995 ecore_chain_consume(&rxq->rx_comp_ring);
3996 cqe_type = cqe->fast_path_regular.type;
3997
3998 if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) {
3999 			QL_DPRINT3(ha, "Got a slowpath CQE\n");
4000
4001 ecore_eth_cqe_completion(p_hwfn,
4002 (struct eth_slow_path_rx_cqe *)cqe);
4003 goto next_cqe;
4004 }
4005
4006 if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) {
4007

--- 24 unchanged lines hidden (view full) ---

4032 goto next_cqe;
4033 }
4034
4035 /* Get the data from the SW ring */
4036 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4037 mp = sw_rx_data->data;
4038
4039 if (mp == NULL) {
4040 QL_DPRINT1(ha, "mp = NULL\n");
4041 fp->err_rx_mp_null++;
4042 rxq->sw_rx_cons =
4043 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4044 goto next_cqe;
4045 }
4046 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4047 BUS_DMASYNC_POSTREAD);
4048
4049 /* non GRO */
4050 fp_cqe = &cqe->fast_path_regular;/* MK CR TPA check assembly */
4051 len = le16toh(fp_cqe->pkt_len);
4052 pad = fp_cqe->placement_offset;
4053
4054 QL_DPRINT3(ha, "CQE type = %x, flags = %x, vlan = %x,"
4055 " len %u, parsing flags = %d pad = %d\n",
4056 cqe_type, fp_cqe->bitfields,
4057 le16toh(fp_cqe->vlan_tag),
4058 len, le16toh(fp_cqe->pars_flags.flags), pad);
4059
4060 data = mtod(mp, uint8_t *);
4061 data = data + pad;
4062
4063 if (0)
4064 qlnx_dump_buf8(ha, __func__, data, len);
4065
4066 		/* For every Rx BD consumed, we allocate a new BD so the BD ring
4067 		 * always keeps a fixed size. If allocation fails, we take the
4068 * consumed BD and return it to the ring in the PROD position.
4069 * The packet that was received on that BD will be dropped (and
4070 * not passed to the upper stack).
4071 */
4072 /* If this is an error packet then drop it */
4073 if ((le16toh(cqe->fast_path_regular.pars_flags.flags)) &
4074 CQE_FLAGS_ERR) {
4075
4076 QL_DPRINT1(ha, "CQE in CONS = %u has error, flags = %x,"
4077 " dropping incoming packet\n", sw_comp_cons,
4078 le16toh(cqe->fast_path_regular.pars_flags.flags));
4079 fp->err_rx_hw_errors++;
4080
4081 qlnx_reuse_rx_data(rxq);
4082
4083 QLNX_INC_IERRORS(ifp);
4084
4085 goto next_cqe;
4086 }
4087
4088 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4089
4090 QL_DPRINT1(ha, "New buffer allocation failed, dropping"
4091 " incoming packet and reusing its buffer\n");
4092 qlnx_reuse_rx_data(rxq);
4093
4094 fp->err_rx_alloc_errors++;
4095
4096 QLNX_INC_IQDROPS(ifp);
4097
4098 goto next_cqe;
4099 }
4100
4101 ecore_chain_consume(&rxq->rx_bd_ring);
4102
4103 len_on_first_bd = fp_cqe->len_on_first_bd;
4104 m_adj(mp, pad);
4105 mp->m_pkthdr.len = len;
4106
4107 QL_DPRINT1(ha, "len = %d len_on_first_bd = %d\n",
4108 len, len_on_first_bd);
4109 		if ((len > 60) && (len > len_on_first_bd)) {
4110
4111 mp->m_len = len_on_first_bd;
4112
4113 if (qlnx_rx_jumbo_chain(ha, fp, mp,
4114 (len - len_on_first_bd)) != 0) {
4115
4116 m_freem(mp);

--- 134 unchanged lines hidden (view full) ---

4251
4252 if (ha->state != QLNX_STATE_OPEN) {
4253 return;
4254 }
4255
4256 idx = ivec->rss_idx;
4257
4258 	if (idx >= ha->num_rss) {
4259 QL_DPRINT1(ha, "illegal interrupt[%d]\n", idx);
4260 ha->err_illegal_intr++;
4261 return;
4262 }
4263 fp = &ha->fp_array[idx];
4264
4265 if (fp == NULL) {
4266 ha->err_fp_null++;
4267 } else {
4268 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
4269 if (fp->fp_taskqueue != NULL)
4270 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
4271 }
4272
4273 return;

--- 11 unchanged lines hidden (view full) ---

4285 qlnx_host_t *ha;
4286
4287 p_hwfn = arg;
4288
4289 ha = (qlnx_host_t *)p_hwfn->p_dev;
4290
4291 ha->sp_interrupts++;
4292
4293 QL_DPRINT2(ha, "enter\n");
4294
4295 ecore_int_sp_dpc(p_hwfn);
4296
4297 QL_DPRINT2(ha, "exit\n");
4298
4299 return;
4300}
4301
4302/*****************************************************************************
4303 * Support Functions for DMA'able Memory
4304 *****************************************************************************/
4305

--- 31 unchanged lines hidden (view full) ---

4337 dma_buf->size, /* maxsize */
4338 1, /* nsegments */
4339 dma_buf->size, /* maxsegsize */
4340 0, /* flags */
4341 NULL, NULL, /* lockfunc, lockarg */
4342 &dma_buf->dma_tag);
4343
4344 if (ret) {
4345 QL_DPRINT1(ha, "could not create dma tag\n");
4346 goto qlnx_alloc_dmabuf_exit;
4347 }
4348 ret = bus_dmamem_alloc(dma_buf->dma_tag,
4349 (void **)&dma_buf->dma_b,
4350 (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
4351 &dma_buf->dma_map);
4352 if (ret) {
4353 bus_dma_tag_destroy(dma_buf->dma_tag);
4354 QL_DPRINT1(ha, "bus_dmamem_alloc failed\n");
4355 goto qlnx_alloc_dmabuf_exit;
4356 }
4357
4358 ret = bus_dmamap_load(dma_buf->dma_tag,
4359 dma_buf->dma_map,
4360 dma_buf->dma_b,
4361 dma_buf->size,
4362 qlnx_dmamap_callback,

--- 45 unchanged lines hidden (view full) ---

4408 return (NULL);
4409 bzero((uint8_t *)dma_buf.dma_b, dma_buf.size);
4410
4411 *phys = dma_buf.dma_addr;
4412
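	/*
	 * Stash a copy of the qlnx_dma_t descriptor immediately past the
	 * caller-visible region (at v_addr + size, inside the page-rounded
	 * allocation) so qlnx_dma_free_coherent() can recover the tag and
	 * map later without a separate lookup table.
	 */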
4413 dma_p = (qlnx_dma_t *)((uint8_t *)dma_buf.dma_b + size);
4414
4415 memcpy(dma_p, &dma_buf, sizeof(qlnx_dma_t));
4416/*
4417 QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n",
4418 (void *)dma_buf.dma_map, (void *)dma_buf.dma_tag,
4419 dma_buf.dma_b, (void *)dma_buf.dma_addr, size);
4420*/
4421 return (dma_buf.dma_b);
4422}
4423
4424void
4425qlnx_dma_free_coherent(void *ecore_dev, void *v_addr, bus_addr_t phys,
4426 uint32_t size)
4427{
4428 qlnx_dma_t dma_buf, *dma_p;

--- 4 unchanged lines hidden (view full) ---

4433 dev = ha->pci_dev;
4434
4435 if (v_addr == NULL)
4436 return;
4437
4438 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
4439
4440 dma_p = (qlnx_dma_t *)((uint8_t *)v_addr + size);
4441/*
4442 QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n",
4443 (void *)dma_p->dma_map, (void *)dma_p->dma_tag,
4444 dma_p->dma_b, (void *)dma_p->dma_addr, size);
4445*/
4446 dma_buf = *dma_p;
4447
4448 qlnx_free_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf);
4449 return;
4450}
4451
4452static int
4453qlnx_alloc_parent_dma_tag(qlnx_host_t *ha)

--- 15 unchanged lines hidden (view full) ---

4469 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
4470 0, /* nsegments */
4471 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
4472 0, /* flags */
4473 NULL, NULL, /* lockfunc, lockarg */
4474 &ha->parent_tag);
4475
4476 if (ret) {
4477 QL_DPRINT1(ha, "could not create parent dma tag\n");
4478 return (-1);
4479 }
4480
4481 ha->flags.parent_tag = 1;
4482
4483 return (0);
4484}
4485

--- 18 unchanged lines hidden (view full) ---

4504 QLNX_MAX_TSO_FRAME_SIZE, /* maxsize */
4505 QLNX_MAX_SEGMENTS, /* nsegments */
4506 (PAGE_SIZE * 4), /* maxsegsize */
4507 BUS_DMA_ALLOCNOW, /* flags */
4508 NULL, /* lockfunc */
4509 NULL, /* lockfuncarg */
4510 &ha->tx_tag)) {
4511
4512 QL_DPRINT1(ha, "tx_tag alloc failed\n");
4513 return (-1);
4514 }
4515
4516 return (0);
4517}
4518
4519static void
4520qlnx_free_tx_dma_tag(qlnx_host_t *ha)

--- 16 unchanged lines hidden (view full) ---

4537 MJUM9BYTES, /* maxsize */
4538 1, /* nsegments */
4539 MJUM9BYTES, /* maxsegsize */
4540 BUS_DMA_ALLOCNOW, /* flags */
4541 NULL, /* lockfunc */
4542 NULL, /* lockfuncarg */
4543 &ha->rx_tag)) {
4544
4545 		QL_DPRINT1(ha, "rx_tag alloc failed\n");
4546
4547 return (-1);
4548 }
4549 return (0);
4550}
4551
4552static void
4553qlnx_free_rx_dma_tag(qlnx_host_t *ha)

--- 73 unchanged lines hidden (view full) ---

4627 pci_reg, reg_value, 4);
4628 return;
4629}
4630
4631
4632int
4633qlnx_pci_find_capability(void *ecore_dev, int cap)
4634{
4635 int reg;
4636 qlnx_host_t *ha;
4637
4638 ha = ecore_dev;
4639
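	/*
	 * Note: the 'cap' argument is ignored here; this always looks up
	 * the PCI Express capability and returns its config-space offset,
	 * or 0 if the lookup fails.
	 */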
4640 if (pci_find_cap(ha->pci_dev, PCIY_EXPRESS, &reg) == 0)
4641 return reg;
4642 else {
4643 QL_DPRINT1(ha, "failed\n");
4644 return 0;
4645 }
4646}
4647
4648uint32_t
4649qlnx_reg_rd32(void *hwfn, uint32_t reg_addr)
4650{
4651 uint32_t data32;

--- 370 unchanged lines hidden (view full) ---

5022}
5023
5024void
5025qlnx_get_protocol_stats(void *cdev, int proto_type, void *proto_stats)
5026{
5027 enum ecore_mcp_protocol_type type;
5028 union ecore_mcp_protocol_stats *stats;
5029 struct ecore_eth_stats eth_stats;
5030 qlnx_host_t *ha;
5031
5032 ha = cdev;
5033 stats = proto_stats;
5034 type = proto_type;
5035
5036 switch (type) {
5037
5038 case ECORE_MCP_LAN_STATS:
5039 ecore_get_vport_stats((struct ecore_dev *)cdev, &eth_stats);
5040 stats->lan_stats.ucast_rx_pkts = eth_stats.common.rx_ucast_pkts;
5041 stats->lan_stats.ucast_tx_pkts = eth_stats.common.tx_ucast_pkts;
5042 stats->lan_stats.fcs_err = -1;
5043 break;
5044
5045 default:
5046 ha->err_get_proto_invalid_type++;
5047
5048 QL_DPRINT1(ha, "invalid protocol type 0x%x\n", type);
5049 break;
5050 }
5051 return;
5052}
5053
5054static int
5055qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver)
5056{
5057 struct ecore_hwfn *p_hwfn;
5058 struct ecore_ptt *p_ptt;
5059
5060 p_hwfn = &ha->cdev.hwfns[0];
5061 p_ptt = ecore_ptt_acquire(p_hwfn);
5062
5063 if (p_ptt == NULL) {
5064 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
5065 return (-1);
5066 }
5067 ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, mfw_ver, NULL);
5068
5069 ecore_ptt_release(p_hwfn, p_ptt);
5070
5071 return (0);
5072}
5073
5074static int
5075qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size)
5076{
5077 struct ecore_hwfn *p_hwfn;
5078 struct ecore_ptt *p_ptt;
5079
5080 p_hwfn = &ha->cdev.hwfns[0];
5081 p_ptt = ecore_ptt_acquire(p_hwfn);
5082
5083 if (p_ptt == NULL) {
5084 		QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
5085 return (-1);
5086 }
5087 ecore_mcp_get_flash_size(p_hwfn, p_ptt, flash_size);
5088
5089 ecore_ptt_release(p_hwfn, p_ptt);
5090
5091 return (0);
5092}

--- 86 unchanged lines hidden (view full) ---

5179 struct ecore_hwfn *p_hwfn;
5180 int hwfn_index, rc;
5181 u16 rel_sb_id;
5182
5183 hwfn_index = sb_id % cdev->num_hwfns;
5184 p_hwfn = &cdev->hwfns[hwfn_index];
5185 rel_sb_id = sb_id / cdev->num_hwfns;
5186
5187 QL_DPRINT2(((qlnx_host_t *)cdev),
5188 "hwfn_index = %d p_hwfn = %p sb_id = 0x%x rel_sb_id = 0x%x \
5189 sb_info = %p sb_virt_addr = %p sb_phy_addr = %p\n",
5190 hwfn_index, p_hwfn, sb_id, rel_sb_id, sb_info,
5191 sb_virt_addr, (void *)sb_phy_addr);
5192
5193 rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
5194 sb_virt_addr, sb_phy_addr, rel_sb_id);
5195
5196 return rc;
5197}
5198
5199/* This function allocates fast-path status block memory */

--- 7 unchanged lines hidden (view full) ---

5207 struct ecore_dev *cdev;
5208
5209 cdev = &ha->cdev;
5210
5211 size = sizeof(*sb_virt);
5212 sb_virt = OSAL_DMA_ALLOC_COHERENT(cdev, (&sb_phys), size);
5213
5214 if (!sb_virt) {
5215 QL_DPRINT1(ha, "Status block allocation failed\n");
5216 return -ENOMEM;
5217 }
5218
5219 rc = qlnx_sb_init(cdev, sb_info, sb_virt, sb_phys, sb_id);
5220 if (rc) {
5221 OSAL_DMA_FREE_COHERENT(cdev, sb_virt, sb_phys, size);
5222 }
5223
5224 return rc;
5225}
5226
5227static void
5228qlnx_free_rx_buffers(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)

--- 79 unchanged lines hidden (view full) ---

5308
5309 cdev = &ha->cdev;
5310
5311 rx_buf_size = rxq->rx_buf_size;
5312
5313 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);
5314
5315 if (mp == NULL) {
5316 QL_DPRINT1(ha, "Failed to allocate Rx data\n");
5317 return -ENOMEM;
5318 }
5319
5320 mp->m_len = mp->m_pkthdr.len = rx_buf_size;
5321
5322 map = (bus_dmamap_t)0;
5323
5324 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
5325 BUS_DMA_NOWAIT);
5326 dma_addr = segs[0].ds_addr;
5327
5328 if (ret || !dma_addr || (nsegs != 1)) {
5329 m_freem(mp);
5330 QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
5331 ret, (long long unsigned int)dma_addr, nsegs);
5332 return -ENOMEM;
5333 }
5334
5335 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];
5336 sw_rx_data->data = mp;
5337 sw_rx_data->dma_addr = dma_addr;
5338 sw_rx_data->map = map;
5339

--- 18 unchanged lines hidden (view full) ---

5358 bus_dma_segment_t segs[1];
5359 int nsegs;
5360 int ret;
5361 struct sw_rx_data *rx_buf;
5362
5363 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);
5364
5365 if (mp == NULL) {
5366 QL_DPRINT1(ha, "Failed to allocate Rx data\n");
5367 return -ENOMEM;
5368 }
5369
5370 mp->m_len = mp->m_pkthdr.len = rx_buf_size;
5371
5372 map = (bus_dmamap_t)0;
5373
5374 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
5375 BUS_DMA_NOWAIT);
5376 dma_addr = segs[0].ds_addr;
5377
5378 if (ret || !dma_addr || (nsegs != 1)) {
5379 m_freem(mp);
5380 QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
5381 ret, (long long unsigned int)dma_addr, nsegs);
5382 return -ENOMEM;
5383 }
5384
5385 rx_buf = &tpa->rx_buf;
5386
5387 memset(rx_buf, 0, sizeof (struct sw_rx_data));
5388
5389 rx_buf->data = mp;

--- 80 unchanged lines hidden (view full) ---

5470
5471 for (i = 0; i < rxq->num_rx_buffers; i++) {
5472 rc = qlnx_alloc_rx_buffer(ha, rxq);
5473 if (rc)
5474 break;
5475 }
5476 num_allocated = i;
5477 if (!num_allocated) {
5478 QL_DPRINT1(ha, "Rx buffers allocation failed\n");
5479 goto err;
5480 } else if (num_allocated < rxq->num_rx_buffers) {
5481 		QL_DPRINT1(ha, "Allocated fewer buffers than"
5482 " desired (%d allocated)\n", num_allocated);
5483 }
5484
5485#ifdef QLNX_SOFT_LRO
5486
5487 {
5488 struct lro_ctrl *lro;
5489
5490 lro = &rxq->lro;
5491
5492#if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
5493 if (tcp_lro_init_args(lro, ifp, 0, rxq->num_rx_buffers)) {
5494 QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n",
5495 rxq->rxq_id);
5496 goto err;
5497 }
5498#else
5499 if (tcp_lro_init(lro)) {
5500 QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n",
5501 rxq->rxq_id);
5502 goto err;
5503 }
5504#endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
5505
5506 lro->ifp = ha->ifp;
5507 }
5508#endif /* #ifdef QLNX_SOFT_LRO */
5509 return 0;

--- 107 unchanged lines hidden (view full) ---

5617 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
5618 "qlnx%d_fp%d_tx_mq_lock", ha->dev_unit, fp->rss_id);
5619
5620 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
5621
5622 fp->tx_br = buf_ring_alloc(TX_RING_SIZE, M_DEVBUF,
5623 M_NOWAIT, &fp->tx_mtx);
5624 if (fp->tx_br == NULL) {
5625 QL_DPRINT1(ha, "buf_ring_alloc failed for fp[%d, %d]\n",
5626 ha->dev_unit, fp->rss_id);
5627 return -ENOMEM;
5628 }
5629 return 0;
5630}
5631
5632static int
5633qlnx_alloc_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
5634{
5635 int rc, tc;

--- 90 unchanged lines hidden (view full) ---

5726 vport_start_params.tpa_mode =
5727 (hw_lro_enable ? ECORE_TPA_MODE_RSC : ECORE_TPA_MODE_NONE);
5728 vport_start_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS;
5729
5730 vport_start_params.vport_id = vport_id;
5731 vport_start_params.mtu = mtu;
5732
5733
5734 QL_DPRINT2(ha, "Setting mtu to %d and VPORT ID = %d\n", mtu, vport_id);
5735
5736 for_each_hwfn(cdev, i) {
5737 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
5738
5739 vport_start_params.concrete_fid = p_hwfn->hw_info.concrete_fid;
5740 vport_start_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
5741
5742 rc = ecore_sp_vport_start(p_hwfn, &vport_start_params);
5743
5744 if (rc) {
5745 			QL_DPRINT1(ha, "Failed to start V-PORT %d"
5746 			    " with MTU %d\n", vport_id, mtu);
5747 return -ENOMEM;
5748 }
5749
5750 ecore_hw_start_fastpath(p_hwfn);
5751
5752 QL_DPRINT2(ha, "Started V-PORT %d with MTU %d\n",
5753 vport_id, mtu);
5754 }
5755 return 0;
5756}
5757
5758
5759static int
5760qlnx_update_vport(struct ecore_dev *cdev,
5761 struct qlnx_update_vport_params *params)

--- 21 unchanged lines hidden (view full) ---

5783 params->update_inner_vlan_removal_flg;
5784 sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg;
5785
5786 sp_params.sge_tpa_params = params->sge_tpa_params;
5787
5788 	/* RSS is a bit tricky, since the upper layer isn't familiar with hwfns.
5789 	 * We need to re-fix the RSS values per engine for CMT.
5790 */
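	/*
	 * For example (see the fp_index computation below): with
	 * num_hwfns = 2 and num_rss = 4, engine 0 maps its indirection
	 * table entries to fp 0, 2, 0, 2, ... while engine 1 maps to
	 * fp 1, 3, 1, 3, ... so each engine only references queues it owns.
	 */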
5791 if (params->rss_params->update_rss_config)
5792 sp_params.rss_params = params->rss_params;
5793 else
5794 sp_params.rss_params = NULL;
5795
5796 for_each_hwfn(cdev, i) {
5797
5798 p_hwfn = &cdev->hwfns[i];
5799
5800 if ((cdev->num_hwfns > 1) &&
5801 params->rss_params->update_rss_config &&
5802 params->rss_params->rss_enable) {

--- 5 unchanged lines hidden (view full) ---

5808 fp_index = ((cdev->num_hwfns * j) + i) %
5809 ha->num_rss;
5810
5811 fp = &ha->fp_array[fp_index];
5812 rss->rss_ind_table[j] = fp->rxq->handle;
5813 }
5814
5815 for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE;) {
5816 QL_DPRINT3(ha, "%p %p %p %p %p %p %p %p \n",
5817 rss->rss_ind_table[j],
5818 rss->rss_ind_table[j+1],
5819 rss->rss_ind_table[j+2],
5820 rss->rss_ind_table[j+3],
5821 rss->rss_ind_table[j+4],
5822 rss->rss_ind_table[j+5],
5823 rss->rss_ind_table[j+6],
5824 rss->rss_ind_table[j+7]);
5825 j += 8;
5826 }
5827 }
5828
5829 sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
5830
5831 QL_DPRINT1(ha, "Update sp vport ID=%d\n", params->vport_id);
5832
5833 rc = ecore_sp_vport_update(p_hwfn, &sp_params,
5834 ECORE_SPQ_MODE_EBLOCK, NULL);
5835 if (rc) {
5836 QL_DPRINT1(ha, "Failed to update VPORT\n");
5837 return rc;
5838 }
5839
5840 		QL_DPRINT2(ha, "Updated V-PORT %d: tx_active_flag %d,"
5841 		    " rx_active_flag %d [tx_update %d], [rx_update %d]\n",
5842 params->vport_id, params->vport_active_tx_flg,
5843 params->vport_active_rx_flg,
5844 params->update_vport_active_tx_flg,
5845 params->update_vport_active_rx_flg);
5846 }
5847
5848 return 0;
5849}
5850
5851static void
5852qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq)
5853{

--- 77 unchanged lines hidden (view full) ---

5931 struct ifnet *ifp;
5932 struct ecore_hwfn *p_hwfn;
5933 struct ecore_sge_tpa_params tpa_params;
5934 struct ecore_queue_start_common_params qparams;
5935 struct qlnx_fastpath *fp;
5936
5937 ifp = ha->ifp;
5938
5939 QL_DPRINT1(ha, "Num RSS = %d\n", ha->num_rss);
5940
5941 if (!ha->num_rss) {
5942 		QL_DPRINT1(ha, "Cannot set V-PORT active:"
5943 		    " there are no Rx queues\n");
5944 return -EINVAL;
5945 }
5946
5947#ifndef QLNX_SOFT_LRO
5948 hw_lro_enable = ifp->if_capenable & IFCAP_LRO;
5949#endif /* #ifndef QLNX_SOFT_LRO */
5950
5951 rc = qlnx_start_vport(cdev, vport_id, ifp->if_mtu, drop_ttl0_flg,
5952 vlan_removal_en, tx_switching, hw_lro_enable);
5953
5954 if (rc) {
5955 QL_DPRINT1(ha, "Start V-PORT failed %d\n", rc);
5956 return rc;
5957 }
5958
5959 QL_DPRINT2(ha, "Start vport ramrod passed, "
5960 "vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
5961 vport_id, (int)(ifp->if_mtu + 0xe), vlan_removal_en);
5962
5963 for_each_rss(i) {
5964 struct ecore_rxq_start_ret_params rx_ret_params;
5965 struct ecore_txq_start_ret_params tx_ret_params;
5966
5967 fp = &ha->fp_array[i];
5968 p_hwfn = &cdev->hwfns[(fp->rss_id % cdev->num_hwfns)];
5969

--- 16 unchanged lines hidden (view full) ---

5986 fp->rxq->rx_bd_ring.p_phys_addr,
5987 /* cqe_pbl_addr */
5988 ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring),
5989 /* cqe_pbl_size */
5990 ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring),
5991 &rx_ret_params);
5992
5993 if (rc) {
5994 QL_DPRINT1(ha, "Start RXQ #%d failed %d\n", i, rc);
5995 return rc;
5996 }
5997
5998 fp->rxq->hw_rxq_prod_addr = rx_ret_params.p_prod;
5999 fp->rxq->handle = rx_ret_params.p_handle;
6000 fp->rxq->hw_cons_ptr =
6001 &fp->sb_info->sb_virt->pi_array[RX_PI];
6002

--- 17 unchanged lines hidden (view full) ---

6020 p_hwfn->hw_info.opaque_fid,
6021 &qparams, tc,
6022 /* bd_chain_phys_addr */
6023 ecore_chain_get_pbl_phys(&txq->tx_pbl),
6024 ecore_chain_get_page_cnt(&txq->tx_pbl),
6025 &tx_ret_params);
6026
6027 if (rc) {
6028 QL_DPRINT1(ha, "Start TXQ #%d failed %d\n",
6029 txq->index, rc);
6030 return rc;
6031 }
6032
6033 txq->doorbell_addr = tx_ret_params.p_doorbell;
6034 txq->handle = tx_ret_params.p_handle;
6035
6036 txq->hw_cons_ptr =
6037 &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];

--- 63 unchanged lines hidden (view full) ---

6101 tpa_params.tpa_min_size_to_start = ifp->if_mtu/2;
6102 tpa_params.tpa_min_size_to_cont = ifp->if_mtu/2;
6103
6104 vport_update_params.sge_tpa_params = &tpa_params;
6105 }
6106
6107 rc = qlnx_update_vport(cdev, &vport_update_params);
6108 if (rc) {
6109 QL_DPRINT1(ha, "Update V-PORT failed %d\n", rc);
6110 return rc;
6111 }
6112
6113 return 0;
6114}
6115
6116static int
6117qlnx_drain_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
6118 struct qlnx_tx_queue *txq)
6119{
6120 uint16_t hw_bd_cons;
6121 uint16_t ecore_cons_idx;
6122
6123 QL_DPRINT2(ha, "enter\n");
6124
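	/*
	 * Wait until the driver's chain consumer index matches the hardware
	 * consumer index, reaping tx completions as we go.  Note there is
	 * no timeout: if the device stops completing, this loop spins
	 * (with a 2ms delay per pass) indefinitely.
	 */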
6125 hw_bd_cons = le16toh(*txq->hw_cons_ptr);
6126
6127 while (hw_bd_cons !=
6128 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
6129
6130 mtx_lock(&fp->tx_mtx);
6131
6132 (void)qlnx_tx_int(ha, fp, txq);
6133
6134 mtx_unlock(&fp->tx_mtx);
6135
6136 qlnx_mdelay(__func__, 2);
6137
6138 hw_bd_cons = le16toh(*txq->hw_cons_ptr);
6139 }
6140
6141 QL_DPRINT2(ha, "[%d, %d]: done\n", fp->rss_id, txq->index);
6142
6143 return 0;
6144}
6145
6146static int
6147qlnx_stop_queues(qlnx_host_t *ha)
6148{
6149 struct qlnx_update_vport_params vport_update_params;

--- 13 unchanged lines hidden (view full) ---

6163 vport_update_params.update_vport_active_rx_flg = 1;
6164 vport_update_params.vport_active_rx_flg = 0;
6165 vport_update_params.rss_params = &ha->rss_params;
6166 vport_update_params.rss_params->update_rss_config = 0;
6167 vport_update_params.rss_params->rss_enable = 0;
6168 vport_update_params.update_inner_vlan_removal_flg = 0;
6169 vport_update_params.inner_vlan_removal_flg = 0;
6170
6171 QL_DPRINT1(ha, "Update vport ID= %d\n", vport_update_params.vport_id);
6172
6173 rc = qlnx_update_vport(cdev, &vport_update_params);
6174 if (rc) {
6175 QL_DPRINT1(ha, "Failed to update vport\n");
6176 return rc;
6177 }
6178
6179 /* Flush Tx queues. If needed, request drain from MCP */
6180 for_each_rss(i) {
6181 fp = &ha->fp_array[i];
6182
6183 for (tc = 0; tc < ha->num_tc; tc++) {

--- 16 unchanged lines hidden (view full) ---

6200 for (tc = 0; tc < ha->num_tc; tc++) {
6201 int tx_queue_id;
6202
6203 tx_queue_id = tc * ha->num_rss + i;
6204 rc = ecore_eth_tx_queue_stop(p_hwfn,
6205 fp->txq[tc]->handle);
6206
6207 if (rc) {
6208 QL_DPRINT1(ha, "Failed to stop TXQ #%d\n",
6209 tx_queue_id);
6210 return rc;
6211 }
6212 }
6213
6214 /* Stop the Rx Queue*/
6215 rc = ecore_eth_rx_queue_stop(p_hwfn, fp->rxq->handle, false,
6216 false);
6217 if (rc) {
6218 QL_DPRINT1(ha, "Failed to stop RXQ #%d\n", i);
6219 return rc;
6220 }
6221 }
6222
6223 /* Stop the vport */
6224 for_each_hwfn(cdev, i) {
6225
6226 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
6227
6228 rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 0);
6229
6230 if (rc) {
6231 QL_DPRINT1(ha, "Failed to stop VPORT\n");
6232 return rc;
6233 }
6234 }
6235
6236 return rc;
6237}
6238
6239static int

--- 262 unchanged lines hidden (view full) ---

6502 int i;
6503 int rc = 0;
6504 struct ecore_dev *cdev;
6505 device_t dev;
6506
6507 cdev = &ha->cdev;
6508 dev = ha->pci_dev;
6509
6510 QL_DPRINT2(ha, "enter\n");
6511
6512 rc = qlnx_alloc_mem_arrays(ha);
6513 if (rc)
6514 goto qlnx_load_exit0;
6515
6516 qlnx_init_fp(ha);
6517
6518 rc = qlnx_alloc_mem_load(ha);
6519 if (rc)
6520 goto qlnx_load_exit1;
6521
6522 QL_DPRINT2(ha, "Allocated %d RSS queues on %d TC/s\n",
6523 ha->num_rss, ha->num_tc);
6524
6525 for (i = 0; i < ha->num_rss; i++) {
6526
6527 if ((rc = bus_setup_intr(dev, ha->irq_vec[i].irq,
6528 (INTR_TYPE_NET | INTR_MPSAFE),
6529 NULL, qlnx_fp_isr, &ha->irq_vec[i],
6530 &ha->irq_vec[i].handle))) {
6531
6532 QL_DPRINT1(ha, "could not setup interrupt\n");
6533 goto qlnx_load_exit2;
6534 }
6535
6536 		QL_DPRINT2(ha, "rss_id = %d irq_rid %d"
6537 		    " irq %p handle %p\n", i,
6538 ha->irq_vec[i].irq_rid,
6539 ha->irq_vec[i].irq, ha->irq_vec[i].handle);
6540
6541 bus_bind_intr(dev, ha->irq_vec[i].irq, (i % mp_ncpus));
6542 }
6543
6544 rc = qlnx_start_queues(ha);
6545 if (rc)
6546 goto qlnx_load_exit2;
6547
6548 QL_DPRINT2(ha, "Start VPORT, RXQ and TXQ succeeded\n");
6549
6550 /* Add primary mac and set Rx filters */
6551 rc = qlnx_set_rx_mode(ha);
6552 if (rc)
6553 goto qlnx_load_exit2;
6554
6555 /* Ask for link-up using current configuration */
6556 qlnx_set_link(ha, true);

--- 9 unchanged lines hidden (view full) ---

6566
6567qlnx_load_exit2:
6568 qlnx_free_mem_load(ha);
6569
6570qlnx_load_exit1:
6571 ha->num_rss = 0;
6572
6573qlnx_load_exit0:
6574 QL_DPRINT2(ha, "exit [%d]\n", rc);
6575 return rc;
6576}
6577
6578static void
6579qlnx_drain_soft_lro(qlnx_host_t *ha)
6580{
6581#ifdef QLNX_SOFT_LRO
6582

--- 40 unchanged lines hidden (view full) ---

6623{
6624 struct ecore_dev *cdev;
6625 device_t dev;
6626 int i;
6627
6628 cdev = &ha->cdev;
6629 dev = ha->pci_dev;
6630
6631 QL_DPRINT2(ha, "enter\n");
6632 	QL_DPRINT1(ha, "QLNX STATE = %d\n", ha->state);
6633
6634 if (ha->state == QLNX_STATE_OPEN) {
6635
6636 qlnx_set_link(ha, false);
6637 qlnx_clean_filters(ha);
6638 qlnx_stop_queues(ha);
6639 ecore_hw_stop_fastpath(cdev);
6640

--- 13 unchanged lines hidden (view full) ---

6654
6655 if (ha->flags.callout_init)
6656 callout_drain(&ha->qlnx_callout);
6657
6658 qlnx_mdelay(__func__, 1000);
6659
6660 ha->state = QLNX_STATE_CLOSED;
6661
6662 QL_DPRINT2(ha, "exit\n");
6663 return;
6664}
6665
6666static int
6667qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
6668{
6669 int rval = -1;
6670 struct ecore_hwfn *p_hwfn;
6671 struct ecore_ptt *p_ptt;
6672
6673 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
6674
6675 p_hwfn = &ha->cdev.hwfns[hwfn_index];
6676 p_ptt = ecore_ptt_acquire(p_hwfn);
6677
6678 if (!p_ptt) {
6679 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
6680 return (rval);
6681 }
6682
6683 rval = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);
6684
6685 if (rval == DBG_STATUS_OK)
6686 rval = 0;
6687 else {
6688 QL_DPRINT1(ha, "ecore_dbg_grc_get_dump_buf_size failed"
6689 "[0x%x]\n", rval);
6690 }
6691
6692 ecore_ptt_release(p_hwfn, p_ptt);
6693
6694 return (rval);
6695}
6696
6697static int

--- 4 unchanged lines hidden (view full) ---

6702 struct ecore_ptt *p_ptt;
6703
6704 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
6705
6706 p_hwfn = &ha->cdev.hwfns[hwfn_index];
6707 p_ptt = ecore_ptt_acquire(p_hwfn);
6708
6709 if (!p_ptt) {
6710 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
6711 return (rval);
6712 }
6713
6714 rval = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);
6715
6716 if (rval == DBG_STATUS_OK)
6717 rval = 0;
6718 else {
6719 QL_DPRINT1(ha, "ecore_dbg_idle_chk_get_dump_buf_size failed"
6720 " [0x%x]\n", rval);
6721 }
6722
6723 ecore_ptt_release(p_hwfn, p_ptt);
6724
6725 return (rval);
6726}
6727
6728

--- 264 unchanged lines hidden ---