/*-
 * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.2/sys/dev/bxe/ecore_sp.h 272004 2014-09-22 21:12:30Z davidcs $");

#ifndef ECORE_SP_H
#define ECORE_SP_H


#include <sys/types.h>
#include <sys/endian.h>
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <machine/bus.h>
#include <net/ethernet.h>

/*
 * The shared ecore code expects Linux-style LITTLE_ENDIAN/BIG_ENDIAN
 * macros; derive them from the FreeBSD _BYTE_ORDER definition and make
 * sure the opposite-endian macros are not simultaneously defined.
 */
#if _BYTE_ORDER == _LITTLE_ENDIAN
#ifndef LITTLE_ENDIAN
#define LITTLE_ENDIAN
#endif
#ifndef __LITTLE_ENDIAN
#define __LITTLE_ENDIAN
#endif
#undef BIG_ENDIAN
#undef __BIG_ENDIAN
#else /* _BIG_ENDIAN */
#ifndef BIG_ENDIAN
#define BIG_ENDIAN
#endif
#ifndef __BIG_ENDIAN
#define __BIG_ENDIAN
#endif
#undef LITTLE_ENDIAN
#undef __LITTLE_ENDIAN
#endif

#include "ecore_mfw_req.h"
#include "ecore_fw_defs.h"
#include "ecore_hsi.h"
#include "ecore_reg.h"

struct bxe_softc;
typedef bus_addr_t ecore_dma_addr_t; /* expected to be 64 bit wide */
typedef volatile int ecore_atomic_t;

/* Provide 'bool' when <stdbool.h> has not been pulled in. */
#ifndef __bool_true_false_are_defined
#ifndef __cplusplus
#define bool _Bool
#if __STDC_VERSION__ < 199901L && __GNUC__ < 3 && !defined(__INTEL_COMPILER)
typedef _Bool bool;
#endif
#endif /* !__cplusplus */
#endif /* !__bool_true_false_are_defined */

#define ETH_ALEN ETHER_ADDR_LEN /* 6 */

/* Software CID layout: low 17 bits are the CID, bits above are flags. */
#define ECORE_SWCID_SHIFT   17
#define ECORE_SWCID_MASK    ((0x1 << ECORE_SWCID_SHIFT) - 1)

/* Approximate-match multicast hash table in TSTORM internal memory. */
#define ECORE_MC_HASH_SIZE 8
#define ECORE_MC_HASH_OFFSET(sc, i)                                          \
    (BAR_TSTRORM_INTMEM +                                                    \
     TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(FUNC_ID(sc)) + i*4)

#define ECORE_MAX_MULTICAST   64
#define ECORE_MAX_EMUL_MULTI  1

/* NOTE: expands against an 'sc' variable in the caller's scope */
#define IRO sc->iro_array

/*
 * OS abstraction: map ecore locking primitives onto FreeBSD mtx(9).
 * Both the "mutex" and the "spin" flavors use MTX_DEF (sleep mutexes);
 * the spin variant is not a true spin lock on this platform.
 */
typedef struct mtx ECORE_MUTEX;
#define ECORE_MUTEX_INIT(_mutex) \
    mtx_init(_mutex, "ecore_lock", "ECORE Lock", MTX_DEF)
#define ECORE_MUTEX_LOCK(_mutex)   mtx_lock(_mutex)
#define ECORE_MUTEX_UNLOCK(_mutex) mtx_unlock(_mutex)

typedef struct mtx ECORE_MUTEX_SPIN;
#define ECORE_SPIN_LOCK_INIT(_spin, _sc) \
    mtx_init(_spin, "ecore_lock", "ECORE Lock", MTX_DEF)
#define ECORE_SPIN_LOCK_BH(_spin)   mtx_lock(_spin)   /* bh = bottom-half */
#define ECORE_SPIN_UNLOCK_BH(_spin) mtx_unlock(_spin) /* bh = bottom-half */

/* Memory barriers: all flavors collapse onto full/read/write barriers. */
#define ECORE_SMP_MB_AFTER_CLEAR_BIT()  mb()
#define ECORE_SMP_MB_BEFORE_CLEAR_BIT() mb()
#define ECORE_SMP_MB()                  mb()
#define ECORE_SMP_RMB()                 rmb()
#define ECORE_SMP_WMB()                 wmb()
#define ECORE_MMIOWB()                  wmb()

/* Bit operations on 'unsigned long' state words. */
#define ECORE_SET_BIT_NA(bit, var)   bit_set(var, bit)   /* non-atomic */
#define ECORE_CLEAR_BIT_NA(bit, var) bit_clear(var, bit) /* non-atomic */
#define ECORE_TEST_BIT(bit, var)     bxe_test_bit(bit, var)
#define ECORE_SET_BIT(bit, var)      bxe_set_bit(bit, var)
#define ECORE_CLEAR_BIT(bit, var)    bxe_clear_bit(bit, var)
#define ECORE_TEST_AND_CLEAR_BIT(bit, var) bxe_test_and_clear_bit(bit, var)

#define ECORE_ATOMIC_READ(a) atomic_load_acq_int((volatile int *)a)
#define ECORE_ATOMIC_SET(a, v) atomic_store_rel_int((volatile int *)a, v)
#define ECORE_ATOMIC_CMPXCHG(a, o, n) bxe_cmpxchg((volatile int *)a, o, n)

#define ECORE_RET_PENDING(pending_bit, pending) \
    (ECORE_TEST_BIT(pending_bit, pending) ? ECORE_PENDING : ECORE_SUCCESS)

/* Clear the 'mask' field in 'value' and store 'flag' shifted into place. */
#define ECORE_SET_FLAG(value, mask, flag)      \
    do {                                       \
        (value) &= ~(mask);                    \
        (value) |= ((flag) << (mask##_SHIFT)); \
    } while (0)

/* NOTE(review): this "get" uses '&=', so it also CLEARS every bit of
 * 'value' outside 'mask' as a side effect; the upstream Linux bnx2x
 * equivalent uses plain '&'. Confirm callers depend on (or tolerate)
 * the mutation before changing it. */
#define ECORE_GET_FLAG(value, mask) \
    (((value) &= (mask)) >> (mask##_SHIFT))

#define ECORE_MIGHT_SLEEP()

#define ECORE_FCOE_CID(sc) ((sc)->fp[FCOE_IDX(sc)].cl_id)

#define ECORE_MEMCMP(_a, _b, _s) memcmp(_a, _b, _s)
#define ECORE_MEMCPY(_a, _b, _s) memcpy(_a, _b, _s)
#define ECORE_MEMSET(_a, _c, _s) memset(_a, _c, _s)

#define ECORE_CPU_TO_LE16(x) htole16(x)
#define ECORE_CPU_TO_LE32(x) htole32(x)

/* NOTE: ECORE_WAIT busy-waits a fixed 1ms and ignores both arguments. */
#define ECORE_WAIT(_s, _t) DELAY(1000)
#define ECORE_MSLEEP(_t)   DELAY((_t) * 1000)

#define ECORE_LIKELY(x)   __predict_true(x)
#define ECORE_UNLIKELY(x) __predict_false(x)

/* Zeroed allocations from the M_TEMP malloc type; the _flags/_sc
 * arguments are accepted for API compatibility but unused here. */
#define ECORE_ZALLOC(_size, _flags, _sc) \
    malloc(_size, M_TEMP, (M_NOWAIT | M_ZERO))

/* NOTE(review): no overflow check on _len * _size — callers appear to
 * pass small fixed counts; verify before reusing with external sizes. */
#define ECORE_CALLOC(_len, _size, _flags, _sc) \
    malloc(_len * _size, M_TEMP, (M_NOWAIT | M_ZERO))

#define ECORE_FREE(_s, _buf, _size) free(_buf, M_TEMP)

#define SC_ILT(sc)  ((sc)->ilt)
#define ILOG2(x)    bxe_ilog2(x)

/*
 * Allocate a DMA-able buffer for an ILT line.
 * On success 'x' points at the bxe_dma descriptor and '*y' receives the
 * bus (physical) address; on failure x == NULL and *y == 0.
 * Expands against an 'sc' variable in the caller's scope.
 */
#define ECORE_ILT_ZALLOC(x, y, size)                                       \
    do {                                                                   \
        x = malloc(sizeof(struct bxe_dma), M_DEVBUF, (M_NOWAIT | M_ZERO)); \
        if (x) {                                                           \
            if (bxe_dma_alloc((struct bxe_softc *)sc,                      \
                              size, (struct bxe_dma *)x,                   \
                              "ECORE_ILT") != 0) {                         \
                free(x, M_DEVBUF);                                         \
                x = NULL;                                                  \
                *y = 0;                                                    \
            } else {                                                       \
                *y = ((struct bxe_dma *)x)->paddr;                         \
            }                                                              \
        }                                                                  \
    } while (0)

/* Release a buffer obtained with ECORE_ILT_ZALLOC; clears x and y. */
#define ECORE_ILT_FREE(x, y, size)                   \
    do {                                             \
        if (x) {                                     \
            bxe_dma_free((struct bxe_softc *)sc, x); \
            free(x, M_DEVBUF);                       \
            x = NULL;                                \
            y = 0;                                   \
        }                                            \
    } while (0)

/* NOTE: validation is stubbed out — every address is reported valid. */
#define ECORE_IS_VALID_ETHER_ADDR(_mac) TRUE

#define ECORE_IS_MF_SD_MODE   IS_MF_SD_MODE
#define ECORE_IS_MF_SI_MODE   IS_MF_SI_MODE
#define ECORE_IS_MF_AFEX_MODE IS_MF_AFEX_MODE

#define ECORE_SET_CTX_VALIDATION bxe_set_ctx_validation

#define ECORE_UPDATE_COALESCE_SB_INDEX bxe_update_coalesce_sb_index

/* Round 'x' up to a multiple of 'a'. */
#define ECORE_ALIGN(x, a) ((((x) + (a) - 1) / (a)) * (a))

#define ECORE_REG_WR_DMAE_LEN REG_WR_DMAE_LEN
206255736Sdavidch 207255736Sdavidch#define ECORE_PATH_ID SC_PATH 208255736Sdavidch#define ECORE_PORT_ID SC_PORT 209255736Sdavidch#define ECORE_FUNC_ID SC_FUNC 210255736Sdavidch#define ECORE_ABS_FUNC_ID SC_ABS_FUNC 211255736Sdavidch 212255736Sdavidchuint32_t calc_crc32(uint8_t *crc32_packet, uint32_t crc32_length, 213255736Sdavidch uint32_t crc32_seed, uint8_t complement); 214255736Sdavidchstatic inline uint32_t 215255736SdavidchECORE_CRC32_LE(uint32_t seed, uint8_t *mac, uint32_t len) 216255736Sdavidch{ 217255736Sdavidch uint32_t packet_buf[2] = {0}; 218255736Sdavidch memcpy(((uint8_t *)(&packet_buf[0]))+2, &mac[0], 2); 219255736Sdavidch memcpy(&packet_buf[1], &mac[2], 4); 220255736Sdavidch return bswap32(calc_crc32((uint8_t *)packet_buf, 8, seed, 0)); 221255736Sdavidch} 222255736Sdavidch 223255736Sdavidch#define ecore_sp_post(_sc, _a, _b, _c, _d) \ 224255736Sdavidch bxe_sp_post(_sc, _a, _b, U64_HI(_c), U64_LO(_c), _d) 225255736Sdavidch 226271725Sdavidcs#ifdef ECORE_STOP_ON_ERROR 227271725Sdavidcs 228255736Sdavidch#define ECORE_DBG_BREAK_IF(exp) \ 229255736Sdavidch do { \ 230255736Sdavidch if (__predict_false(exp)) { \ 231255736Sdavidch panic("ECORE"); \ 232255736Sdavidch } \ 233255736Sdavidch } while (0) 234255736Sdavidch 235255736Sdavidch#define ECORE_BUG() \ 236255736Sdavidch do { \ 237255736Sdavidch panic("BUG (%s:%d)", __FILE__, __LINE__); \ 238255736Sdavidch } while(0); 239255736Sdavidch 240255736Sdavidch#define ECORE_BUG_ON(exp) \ 241255736Sdavidch do { \ 242255736Sdavidch if (__predict_true(exp)) { \ 243255736Sdavidch panic("BUG_ON (%s:%d)", __FILE__, __LINE__); \ 244255736Sdavidch } \ 245255736Sdavidch } while (0) 246255736Sdavidch 247271725Sdavidcs#else 248271725Sdavidcs 249272004Sdavidcsextern unsigned long bxe_debug; 250272004Sdavidcs 251272004Sdavidcs#define BXE_DEBUG_ECORE_DBG_BREAK_IF 0x01 252272004Sdavidcs#define BXE_DEBUG_ECORE_BUG 0x02 253272004Sdavidcs#define BXE_DEBUG_ECORE_BUG_ON 0x04 254272004Sdavidcs 255271725Sdavidcs#define 
ECORE_DBG_BREAK_IF(exp) \ 256272004Sdavidcs if (bxe_debug & BXE_DEBUG_ECORE_DBG_BREAK_IF) \ 257272004Sdavidcs printf("%s (%s,%d)\n", __FUNCTION__, __FILE__, __LINE__); 258271725Sdavidcs 259271725Sdavidcs#define ECORE_BUG(exp) \ 260272004Sdavidcs if (bxe_debug & BXE_DEBUG_ECORE_BUG) \ 261272004Sdavidcs printf("%s (%s,%d)\n", __FUNCTION__, __FILE__, __LINE__); 262271725Sdavidcs 263271725Sdavidcs#define ECORE_BUG_ON(exp) \ 264272004Sdavidcs if (bxe_debug & BXE_DEBUG_ECORE_BUG_ON) \ 265272004Sdavidcs printf("%s (%s,%d)\n", __FUNCTION__, __FILE__, __LINE__); 266271725Sdavidcs 267271725Sdavidcs 268271725Sdavidcs#endif /* #ifdef ECORE_STOP_ON_ERROR */ 269271725Sdavidcs 270255736Sdavidch#define ECORE_ERR(str, ...) \ 271255736Sdavidch BLOGE(sc, "ECORE: " str, ##__VA_ARGS__) 272255736Sdavidch 273255736Sdavidch#define DBG_SP 0x00000004 /* defined in bxe.h */ 274255736Sdavidch 275255736Sdavidch#define ECORE_MSG(sc, m, ...) \ 276255736Sdavidch BLOGD(sc, DBG_SP, "ECORE: " m, ##__VA_ARGS__) 277255736Sdavidch 278255736Sdavidchtypedef struct _ecore_list_entry_t 279255736Sdavidch{ 280255736Sdavidch struct _ecore_list_entry_t *next, *prev; 281255736Sdavidch} ecore_list_entry_t; 282255736Sdavidch 283255736Sdavidchtypedef struct ecore_list_t 284255736Sdavidch{ 285255736Sdavidch ecore_list_entry_t *head, *tail; 286255736Sdavidch unsigned long cnt; 287255736Sdavidch} ecore_list_t; 288255736Sdavidch 289255736Sdavidch/* initialize the list */ 290255736Sdavidch#define ECORE_LIST_INIT(_list) \ 291255736Sdavidch do { \ 292255736Sdavidch (_list)->head = NULL; \ 293255736Sdavidch (_list)->tail = NULL; \ 294255736Sdavidch (_list)->cnt = 0; \ 295255736Sdavidch } while (0) 296255736Sdavidch 297255736Sdavidch/* return TRUE if the element is the last on the list */ 298255736Sdavidch#define ECORE_LIST_IS_LAST(_elem, _list) \ 299255736Sdavidch (_elem == (_list)->tail) 300255736Sdavidch 301255736Sdavidch/* return TRUE if the list is empty */ 302255736Sdavidch#define ECORE_LIST_IS_EMPTY(_list) \ 
303255736Sdavidch ((_list)->cnt == 0) 304255736Sdavidch 305255736Sdavidch/* return the first element */ 306255736Sdavidch#define ECORE_LIST_FIRST_ENTRY(_list, cast, _link) \ 307255736Sdavidch (cast *)((_list)->head) 308255736Sdavidch 309255736Sdavidch/* return the next element */ 310255736Sdavidch#define ECORE_LIST_NEXT(_elem, _link, cast) \ 311255736Sdavidch (cast *)((&((_elem)->_link))->next) 312255736Sdavidch 313255736Sdavidch/* push an element on the head of the list */ 314255736Sdavidch#define ECORE_LIST_PUSH_HEAD(_elem, _list) \ 315255736Sdavidch do { \ 316255736Sdavidch (_elem)->prev = (ecore_list_entry_t *)0; \ 317255736Sdavidch (_elem)->next = (_list)->head; \ 318255736Sdavidch if ((_list)->tail == (ecore_list_entry_t *)0) { \ 319255736Sdavidch (_list)->tail = (_elem); \ 320255736Sdavidch } else { \ 321255736Sdavidch (_list)->head->prev = (_elem); \ 322255736Sdavidch } \ 323255736Sdavidch (_list)->head = (_elem); \ 324255736Sdavidch (_list)->cnt++; \ 325255736Sdavidch } while (0) 326255736Sdavidch 327255736Sdavidch/* push an element on the tail of the list */ 328255736Sdavidch#define ECORE_LIST_PUSH_TAIL(_elem, _list) \ 329255736Sdavidch do { \ 330255736Sdavidch (_elem)->next = (ecore_list_entry_t *)0; \ 331255736Sdavidch (_elem)->prev = (_list)->tail; \ 332255736Sdavidch if ((_list)->tail) { \ 333255736Sdavidch (_list)->tail->next = (_elem); \ 334255736Sdavidch } else { \ 335255736Sdavidch (_list)->head = (_elem); \ 336255736Sdavidch } \ 337255736Sdavidch (_list)->tail = (_elem); \ 338255736Sdavidch (_list)->cnt++; \ 339255736Sdavidch } while (0) 340255736Sdavidch 341255736Sdavidch/* push list1 on the head of list2 and return with list1 as empty */ 342255736Sdavidch#define ECORE_LIST_SPLICE_INIT(_list1, _list2) \ 343255736Sdavidch do { \ 344255736Sdavidch (_list1)->tail->next = (_list2)->head; \ 345255736Sdavidch if ((_list2)->head) { \ 346255736Sdavidch (_list2)->head->prev = (_list1)->tail; \ 347255736Sdavidch } else { \ 348255736Sdavidch (_list2)->tail 
= (_list1)->tail; \ 349255736Sdavidch } \ 350255736Sdavidch (_list2)->head = (_list1)->head; \ 351255736Sdavidch (_list2)->cnt += (_list1)->cnt; \ 352255736Sdavidch (_list1)->head = NULL; \ 353255736Sdavidch (_list1)->tail = NULL; \ 354255736Sdavidch (_list1)->cnt = 0; \ 355255736Sdavidch } while (0) 356255736Sdavidch 357255736Sdavidch/* remove an element from the list */ 358255736Sdavidch#define ECORE_LIST_REMOVE_ENTRY(_elem, _list) \ 359255736Sdavidch do { \ 360255736Sdavidch if ((_list)->head == (_elem)) { \ 361255736Sdavidch if ((_list)->head) { \ 362255736Sdavidch (_list)->head = (_list)->head->next; \ 363255736Sdavidch if ((_list)->head) { \ 364255736Sdavidch (_list)->head->prev = (ecore_list_entry_t *)0; \ 365255736Sdavidch } else { \ 366255736Sdavidch (_list)->tail = (ecore_list_entry_t *)0; \ 367255736Sdavidch } \ 368255736Sdavidch (_list)->cnt--; \ 369255736Sdavidch } \ 370255736Sdavidch } else if ((_list)->tail == (_elem)) { \ 371255736Sdavidch if ((_list)->tail) { \ 372255736Sdavidch (_list)->tail = (_list)->tail->prev; \ 373255736Sdavidch if ((_list)->tail) { \ 374255736Sdavidch (_list)->tail->next = (ecore_list_entry_t *)0; \ 375255736Sdavidch } else { \ 376255736Sdavidch (_list)->head = (ecore_list_entry_t *)0; \ 377255736Sdavidch } \ 378255736Sdavidch (_list)->cnt--; \ 379255736Sdavidch } \ 380255736Sdavidch } else { \ 381255736Sdavidch (_elem)->prev->next = (_elem)->next; \ 382255736Sdavidch (_elem)->next->prev = (_elem)->prev; \ 383255736Sdavidch (_list)->cnt--; \ 384255736Sdavidch } \ 385255736Sdavidch } while (0) 386255736Sdavidch 387255736Sdavidch/* walk the list */ 388255736Sdavidch#define ECORE_LIST_FOR_EACH_ENTRY(pos, _list, _link, cast) \ 389255736Sdavidch for (pos = ECORE_LIST_FIRST_ENTRY(_list, cast, _link); \ 390255736Sdavidch pos; \ 391255736Sdavidch pos = ECORE_LIST_NEXT(pos, _link, cast)) 392255736Sdavidch 393255736Sdavidch/* walk the list (safely) */ 394255736Sdavidch#define ECORE_LIST_FOR_EACH_ENTRY_SAFE(pos, n, _list, _link, cast) 
\ 395255736Sdavidch for (pos = ECORE_LIST_FIRST_ENTRY(_list, cast, _lint), \ 396255736Sdavidch n = (pos) ? ECORE_LIST_NEXT(pos, _link, cast) : NULL; \ 397255736Sdavidch pos != NULL; \ 398255736Sdavidch pos = (cast *)n, \ 399255736Sdavidch n = (pos) ? ECORE_LIST_NEXT(pos, _link, cast) : NULL) 400255736Sdavidch 401255736Sdavidch 402255736Sdavidch/* Manipulate a bit vector defined as an array of uint64_t */ 403255736Sdavidch 404255736Sdavidch/* Number of bits in one sge_mask array element */ 405255736Sdavidch#define BIT_VEC64_ELEM_SZ 64 406255736Sdavidch#define BIT_VEC64_ELEM_SHIFT 6 407255736Sdavidch#define BIT_VEC64_ELEM_MASK ((uint64_t)BIT_VEC64_ELEM_SZ - 1) 408255736Sdavidch 409255736Sdavidch#define __BIT_VEC64_SET_BIT(el, bit) \ 410255736Sdavidch do { \ 411255736Sdavidch el = ((el) | ((uint64_t)0x1 << (bit))); \ 412255736Sdavidch } while (0) 413255736Sdavidch 414255736Sdavidch#define __BIT_VEC64_CLEAR_BIT(el, bit) \ 415255736Sdavidch do { \ 416255736Sdavidch el = ((el) & (~((uint64_t)0x1 << (bit)))); \ 417255736Sdavidch } while (0) 418255736Sdavidch 419255736Sdavidch#define BIT_VEC64_SET_BIT(vec64, idx) \ 420255736Sdavidch __BIT_VEC64_SET_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \ 421255736Sdavidch (idx) & BIT_VEC64_ELEM_MASK) 422255736Sdavidch 423255736Sdavidch#define BIT_VEC64_CLEAR_BIT(vec64, idx) \ 424255736Sdavidch __BIT_VEC64_CLEAR_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \ 425255736Sdavidch (idx) & BIT_VEC64_ELEM_MASK) 426255736Sdavidch 427255736Sdavidch#define BIT_VEC64_TEST_BIT(vec64, idx) \ 428255736Sdavidch (((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT] >> \ 429255736Sdavidch ((idx) & BIT_VEC64_ELEM_MASK)) & 0x1) 430255736Sdavidch 431255736Sdavidch/* 432255736Sdavidch * Creates a bitmask of all ones in less significant bits. 
 * idx - index of the most significant bit in the created mask
 */
#define BIT_VEC64_ONES_MASK(idx) \
    (((uint64_t)0x1 << (((idx) & BIT_VEC64_ELEM_MASK) + 1)) - 1)
#define BIT_VEC64_ELEM_ONE_MASK ((uint64_t)(~0))

/* fill in a MAC address the way the FW likes it: each 16-bit word holds
 * a byte-swapped pair, i.e. hi = mac[1],mac[0]; mid = mac[3],mac[2];
 * lo = mac[5],mac[4] */
static inline void
ecore_set_fw_mac_addr(uint16_t *fw_hi,
                      uint16_t *fw_mid,
                      uint16_t *fw_lo,
                      uint8_t  *mac)
{
    ((uint8_t *)fw_hi)[0]  = mac[1];
    ((uint8_t *)fw_hi)[1]  = mac[0];
    ((uint8_t *)fw_mid)[0] = mac[3];
    ((uint8_t *)fw_mid)[1] = mac[2];
    ((uint8_t *)fw_lo)[0]  = mac[5];
    ((uint8_t *)fw_lo)[1]  = mac[4];
}


/* Slow-path return codes; negative values are errors. */
enum ecore_status_t {
    ECORE_EXISTS  = -6,
    ECORE_IO      = -5,
    ECORE_TIMEOUT = -4,
    ECORE_INVAL   = -3,
    ECORE_BUSY    = -2,
    ECORE_NOMEM   = -1,
    ECORE_SUCCESS = 0,
    /* PENDING is not an error and should be positive */
    ECORE_PENDING = 1,
};

enum {
    SWITCH_UPDATE,
    AFEX_UPDATE,
};




struct bxe_softc;
struct eth_context;

/* Bits representing general command's configuration */
enum {
    RAMROD_TX,
    RAMROD_RX,
    /* Wait until all pending commands complete */
    RAMROD_COMP_WAIT,
    /* Don't send a ramrod, only update a registry */
    RAMROD_DRV_CLR_ONLY,
    /* Configure HW according to the current object state */
    RAMROD_RESTORE,
    /* Execute the next command now */
    RAMROD_EXEC,
    /* Don't add a new command and continue execution of postponed
     * commands. If not set a new command will be added to the
     * pending commands list.
     */
    RAMROD_CONT,
    /* If there is another pending ramrod, wait until it finishes and
     * re-try to submit this one. This flag can be set only in sleepable
     * context, and should not be set from the context that completes the
     * ramrods as deadlock will occur.
     */
    RAMROD_RETRY,
};

typedef enum {
    ECORE_OBJ_TYPE_RX,
    ECORE_OBJ_TYPE_TX,
    ECORE_OBJ_TYPE_RX_TX,
} ecore_obj_type;

/* Public slow path states */
enum {
    ECORE_FILTER_MAC_PENDING,
    ECORE_FILTER_VLAN_PENDING,
    ECORE_FILTER_VLAN_MAC_PENDING,
    ECORE_FILTER_RX_MODE_PENDING,
    ECORE_FILTER_RX_MODE_SCHED,
    ECORE_FILTER_ISCSI_ETH_START_SCHED,
    ECORE_FILTER_ISCSI_ETH_STOP_SCHED,
    ECORE_FILTER_FCOE_ETH_START_SCHED,
    ECORE_FILTER_FCOE_ETH_STOP_SCHED,
    ECORE_FILTER_BYPASS_RX_MODE_PENDING,
    ECORE_FILTER_BYPASS_MAC_PENDING,
    ECORE_FILTER_BYPASS_RSS_CONF_PENDING,
    ECORE_FILTER_MCAST_PENDING,
    ECORE_FILTER_MCAST_SCHED,
    ECORE_FILTER_RSS_CONF_PENDING,
    ECORE_AFEX_FCOE_Q_UPDATE_PENDING,
    ECORE_AFEX_PENDING_VIFSET_MCP_ACK
};

/* Common state shared by every configurable slow-path object. */
struct ecore_raw_obj {
    uint8_t func_id;

    /* Queue params */
    uint8_t cl_id;
    uint32_t cid;

    /* Ramrod data buffer params */
    void *rdata;
    ecore_dma_addr_t rdata_mapping;

    /* Ramrod state params */
    int state; /* "ramrod is pending" state bit */
    unsigned long *pstate; /* pointer to state buffer */

    ecore_obj_type obj_type;

    /* Block until the pending state bit clears. */
    int (*wait_comp)(struct bxe_softc *sc,
                     struct ecore_raw_obj *o);

    bool (*check_pending)(struct ecore_raw_obj *o);
    void (*clear_pending)(struct ecore_raw_obj *o);
    void (*set_pending)(struct ecore_raw_obj *o);
};

/************************* VLAN-MAC commands related parameters ***************/
struct ecore_mac_ramrod_data {
    uint8_t mac[ETH_ALEN];
    uint8_t is_inner_mac;
};

struct ecore_vlan_ramrod_data {
    uint16_t vlan;
};

struct ecore_vlan_mac_ramrod_data {
    uint8_t mac[ETH_ALEN];
    uint8_t is_inner_mac;
    uint16_t vlan;
};

union ecore_classification_ramrod_data {
    struct ecore_mac_ramrod_data mac;
    struct ecore_vlan_ramrod_data vlan;
    struct ecore_vlan_mac_ramrod_data vlan_mac;
};

/* VLAN_MAC commands */
enum ecore_vlan_mac_cmd {
    ECORE_VLAN_MAC_ADD,
    ECORE_VLAN_MAC_DEL,
    ECORE_VLAN_MAC_MOVE,
};

struct ecore_vlan_mac_data {
    /* Requested command: ECORE_VLAN_MAC_XX */
    enum ecore_vlan_mac_cmd cmd;
    /* used to contain the data related vlan_mac_flags bits from
     * ramrod parameters.
     */
    unsigned long vlan_mac_flags;

    /* Needed for MOVE command */
    struct ecore_vlan_mac_obj *target_obj;

    union ecore_classification_ramrod_data u;
};

/*************************** Exe Queue obj ************************************/
union ecore_exe_queue_cmd_data {
    struct ecore_vlan_mac_data vlan_mac;

    struct {
        /* TODO */
    } mcast;
};

/* One queued slow-path command. */
struct ecore_exeq_elem {
    ecore_list_entry_t link;

    /* Length of this element in the exe_chunk.
 */
    int cmd_len;

    union ecore_exe_queue_cmd_data cmd_data;
};

union ecore_qable_obj;

union ecore_exeq_comp_elem {
    union event_ring_elem *elem;
};

struct ecore_exe_queue_obj;

/* Virtual-function signatures used by ecore_exe_queue_obj below. */
typedef int (*exe_q_validate)(struct bxe_softc *sc,
                              union ecore_qable_obj *o,
                              struct ecore_exeq_elem *elem);

typedef int (*exe_q_remove)(struct bxe_softc *sc,
                            union ecore_qable_obj *o,
                            struct ecore_exeq_elem *elem);

/* Return positive if entry was optimized, 0 - if not, negative
 * in case of an error.
 */
typedef int (*exe_q_optimize)(struct bxe_softc *sc,
                              union ecore_qable_obj *o,
                              struct ecore_exeq_elem *elem);
typedef int (*exe_q_execute)(struct bxe_softc *sc,
                             union ecore_qable_obj *o,
                             ecore_list_t *exe_chunk,
                             unsigned long *ramrod_flags);
typedef struct ecore_exeq_elem *
        (*exe_q_get)(struct ecore_exe_queue_obj *o,
                     struct ecore_exeq_elem *elem);

/* Generic execution queue: commands are validated, optionally optimized
 * away, executed in chunks, and tracked until completion. */
struct ecore_exe_queue_obj {
    /* Commands pending for execution. */
    ecore_list_t exe_queue;

    /* Commands pending for a completion.
 */
    ecore_list_t pending_comp;

    ECORE_MUTEX_SPIN lock;

    /* Maximum length of commands' list for one execution */
    int exe_chunk_len;

    union ecore_qable_obj *owner;

    /****** Virtual functions ******/
    /**
     * Called before commands execution for commands that are really
     * going to be executed (after 'optimize').
     *
     * Must run under exe_queue->lock
     */
    exe_q_validate validate;

    /**
     * Called before removing pending commands, cleaning allocated
     * resources (e.g., credits from validate)
     */
    exe_q_remove remove;

    /**
     * This will try to cancel the current pending commands list
     * considering the new command.
     *
     * Returns the number of optimized commands or a negative error code
     *
     * Must run under exe_queue->lock
     */
    exe_q_optimize optimize;

    /**
     * Run the next commands chunk (owner specific).
     */
    exe_q_execute execute;

    /**
     * Return the exe_queue element containing the specific command
     * if any. Otherwise return NULL.
     */
    exe_q_get get;
};
/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
/*
 * Element in the VLAN_MAC registry list having all current configured
 * rules.
 */
struct ecore_vlan_mac_registry_elem {
	ecore_list_entry_t	link;

	/* Used to store the cam offset used for the mac/vlan/vlan-mac.
	 * Relevant for 57710 and 57711 only. VLANs and MACs share the
	 * same CAM for these chips.
	 */
	int			cam_offset;

	/* Needed for DEL and RESTORE flows */
	unsigned long		vlan_mac_flags;

	/* The classification data (MAC, VLAN or VLAN-MAC pair) itself */
	union ecore_classification_ramrod_data u;
};

/* Bits representing VLAN_MAC commands specific flags */
enum {
	ECORE_UC_LIST_MAC,
	ECORE_ETH_MAC,
	ECORE_ISCSI_ETH_MAC,
	ECORE_NETQ_ETH_MAC,
	ECORE_DONT_CONSUME_CAM_CREDIT,
	ECORE_DONT_CONSUME_CAM_CREDIT_DEST,
};

/* Parameters for a single MAC/VLAN/VLAN-MAC configuration request */
struct ecore_vlan_mac_ramrod_params {
	/* Object to run the command from */
	struct ecore_vlan_mac_obj *vlan_mac_obj;

	/* General command flags: COMP_WAIT, etc. */
	unsigned long ramrod_flags;

	/* Command specific configuration request */
	struct ecore_vlan_mac_data user_req;
};

/* MAC/VLAN/VLAN-MAC classification object: keeps the registry of currently
 * configured rules and drives their configuration through an execution
 * queue; chip-specific behavior is supplied via the function pointers below.
 */
struct ecore_vlan_mac_obj {
	struct ecore_raw_obj raw;

	/* Bookkeeping list: will prevent the addition of already existing
	 * entries.
	 */
	ecore_list_t		head;
	/* Implement a simple reader/writer lock on the head list.
	 * all these fields should only be accessed under the exe_queue lock
	 */
	uint8_t		head_reader; /* Num. of readers accessing head list */
	bool		head_exe_request; /* Pending execution request. */
	unsigned long	saved_ramrod_flags; /* Ramrods of pending execution */

	/* Execution queue interface instance */
	struct ecore_exe_queue_obj	exe_queue;

	/* MACs credit pool */
	struct ecore_credit_pool_obj	*macs_pool;

	/* VLANs credit pool */
	struct ecore_credit_pool_obj	*vlans_pool;

	/* RAMROD command to be used */
	int				ramrod_cmd;

	/* copy first n elements onto preallocated buffer
	 *
	 * @param n number of elements to get
	 * @param buf buffer preallocated by caller into which elements
	 *            will be copied. Note elements are 4-byte aligned
	 *            so buffer size must be able to accommodate the
	 *            aligned elements.
	 *
	 * @return number of copied bytes
	 *
	 * NOTE(review): comment refers to 'buf' but the signature takes
	 * base/stride/size -- presumably 'base' is the output buffer;
	 * confirm against the implementation.
	 */

	int (*get_n_elements)(struct bxe_softc *sc,
			      struct ecore_vlan_mac_obj *o, int n, uint8_t *base,
			      uint8_t stride, uint8_t size);

	/**
	 * Checks if ADD-ramrod with the given params may be performed.
	 *
	 * @return zero if the element may be added
	 */

	int (*check_add)(struct bxe_softc *sc,
			 struct ecore_vlan_mac_obj *o,
			 union ecore_classification_ramrod_data *data);

	/**
	 * Checks if DEL-ramrod with the given params may be performed.
	 *
	 * @return TRUE if the element may be deleted
	 */
	struct ecore_vlan_mac_registry_elem *
		(*check_del)(struct bxe_softc *sc,
			     struct ecore_vlan_mac_obj *o,
			     union ecore_classification_ramrod_data *data);

	/**
	 * Checks if a MOVE-ramrod (from src_o to dst_o) with the given
	 * params may be performed.
	 *
	 * @return TRUE if the element may be moved
	 */
	bool (*check_move)(struct bxe_softc *sc,
			   struct ecore_vlan_mac_obj *src_o,
			   struct ecore_vlan_mac_obj *dst_o,
			   union ecore_classification_ramrod_data *data);

	/**
	 * Update the relevant credit object(s) (consume/return
	 * correspondingly).
	 */
	bool (*get_credit)(struct ecore_vlan_mac_obj *o);
	bool (*put_credit)(struct ecore_vlan_mac_obj *o);
	bool (*get_cam_offset)(struct ecore_vlan_mac_obj *o, int *offset);
	bool (*put_cam_offset)(struct ecore_vlan_mac_obj *o, int offset);

	/**
	 * Configures one rule in the ramrod data buffer.
	 */
	void (*set_one_rule)(struct bxe_softc *sc,
			     struct ecore_vlan_mac_obj *o,
			     struct ecore_exeq_elem *elem, int rule_idx,
			     int cam_offset);

	/**
	 * Delete all configured elements having the given
	 * vlan_mac_flags specification. Assumes no pending for
	 * execution commands. Will schedule all currently
	 * configured MACs/VLANs/VLAN-MACs matching the vlan_mac_flags
	 * specification for deletion and will use the given
	 * ramrod_flags for the last DEL operation.
	 *
	 * @param sc
	 * @param o
	 * @param ramrod_flags RAMROD_XX flags
	 *
	 * @return 0 if the last operation has completed successfully
	 *         and there are no more elements left, positive value
	 *         if there are pending for completion commands,
	 *         negative value in case of failure.
	 */
	int (*delete_all)(struct bxe_softc *sc,
			  struct ecore_vlan_mac_obj *o,
			  unsigned long *vlan_mac_flags,
			  unsigned long *ramrod_flags);

	/**
	 * Reconfigures the next MAC/VLAN/VLAN-MAC element from the previously
	 * configured elements list.
	 *
	 * @param sc
	 * @param p Command parameters (RAMROD_COMP_WAIT bit in
	 *          ramrod_flags is only taken into an account)
	 * @param ppos a pointer to the cookie that should be given back in the
	 *        next call to make function handle the next element. If
	 *        *ppos is set to NULL it will restart the iterator.
	 *        If returned *ppos == NULL this means that the last
	 *        element has been handled.
	 *
	 * @return int
	 */
	int (*restore)(struct bxe_softc *sc,
		       struct ecore_vlan_mac_ramrod_params *p,
		       struct ecore_vlan_mac_registry_elem **ppos);

	/**
	 * Should be called on a completion arrival.
	 *
	 * @param sc
	 * @param o
	 * @param cqe Completion element we are handling
	 * @param ramrod_flags if RAMROD_CONT is set the next bulk of
	 *                     pending commands will be executed.
	 *                     RAMROD_DRV_CLR_ONLY and RAMROD_RESTORE
	 *                     may also be set if needed.
	 *
	 * @return 0 if there are neither pending nor waiting for
	 *         completion commands. Positive value if there are
	 *         pending for execution or for completion commands.
	 *         Negative value in case of an error (including an
	 *         error in the cqe).
	 */
	int (*complete)(struct bxe_softc *sc, struct ecore_vlan_mac_obj *o,
			union event_ring_elem *cqe,
			unsigned long *ramrod_flags);

	/**
	 * Wait for completion of all commands. Don't schedule new ones,
	 * just wait. It assumes that the completion code will schedule
	 * for new commands.
	 */
	int (*wait)(struct bxe_softc *sc, struct ecore_vlan_mac_obj *o);
};

/* NIG LLH CAM line assignment */
enum {
	ECORE_LLH_CAM_ISCSI_ETH_LINE = 0,
	ECORE_LLH_CAM_ETH_LINE,
	ECORE_LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
};

void ecore_set_mac_in_nig(struct bxe_softc *sc,
			  bool add, unsigned char *dev_addr, int index);

/** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */

/* RX_MODE ramrod special flags: set in rx_mode_flags field in
 * a ecore_rx_mode_ramrod_params.
 */
enum {
	ECORE_RX_MODE_FCOE_ETH,
	ECORE_RX_MODE_ISCSI_ETH,
};

/* Bits for rx/tx_accept_flags below */
enum {
	ECORE_ACCEPT_UNICAST,
	ECORE_ACCEPT_MULTICAST,
	ECORE_ACCEPT_ALL_UNICAST,
	ECORE_ACCEPT_ALL_MULTICAST,
	ECORE_ACCEPT_BROADCAST,
	ECORE_ACCEPT_UNMATCHED,
	ECORE_ACCEPT_ANY_VLAN
};

struct ecore_rx_mode_ramrod_params {
	struct ecore_rx_mode_obj *rx_mode_obj;
	unsigned long *pstate;
	int state;
	uint8_t cl_id;
	uint32_t cid;
	uint8_t func_id;
	unsigned long ramrod_flags;
	unsigned long rx_mode_flags;

	/* rdata is either a pointer to eth_filter_rules_ramrod_data(e2) or to
	 * a tstorm_eth_mac_filter_config (e1x).
	 */
	void *rdata;
	ecore_dma_addr_t rdata_mapping;

	/* Rx mode settings */
	unsigned long rx_accept_flags;

	/* internal switching settings */
	unsigned long tx_accept_flags;
};

/* RX-mode configuration object: chip-specific implementations are supplied
 * via the two function pointers.
 */
struct ecore_rx_mode_obj {
	int (*config_rx_mode)(struct bxe_softc *sc,
			      struct ecore_rx_mode_ramrod_params *p);

	int (*wait_comp)(struct bxe_softc *sc,
			 struct ecore_rx_mode_ramrod_params *p);
};

/********************** Set multicast group ***********************************/

struct ecore_mcast_list_elem {
	ecore_list_entry_t link;
	uint8_t *mac;
};

union ecore_mcast_config_data {
	uint8_t *mac;
	uint8_t bin;		/* used in a RESTORE flow */
};

struct ecore_mcast_ramrod_params {
	struct ecore_mcast_obj *mcast_obj;

	/* Relevant options are RAMROD_COMP_WAIT and RAMROD_DRV_CLR_ONLY */
	unsigned long ramrod_flags;

	ecore_list_t mcast_list; /* list of struct ecore_mcast_list_elem */
	/** TODO:
	 * - rename it to macs_num.
	 * - Add a new command type for handling pending commands
	 *   (remove "zero semantics").
	 *
	 * Length of mcast_list. If zero and ADD_CONT command - post
	 * pending commands.
	 */
	int mcast_list_len;
};

enum ecore_mcast_cmd {
	ECORE_MCAST_CMD_ADD,
	ECORE_MCAST_CMD_CONT,
	ECORE_MCAST_CMD_DEL,
	ECORE_MCAST_CMD_RESTORE,
};

/* Multicast configuration object: the registry is either an approximate
 * 256-bin hash vector or an exact MAC list, depending on the chip.
 */
struct ecore_mcast_obj {
	struct ecore_raw_obj raw;

	union {
		struct {
		#define ECORE_MCAST_BINS_NUM	256
		#define ECORE_MCAST_VEC_SZ	(ECORE_MCAST_BINS_NUM / 64)
			uint64_t vec[ECORE_MCAST_VEC_SZ];

			/** Number of BINs to clear. Should be updated
			 *  immediately when a command arrives in order to
			 *  properly create DEL commands.
			 */
			int num_bins_set;
		} aprox_match;

		struct {
			ecore_list_t macs;
			int num_macs_set;
		} exact_match;
	} registry;

	/* Pending commands */
	ecore_list_t pending_cmds_head;

	/* A state that is set in raw.pstate, when there are pending commands */
	int sched_state;

	/* Maximal number of mcast MACs configured in one command */
	int max_cmd_len;

	/* Total number of currently pending MACs to configure: both
	 * in the pending commands list and in the current command.
	 */
	int total_pending_num;

	uint8_t engine_id;

	/**
	 * @param cmd command to execute (ECORE_MCAST_CMD_X, see above)
	 */
	int (*config_mcast)(struct bxe_softc *sc,
			    struct ecore_mcast_ramrod_params *p,
			    enum ecore_mcast_cmd cmd);

	/**
	 * Fills the ramrod data during the RESTORE flow.
	 *
	 * @param sc
	 * @param o
	 * @param start_idx Registry index to start from
	 * @param rdata_idx Index in the ramrod data to start from
	 *
	 * @return -1 if we handled the whole registry or index of the last
	 *         handled registry element.
	 */
	int (*hdl_restore)(struct bxe_softc *sc, struct ecore_mcast_obj *o,
			   int start_bin, int *rdata_idx);

	int (*enqueue_cmd)(struct bxe_softc *sc, struct ecore_mcast_obj *o,
			   struct ecore_mcast_ramrod_params *p,
			   enum ecore_mcast_cmd cmd);

	void (*set_one_rule)(struct bxe_softc *sc,
			     struct ecore_mcast_obj *o, int idx,
			     union ecore_mcast_config_data *cfg_data,
			     enum ecore_mcast_cmd cmd);

	/** Checks if there are more mcast MACs to be set or a previous
	 *  command is still pending.
	 */
	bool (*check_pending)(struct ecore_mcast_obj *o);

	/**
	 * Set/Clear/Check SCHEDULED state of the object
	 */
	void (*set_sched)(struct ecore_mcast_obj *o);
	void (*clear_sched)(struct ecore_mcast_obj *o);
	bool (*check_sched)(struct ecore_mcast_obj *o);

	/* Wait until all pending commands complete */
	int (*wait_comp)(struct bxe_softc *sc, struct ecore_mcast_obj *o);

	/**
	 * Handle the internal object counters needed for proper
	 * commands handling. Checks that the provided parameters are
	 * feasible.
	 */
	int (*validate)(struct bxe_softc *sc,
			struct ecore_mcast_ramrod_params *p,
			enum ecore_mcast_cmd cmd);

	/**
	 * Restore the values of internal counters in case of a failure.
	 */
	void (*revert)(struct bxe_softc *sc,
		       struct ecore_mcast_ramrod_params *p,
		       int old_num_bins);

	int (*get_registry_size)(struct ecore_mcast_obj *o);
	void (*set_registry_size)(struct ecore_mcast_obj *o, int n);
};

/*************************** Credit handling **********************************/
/* Generic credit pool used to account for CAM entries / offsets shared
 * between classification objects.
 */
struct ecore_credit_pool_obj {

	/* Current amount of credit in the pool */
	ecore_atomic_t	credit;

	/* Maximum allowed credit. put() will check against it. */
	int		pool_sz;

	/* Allocate a pool table statically.
	 *
	 * Currently the maximum allowed size is MAX_MAC_CREDIT_E2(272)
	 *
	 * The set bit in the table will mean that the entry is available.
	 */
#define ECORE_POOL_VEC_SIZE	(MAX_MAC_CREDIT_E2 / 64)
	uint64_t	pool_mirror[ECORE_POOL_VEC_SIZE];

	/* Base pool offset (initialized differently) */
	int		base_pool_offset;

	/**
	 * Get the next free pool entry.
	 *
	 * @return TRUE if there was a free entry in the pool
	 */
	bool (*get_entry)(struct ecore_credit_pool_obj *o, int *entry);

	/**
	 * Return the entry back to the pool.
	 *
	 * @return TRUE if entry is legal and has been successfully
	 *         returned to the pool.
	 */
	bool (*put_entry)(struct ecore_credit_pool_obj *o, int entry);

	/**
	 * Get the requested amount of credit from the pool.
	 *
	 * @param cnt Amount of requested credit
	 * @return TRUE if the operation is successful
	 */
	bool (*get)(struct ecore_credit_pool_obj *o, int cnt);

	/**
	 * Returns the credit to the pool.
	 *
	 * @param cnt Amount of credit to return
	 * @return TRUE if the operation is successful
	 */
	bool (*put)(struct ecore_credit_pool_obj *o, int cnt);

	/**
	 * Reads the current amount of credit.
	 */
	int (*check)(struct ecore_credit_pool_obj *o);
};

/*************************** RSS configuration ********************************/
enum {
	/* RSS_MODE bits are mutually exclusive */
	ECORE_RSS_MODE_DISABLED,
	ECORE_RSS_MODE_REGULAR,

	ECORE_RSS_SET_SRCH, /* Setup searcher, E1x specific flag */

	ECORE_RSS_IPV4,
	ECORE_RSS_IPV4_TCP,
	ECORE_RSS_IPV4_UDP,
	ECORE_RSS_IPV6,
	ECORE_RSS_IPV6_TCP,
	ECORE_RSS_IPV6_UDP,

	ECORE_RSS_TUNNELING,
#if defined(__VMKLNX__) && (VMWARE_ESX_DDK_VERSION < 55000) /* ! BNX2X_UPSTREAM */
	ECORE_RSS_MODE_ESX51,
#endif
};

struct ecore_config_rss_params {
	struct ecore_rss_config_obj *rss_obj;

	/* may have RAMROD_COMP_WAIT set only */
	unsigned long	ramrod_flags;

	/* ECORE_RSS_X bits */
	unsigned long	rss_flags;

	/* Number of hash bits to take into an account */
	uint8_t		rss_result_mask;

	/* Indirection table */
	uint8_t		ind_table[T_ETH_INDIRECTION_TABLE_SIZE];

	/* RSS hash values */
	uint32_t	rss_key[10];

	/* valid only iff ECORE_RSS_UPDATE_TOE is set */
	uint16_t	toe_rss_bitmap;

	/* valid iff ECORE_RSS_TUNNELING is set */
	uint16_t	tunnel_value;
	uint16_t
			tunnel_mask;
};

struct ecore_rss_config_obj {
	struct ecore_raw_obj	raw;

	/* RSS engine to use */
	uint8_t			engine_id;

	/* Last configured indirection table */
	uint8_t			ind_table[T_ETH_INDIRECTION_TABLE_SIZE];

	/* flags for enabling 4-tuple hash on UDP */
	uint8_t			udp_rss_v4;
	uint8_t			udp_rss_v6;

	int (*config_rss)(struct bxe_softc *sc,
			  struct ecore_config_rss_params *p);
};

/*********************** Queue state update ***********************************/

/* UPDATE command options */
enum {
	ECORE_Q_UPDATE_IN_VLAN_REM,
	ECORE_Q_UPDATE_IN_VLAN_REM_CHNG,
	ECORE_Q_UPDATE_OUT_VLAN_REM,
	ECORE_Q_UPDATE_OUT_VLAN_REM_CHNG,
	ECORE_Q_UPDATE_ANTI_SPOOF,
	ECORE_Q_UPDATE_ANTI_SPOOF_CHNG,
	ECORE_Q_UPDATE_ACTIVATE,
	ECORE_Q_UPDATE_ACTIVATE_CHNG,
	ECORE_Q_UPDATE_DEF_VLAN_EN,
	ECORE_Q_UPDATE_DEF_VLAN_EN_CHNG,
	ECORE_Q_UPDATE_SILENT_VLAN_REM_CHNG,
	ECORE_Q_UPDATE_SILENT_VLAN_REM,
	ECORE_Q_UPDATE_TX_SWITCHING_CHNG,
	ECORE_Q_UPDATE_TX_SWITCHING,
};

/* Allowed Queue states */
enum ecore_q_state {
	ECORE_Q_STATE_RESET,
	ECORE_Q_STATE_INITIALIZED,
	ECORE_Q_STATE_ACTIVE,
	ECORE_Q_STATE_MULTI_COS,
	ECORE_Q_STATE_MCOS_TERMINATED,
	ECORE_Q_STATE_INACTIVE,
	ECORE_Q_STATE_STOPPED,
	ECORE_Q_STATE_TERMINATED,
	ECORE_Q_STATE_FLRED,
	ECORE_Q_STATE_MAX,
};

/* Allowed Queue logical states */
enum ecore_q_logical_state {
	ECORE_Q_LOGICAL_STATE_ACTIVE,
	ECORE_Q_LOGICAL_STATE_STOPPED,
};

/* Allowed commands */
enum ecore_queue_cmd {
	ECORE_Q_CMD_INIT,
	ECORE_Q_CMD_SETUP,
	ECORE_Q_CMD_SETUP_TX_ONLY,
	ECORE_Q_CMD_DEACTIVATE,
	ECORE_Q_CMD_ACTIVATE,
	ECORE_Q_CMD_UPDATE,
	ECORE_Q_CMD_UPDATE_TPA,
	ECORE_Q_CMD_HALT,
	ECORE_Q_CMD_CFC_DEL,
	ECORE_Q_CMD_TERMINATE,
	ECORE_Q_CMD_EMPTY,
	ECORE_Q_CMD_MAX,
};

/* queue SETUP + INIT flags */
enum {
	ECORE_Q_FLG_TPA,
	ECORE_Q_FLG_TPA_IPV6,
	ECORE_Q_FLG_TPA_GRO,
	ECORE_Q_FLG_STATS,
	ECORE_Q_FLG_ZERO_STATS,
	ECORE_Q_FLG_ACTIVE,
	ECORE_Q_FLG_OV,
	ECORE_Q_FLG_VLAN,
	ECORE_Q_FLG_COS,
	ECORE_Q_FLG_HC,
	ECORE_Q_FLG_HC_EN,
	ECORE_Q_FLG_DHC,
	ECORE_Q_FLG_OOO,
	ECORE_Q_FLG_FCOE,
	ECORE_Q_FLG_LEADING_RSS,
	ECORE_Q_FLG_MCAST,
	ECORE_Q_FLG_DEF_VLAN,
	ECORE_Q_FLG_TX_SWITCH,
	ECORE_Q_FLG_TX_SEC,
	ECORE_Q_FLG_ANTI_SPOOF,
	ECORE_Q_FLG_SILENT_VLAN_REM,
	ECORE_Q_FLG_FORCE_DEFAULT_PRI,
	ECORE_Q_FLG_REFUSE_OUTBAND_VLAN,
	ECORE_Q_FLG_PCSUM_ON_PKT,
	ECORE_Q_FLG_TUN_INC_INNER_IP_ID
};

/* Queue type options: queue type may be a combination of below. */
enum ecore_q_type {
	ECORE_Q_TYPE_FWD,
	/** TODO: Consider moving both these flags into the init()
	 *  ramrod params.
	 */
	ECORE_Q_TYPE_HAS_RX,
	ECORE_Q_TYPE_HAS_TX,
};

#define ECORE_PRIMARY_CID_INDEX		0
#define ECORE_MULTI_TX_COS_E1X		3 /* QM only */
#define ECORE_MULTI_TX_COS_E2_E3A0	2
#define ECORE_MULTI_TX_COS_E3B0		3
#define ECORE_MULTI_TX_COS		3 /* Maximum possible */
#define MAC_PAD (ECORE_ALIGN(ETH_ALEN, sizeof(uint32_t)) - ETH_ALEN)

struct ecore_queue_init_params {
	struct {
		unsigned long	flags;
		uint16_t	hc_rate;
		uint8_t		fw_sb_id;
		uint8_t		sb_cq_index;
	} tx;

	struct {
		unsigned long	flags;
		uint16_t	hc_rate;
		uint8_t		fw_sb_id;
		uint8_t		sb_cq_index;
	} rx;

	/* CID context in the host memory */
	struct eth_context *cxts[ECORE_MULTI_TX_COS];

	/* maximum number of cos supported by hardware */
	uint8_t max_cos;
};

struct ecore_queue_terminate_params {
	/* index within the tx_only cids of this queue object */
	uint8_t cid_index;
1344255736Sdavidch}; 1345255736Sdavidch 1346255736Sdavidchstruct ecore_queue_cfc_del_params { 1347255736Sdavidch /* index within the tx_only cids of this queue object */ 1348255736Sdavidch uint8_t cid_index; 1349255736Sdavidch}; 1350255736Sdavidch 1351255736Sdavidchstruct ecore_queue_update_params { 1352255736Sdavidch unsigned long update_flags; /* ECORE_Q_UPDATE_XX bits */ 1353255736Sdavidch uint16_t def_vlan; 1354255736Sdavidch uint16_t silent_removal_value; 1355255736Sdavidch uint16_t silent_removal_mask; 1356255736Sdavidch/* index within the tx_only cids of this queue object */ 1357255736Sdavidch uint8_t cid_index; 1358255736Sdavidch}; 1359255736Sdavidch 1360255736Sdavidchstruct rxq_pause_params { 1361255736Sdavidch uint16_t bd_th_lo; 1362255736Sdavidch uint16_t bd_th_hi; 1363255736Sdavidch uint16_t rcq_th_lo; 1364255736Sdavidch uint16_t rcq_th_hi; 1365255736Sdavidch uint16_t sge_th_lo; /* valid iff ECORE_Q_FLG_TPA */ 1366255736Sdavidch uint16_t sge_th_hi; /* valid iff ECORE_Q_FLG_TPA */ 1367255736Sdavidch uint16_t pri_map; 1368255736Sdavidch}; 1369255736Sdavidch 1370255736Sdavidch/* general */ 1371255736Sdavidchstruct ecore_general_setup_params { 1372255736Sdavidch /* valid iff ECORE_Q_FLG_STATS */ 1373255736Sdavidch uint8_t stat_id; 1374255736Sdavidch 1375255736Sdavidch uint8_t spcl_id; 1376255736Sdavidch uint16_t mtu; 1377255736Sdavidch uint8_t cos; 1378255736Sdavidch}; 1379255736Sdavidch 1380255736Sdavidchstruct ecore_rxq_setup_params { 1381255736Sdavidch /* dma */ 1382255736Sdavidch ecore_dma_addr_t dscr_map; 1383255736Sdavidch ecore_dma_addr_t sge_map; 1384255736Sdavidch ecore_dma_addr_t rcq_map; 1385255736Sdavidch ecore_dma_addr_t rcq_np_map; 1386255736Sdavidch 1387255736Sdavidch uint16_t drop_flags; 1388255736Sdavidch uint16_t buf_sz; 1389255736Sdavidch uint8_t fw_sb_id; 1390255736Sdavidch uint8_t cl_qzone_id; 1391255736Sdavidch 1392255736Sdavidch /* valid iff ECORE_Q_FLG_TPA */ 1393255736Sdavidch uint16_t tpa_agg_sz; 1394255736Sdavidch uint16_t 
sge_buf_sz; 1395255736Sdavidch uint8_t max_sges_pkt; 1396255736Sdavidch uint8_t max_tpa_queues; 1397255736Sdavidch uint8_t rss_engine_id; 1398255736Sdavidch 1399255736Sdavidch /* valid iff ECORE_Q_FLG_MCAST */ 1400255736Sdavidch uint8_t mcast_engine_id; 1401255736Sdavidch 1402255736Sdavidch uint8_t cache_line_log; 1403255736Sdavidch 1404255736Sdavidch uint8_t sb_cq_index; 1405255736Sdavidch 1406255736Sdavidch /* valid iff BXN2X_Q_FLG_SILENT_VLAN_REM */ 1407255736Sdavidch uint16_t silent_removal_value; 1408255736Sdavidch uint16_t silent_removal_mask; 1409255736Sdavidch}; 1410255736Sdavidch 1411255736Sdavidchstruct ecore_txq_setup_params { 1412255736Sdavidch /* dma */ 1413255736Sdavidch ecore_dma_addr_t dscr_map; 1414255736Sdavidch 1415255736Sdavidch uint8_t fw_sb_id; 1416255736Sdavidch uint8_t sb_cq_index; 1417255736Sdavidch uint8_t cos; /* valid iff ECORE_Q_FLG_COS */ 1418255736Sdavidch uint16_t traffic_type; 1419255736Sdavidch /* equals to the leading rss client id, used for TX classification*/ 1420255736Sdavidch uint8_t tss_leading_cl_id; 1421255736Sdavidch 1422255736Sdavidch /* valid iff ECORE_Q_FLG_DEF_VLAN */ 1423255736Sdavidch uint16_t default_vlan; 1424255736Sdavidch}; 1425255736Sdavidch 1426255736Sdavidchstruct ecore_queue_setup_params { 1427255736Sdavidch struct ecore_general_setup_params gen_params; 1428255736Sdavidch struct ecore_txq_setup_params txq_params; 1429255736Sdavidch struct ecore_rxq_setup_params rxq_params; 1430255736Sdavidch struct rxq_pause_params pause_params; 1431255736Sdavidch unsigned long flags; 1432255736Sdavidch}; 1433255736Sdavidch 1434255736Sdavidchstruct ecore_queue_setup_tx_only_params { 1435255736Sdavidch struct ecore_general_setup_params gen_params; 1436255736Sdavidch struct ecore_txq_setup_params txq_params; 1437255736Sdavidch unsigned long flags; 1438255736Sdavidch /* index within the tx_only cids of this queue object */ 1439255736Sdavidch uint8_t cid_index; 1440255736Sdavidch}; 1441255736Sdavidch 1442255736Sdavidchstruct 
ecore_queue_state_params { 1443255736Sdavidch struct ecore_queue_sp_obj *q_obj; 1444255736Sdavidch 1445255736Sdavidch /* Current command */ 1446255736Sdavidch enum ecore_queue_cmd cmd; 1447255736Sdavidch 1448255736Sdavidch /* may have RAMROD_COMP_WAIT set only */ 1449255736Sdavidch unsigned long ramrod_flags; 1450255736Sdavidch 1451255736Sdavidch /* Params according to the current command */ 1452255736Sdavidch union { 1453255736Sdavidch struct ecore_queue_update_params update; 1454255736Sdavidch struct ecore_queue_setup_params setup; 1455255736Sdavidch struct ecore_queue_init_params init; 1456255736Sdavidch struct ecore_queue_setup_tx_only_params tx_only; 1457255736Sdavidch struct ecore_queue_terminate_params terminate; 1458255736Sdavidch struct ecore_queue_cfc_del_params cfc_del; 1459255736Sdavidch } params; 1460255736Sdavidch}; 1461255736Sdavidch 1462255736Sdavidchstruct ecore_viflist_params { 1463255736Sdavidch uint8_t echo_res; 1464255736Sdavidch uint8_t func_bit_map_res; 1465255736Sdavidch}; 1466255736Sdavidch 1467255736Sdavidchstruct ecore_queue_sp_obj { 1468255736Sdavidch uint32_t cids[ECORE_MULTI_TX_COS]; 1469255736Sdavidch uint8_t cl_id; 1470255736Sdavidch uint8_t func_id; 1471255736Sdavidch 1472255736Sdavidch /* number of traffic classes supported by queue. 1473255736Sdavidch * The primary connection of the queue supports the first traffic 1474255736Sdavidch * class. Any further traffic class is supported by a tx-only 1475255736Sdavidch * connection. 1476255736Sdavidch * 1477255736Sdavidch * Therefore max_cos is also a number of valid entries in the cids 1478255736Sdavidch * array. 1479255736Sdavidch */ 1480255736Sdavidch uint8_t max_cos; 1481255736Sdavidch uint8_t num_tx_only, next_tx_only; 1482255736Sdavidch 1483255736Sdavidch enum ecore_q_state state, next_state; 1484255736Sdavidch 1485255736Sdavidch /* bits from enum ecore_q_type */ 1486255736Sdavidch unsigned long type; 1487255736Sdavidch 1488255736Sdavidch /* ECORE_Q_CMD_XX bits. 
This object implements "one 1489255736Sdavidch * pending" paradigm but for debug and tracing purposes it's 1490255736Sdavidch * more convenient to have different bits for different 1491255736Sdavidch * commands. 1492255736Sdavidch */ 1493255736Sdavidch unsigned long pending; 1494255736Sdavidch 1495255736Sdavidch /* Buffer to use as a ramrod data and its mapping */ 1496255736Sdavidch void *rdata; 1497255736Sdavidch ecore_dma_addr_t rdata_mapping; 1498255736Sdavidch 1499255736Sdavidch /** 1500255736Sdavidch * Performs one state change according to the given parameters. 1501255736Sdavidch * 1502255736Sdavidch * @return 0 in case of success and negative value otherwise. 1503255736Sdavidch */ 1504255736Sdavidch int (*send_cmd)(struct bxe_softc *sc, 1505255736Sdavidch struct ecore_queue_state_params *params); 1506255736Sdavidch 1507255736Sdavidch /** 1508255736Sdavidch * Sets the pending bit according to the requested transition. 1509255736Sdavidch */ 1510255736Sdavidch int (*set_pending)(struct ecore_queue_sp_obj *o, 1511255736Sdavidch struct ecore_queue_state_params *params); 1512255736Sdavidch 1513255736Sdavidch /** 1514255736Sdavidch * Checks that the requested state transition is legal. 1515255736Sdavidch */ 1516255736Sdavidch int (*check_transition)(struct bxe_softc *sc, 1517255736Sdavidch struct ecore_queue_sp_obj *o, 1518255736Sdavidch struct ecore_queue_state_params *params); 1519255736Sdavidch 1520255736Sdavidch /** 1521255736Sdavidch * Completes the pending command. 
1522255736Sdavidch */ 1523255736Sdavidch int (*complete_cmd)(struct bxe_softc *sc, 1524255736Sdavidch struct ecore_queue_sp_obj *o, 1525255736Sdavidch enum ecore_queue_cmd); 1526255736Sdavidch 1527255736Sdavidch int (*wait_comp)(struct bxe_softc *sc, 1528255736Sdavidch struct ecore_queue_sp_obj *o, 1529255736Sdavidch enum ecore_queue_cmd cmd); 1530255736Sdavidch}; 1531255736Sdavidch 1532255736Sdavidch/********************** Function state update *********************************/ 1533255736Sdavidch/* Allowed Function states */ 1534255736Sdavidchenum ecore_func_state { 1535255736Sdavidch ECORE_F_STATE_RESET, 1536255736Sdavidch ECORE_F_STATE_INITIALIZED, 1537255736Sdavidch ECORE_F_STATE_STARTED, 1538255736Sdavidch ECORE_F_STATE_TX_STOPPED, 1539255736Sdavidch ECORE_F_STATE_MAX, 1540255736Sdavidch}; 1541255736Sdavidch 1542255736Sdavidch/* Allowed Function commands */ 1543255736Sdavidchenum ecore_func_cmd { 1544255736Sdavidch ECORE_F_CMD_HW_INIT, 1545255736Sdavidch ECORE_F_CMD_START, 1546255736Sdavidch ECORE_F_CMD_STOP, 1547255736Sdavidch ECORE_F_CMD_HW_RESET, 1548255736Sdavidch ECORE_F_CMD_AFEX_UPDATE, 1549255736Sdavidch ECORE_F_CMD_AFEX_VIFLISTS, 1550255736Sdavidch ECORE_F_CMD_TX_STOP, 1551255736Sdavidch ECORE_F_CMD_TX_START, 1552255736Sdavidch ECORE_F_CMD_SWITCH_UPDATE, 1553255736Sdavidch ECORE_F_CMD_MAX, 1554255736Sdavidch}; 1555255736Sdavidch 1556255736Sdavidchstruct ecore_func_hw_init_params { 1557255736Sdavidch /* A load phase returned by MCP. 1558255736Sdavidch * 1559255736Sdavidch * May be: 1560255736Sdavidch * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP 1561255736Sdavidch * FW_MSG_CODE_DRV_LOAD_COMMON 1562255736Sdavidch * FW_MSG_CODE_DRV_LOAD_PORT 1563255736Sdavidch * FW_MSG_CODE_DRV_LOAD_FUNCTION 1564255736Sdavidch */ 1565255736Sdavidch uint32_t load_phase; 1566255736Sdavidch}; 1567255736Sdavidch 1568255736Sdavidchstruct ecore_func_hw_reset_params { 1569255736Sdavidch /* A load phase returned by MCP. 
1570255736Sdavidch * 1571255736Sdavidch * May be: 1572255736Sdavidch * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP 1573255736Sdavidch * FW_MSG_CODE_DRV_LOAD_COMMON 1574255736Sdavidch * FW_MSG_CODE_DRV_LOAD_PORT 1575255736Sdavidch * FW_MSG_CODE_DRV_LOAD_FUNCTION 1576255736Sdavidch */ 1577255736Sdavidch uint32_t reset_phase; 1578255736Sdavidch}; 1579255736Sdavidch 1580255736Sdavidchstruct ecore_func_start_params { 1581255736Sdavidch /* Multi Function mode: 1582255736Sdavidch * - Single Function 1583255736Sdavidch * - Switch Dependent 1584255736Sdavidch * - Switch Independent 1585255736Sdavidch */ 1586255736Sdavidch uint16_t mf_mode; 1587255736Sdavidch 1588255736Sdavidch /* Switch Dependent mode outer VLAN tag */ 1589255736Sdavidch uint16_t sd_vlan_tag; 1590255736Sdavidch 1591255736Sdavidch /* Function cos mode */ 1592255736Sdavidch uint8_t network_cos_mode; 1593255736Sdavidch 1594255736Sdavidch /* NVGRE classification enablement */ 1595255736Sdavidch uint8_t nvgre_clss_en; 1596255736Sdavidch 1597255736Sdavidch /* NO_GRE_TUNNEL/NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */ 1598255736Sdavidch uint8_t gre_tunnel_mode; 1599255736Sdavidch 1600255736Sdavidch /* GRE_OUTER_HEADERS_RSS/GRE_INNER_HEADERS_RSS/NVGRE_KEY_ENTROPY_RSS */ 1601255736Sdavidch uint8_t gre_tunnel_rss; 1602255736Sdavidch 1603255736Sdavidch}; 1604255736Sdavidch 1605255736Sdavidchstruct ecore_func_switch_update_params { 1606255736Sdavidch uint8_t suspend; 1607255736Sdavidch}; 1608255736Sdavidch 1609255736Sdavidchstruct ecore_func_afex_update_params { 1610255736Sdavidch uint16_t vif_id; 1611255736Sdavidch uint16_t afex_default_vlan; 1612255736Sdavidch uint8_t allowed_priorities; 1613255736Sdavidch}; 1614255736Sdavidch 1615255736Sdavidchstruct ecore_func_afex_viflists_params { 1616255736Sdavidch uint16_t vif_list_index; 1617255736Sdavidch uint8_t func_bit_map; 1618255736Sdavidch uint8_t afex_vif_list_command; 1619255736Sdavidch uint8_t func_to_clear; 1620255736Sdavidch}; 1621255736Sdavidchstruct ecore_func_tx_start_params 
{ 1622255736Sdavidch struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES]; 1623255736Sdavidch uint8_t dcb_enabled; 1624255736Sdavidch uint8_t dcb_version; 1625255736Sdavidch uint8_t dont_add_pri_0; 1626255736Sdavidch}; 1627255736Sdavidch 1628255736Sdavidchstruct ecore_func_state_params { 1629255736Sdavidch struct ecore_func_sp_obj *f_obj; 1630255736Sdavidch 1631255736Sdavidch /* Current command */ 1632255736Sdavidch enum ecore_func_cmd cmd; 1633255736Sdavidch 1634255736Sdavidch /* may have RAMROD_COMP_WAIT set only */ 1635255736Sdavidch unsigned long ramrod_flags; 1636255736Sdavidch 1637255736Sdavidch /* Params according to the current command */ 1638255736Sdavidch union { 1639255736Sdavidch struct ecore_func_hw_init_params hw_init; 1640255736Sdavidch struct ecore_func_hw_reset_params hw_reset; 1641255736Sdavidch struct ecore_func_start_params start; 1642255736Sdavidch struct ecore_func_switch_update_params switch_update; 1643255736Sdavidch struct ecore_func_afex_update_params afex_update; 1644255736Sdavidch struct ecore_func_afex_viflists_params afex_viflists; 1645255736Sdavidch struct ecore_func_tx_start_params tx_start; 1646255736Sdavidch } params; 1647255736Sdavidch}; 1648255736Sdavidch 1649255736Sdavidchstruct ecore_func_sp_drv_ops { 1650255736Sdavidch /* Init tool + runtime initialization: 1651255736Sdavidch * - Common Chip 1652255736Sdavidch * - Common (per Path) 1653255736Sdavidch * - Port 1654255736Sdavidch * - Function phases 1655255736Sdavidch */ 1656255736Sdavidch int (*init_hw_cmn_chip)(struct bxe_softc *sc); 1657255736Sdavidch int (*init_hw_cmn)(struct bxe_softc *sc); 1658255736Sdavidch int (*init_hw_port)(struct bxe_softc *sc); 1659255736Sdavidch int (*init_hw_func)(struct bxe_softc *sc); 1660255736Sdavidch 1661255736Sdavidch /* Reset Function HW: Common, Port, Function phases. 
*/ 1662255736Sdavidch void (*reset_hw_cmn)(struct bxe_softc *sc); 1663255736Sdavidch void (*reset_hw_port)(struct bxe_softc *sc); 1664255736Sdavidch void (*reset_hw_func)(struct bxe_softc *sc); 1665255736Sdavidch 1666255736Sdavidch /* Init/Free GUNZIP resources */ 1667255736Sdavidch int (*gunzip_init)(struct bxe_softc *sc); 1668255736Sdavidch void (*gunzip_end)(struct bxe_softc *sc); 1669255736Sdavidch 1670255736Sdavidch /* Prepare/Release FW resources */ 1671255736Sdavidch int (*init_fw)(struct bxe_softc *sc); 1672255736Sdavidch void (*release_fw)(struct bxe_softc *sc); 1673255736Sdavidch}; 1674255736Sdavidch 1675255736Sdavidchstruct ecore_func_sp_obj { 1676255736Sdavidch enum ecore_func_state state, next_state; 1677255736Sdavidch 1678255736Sdavidch /* ECORE_FUNC_CMD_XX bits. This object implements "one 1679255736Sdavidch * pending" paradigm but for debug and tracing purposes it's 1680255736Sdavidch * more convenient to have different bits for different 1681255736Sdavidch * commands. 1682255736Sdavidch */ 1683255736Sdavidch unsigned long pending; 1684255736Sdavidch 1685255736Sdavidch /* Buffer to use as a ramrod data and its mapping */ 1686255736Sdavidch void *rdata; 1687255736Sdavidch ecore_dma_addr_t rdata_mapping; 1688255736Sdavidch 1689255736Sdavidch /* Buffer to use as a afex ramrod data and its mapping. 1690255736Sdavidch * This can't be same rdata as above because afex ramrod requests 1691255736Sdavidch * can arrive to the object in parallel to other ramrod requests. 
1692255736Sdavidch */ 1693255736Sdavidch void *afex_rdata; 1694255736Sdavidch ecore_dma_addr_t afex_rdata_mapping; 1695255736Sdavidch 1696255736Sdavidch /* this mutex validates that when pending flag is taken, the next 1697255736Sdavidch * ramrod to be sent will be the one set the pending bit 1698255736Sdavidch */ 1699255736Sdavidch ECORE_MUTEX one_pending_mutex; 1700255736Sdavidch 1701255736Sdavidch /* Driver interface */ 1702255736Sdavidch struct ecore_func_sp_drv_ops *drv; 1703255736Sdavidch 1704255736Sdavidch /** 1705255736Sdavidch * Performs one state change according to the given parameters. 1706255736Sdavidch * 1707255736Sdavidch * @return 0 in case of success and negative value otherwise. 1708255736Sdavidch */ 1709255736Sdavidch int (*send_cmd)(struct bxe_softc *sc, 1710255736Sdavidch struct ecore_func_state_params *params); 1711255736Sdavidch 1712255736Sdavidch /** 1713255736Sdavidch * Checks that the requested state transition is legal. 1714255736Sdavidch */ 1715255736Sdavidch int (*check_transition)(struct bxe_softc *sc, 1716255736Sdavidch struct ecore_func_sp_obj *o, 1717255736Sdavidch struct ecore_func_state_params *params); 1718255736Sdavidch 1719255736Sdavidch /** 1720255736Sdavidch * Completes the pending command. 
1721255736Sdavidch */ 1722255736Sdavidch int (*complete_cmd)(struct bxe_softc *sc, 1723255736Sdavidch struct ecore_func_sp_obj *o, 1724255736Sdavidch enum ecore_func_cmd cmd); 1725255736Sdavidch 1726255736Sdavidch int (*wait_comp)(struct bxe_softc *sc, struct ecore_func_sp_obj *o, 1727255736Sdavidch enum ecore_func_cmd cmd); 1728255736Sdavidch}; 1729255736Sdavidch 1730255736Sdavidch/********************** Interfaces ********************************************/ 1731255736Sdavidch/* Queueable objects set */ 1732255736Sdavidchunion ecore_qable_obj { 1733255736Sdavidch struct ecore_vlan_mac_obj vlan_mac; 1734255736Sdavidch}; 1735255736Sdavidch/************** Function state update *********/ 1736255736Sdavidchvoid ecore_init_func_obj(struct bxe_softc *sc, 1737255736Sdavidch struct ecore_func_sp_obj *obj, 1738255736Sdavidch void *rdata, ecore_dma_addr_t rdata_mapping, 1739255736Sdavidch void *afex_rdata, ecore_dma_addr_t afex_rdata_mapping, 1740255736Sdavidch struct ecore_func_sp_drv_ops *drv_iface); 1741255736Sdavidch 1742255736Sdavidchint ecore_func_state_change(struct bxe_softc *sc, 1743255736Sdavidch struct ecore_func_state_params *params); 1744255736Sdavidch 1745255736Sdavidchenum ecore_func_state ecore_func_get_state(struct bxe_softc *sc, 1746255736Sdavidch struct ecore_func_sp_obj *o); 1747255736Sdavidch/******************* Queue State **************/ 1748255736Sdavidchvoid ecore_init_queue_obj(struct bxe_softc *sc, 1749255736Sdavidch struct ecore_queue_sp_obj *obj, uint8_t cl_id, uint32_t *cids, 1750255736Sdavidch uint8_t cid_cnt, uint8_t func_id, void *rdata, 1751255736Sdavidch ecore_dma_addr_t rdata_mapping, unsigned long type); 1752255736Sdavidch 1753255736Sdavidchint ecore_queue_state_change(struct bxe_softc *sc, 1754255736Sdavidch struct ecore_queue_state_params *params); 1755255736Sdavidch 1756255736Sdavidchint ecore_get_q_logical_state(struct bxe_softc *sc, 1757255736Sdavidch struct ecore_queue_sp_obj *obj); 1758255736Sdavidch 
1759255736Sdavidch/********************* VLAN-MAC ****************/ 1760255736Sdavidchvoid ecore_init_mac_obj(struct bxe_softc *sc, 1761255736Sdavidch struct ecore_vlan_mac_obj *mac_obj, 1762255736Sdavidch uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata, 1763255736Sdavidch ecore_dma_addr_t rdata_mapping, int state, 1764255736Sdavidch unsigned long *pstate, ecore_obj_type type, 1765255736Sdavidch struct ecore_credit_pool_obj *macs_pool); 1766255736Sdavidch 1767255736Sdavidchvoid ecore_init_vlan_obj(struct bxe_softc *sc, 1768255736Sdavidch struct ecore_vlan_mac_obj *vlan_obj, 1769255736Sdavidch uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata, 1770255736Sdavidch ecore_dma_addr_t rdata_mapping, int state, 1771255736Sdavidch unsigned long *pstate, ecore_obj_type type, 1772255736Sdavidch struct ecore_credit_pool_obj *vlans_pool); 1773255736Sdavidch 1774255736Sdavidchvoid ecore_init_vlan_mac_obj(struct bxe_softc *sc, 1775255736Sdavidch struct ecore_vlan_mac_obj *vlan_mac_obj, 1776255736Sdavidch uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata, 1777255736Sdavidch ecore_dma_addr_t rdata_mapping, int state, 1778255736Sdavidch unsigned long *pstate, ecore_obj_type type, 1779255736Sdavidch struct ecore_credit_pool_obj *macs_pool, 1780255736Sdavidch struct ecore_credit_pool_obj *vlans_pool); 1781255736Sdavidch 1782255736Sdavidchint ecore_vlan_mac_h_read_lock(struct bxe_softc *sc, 1783255736Sdavidch struct ecore_vlan_mac_obj *o); 1784255736Sdavidchvoid ecore_vlan_mac_h_read_unlock(struct bxe_softc *sc, 1785255736Sdavidch struct ecore_vlan_mac_obj *o); 1786255736Sdavidchint ecore_vlan_mac_h_write_lock(struct bxe_softc *sc, 1787255736Sdavidch struct ecore_vlan_mac_obj *o); 1788255736Sdavidchvoid ecore_vlan_mac_h_write_unlock(struct bxe_softc *sc, 1789255736Sdavidch struct ecore_vlan_mac_obj *o); 1790255736Sdavidchint ecore_config_vlan_mac(struct bxe_softc *sc, 1791255736Sdavidch struct ecore_vlan_mac_ramrod_params *p); 1792255736Sdavidch 
1793255736Sdavidchint ecore_vlan_mac_move(struct bxe_softc *sc, 1794255736Sdavidch struct ecore_vlan_mac_ramrod_params *p, 1795255736Sdavidch struct ecore_vlan_mac_obj *dest_o); 1796255736Sdavidch 1797255736Sdavidch/********************* RX MODE ****************/ 1798255736Sdavidch 1799255736Sdavidchvoid ecore_init_rx_mode_obj(struct bxe_softc *sc, 1800255736Sdavidch struct ecore_rx_mode_obj *o); 1801255736Sdavidch 1802255736Sdavidch/** 1803255736Sdavidch * ecore_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters. 1804255736Sdavidch * 1805255736Sdavidch * @p: Command parameters 1806255736Sdavidch * 1807255736Sdavidch * Return: 0 - if operation was successful and there is no pending completions, 1808255736Sdavidch * positive number - if there are pending completions, 1809255736Sdavidch * negative - if there were errors 1810255736Sdavidch */ 1811255736Sdavidchint ecore_config_rx_mode(struct bxe_softc *sc, 1812255736Sdavidch struct ecore_rx_mode_ramrod_params *p); 1813255736Sdavidch 1814255736Sdavidch/****************** MULTICASTS ****************/ 1815255736Sdavidch 1816255736Sdavidchvoid ecore_init_mcast_obj(struct bxe_softc *sc, 1817255736Sdavidch struct ecore_mcast_obj *mcast_obj, 1818255736Sdavidch uint8_t mcast_cl_id, uint32_t mcast_cid, uint8_t func_id, 1819255736Sdavidch uint8_t engine_id, void *rdata, ecore_dma_addr_t rdata_mapping, 1820255736Sdavidch int state, unsigned long *pstate, 1821255736Sdavidch ecore_obj_type type); 1822255736Sdavidch 1823255736Sdavidch/** 1824255736Sdavidch * ecore_config_mcast - Configure multicast MACs list. 
1825255736Sdavidch * 1826255736Sdavidch * @cmd: command to execute: BNX2X_MCAST_CMD_X 1827255736Sdavidch * 1828255736Sdavidch * May configure a new list 1829255736Sdavidch * provided in p->mcast_list (ECORE_MCAST_CMD_ADD), clean up 1830255736Sdavidch * (ECORE_MCAST_CMD_DEL) or restore (ECORE_MCAST_CMD_RESTORE) a current 1831255736Sdavidch * configuration, continue to execute the pending commands 1832255736Sdavidch * (ECORE_MCAST_CMD_CONT). 1833255736Sdavidch * 1834255736Sdavidch * If previous command is still pending or if number of MACs to 1835255736Sdavidch * configure is more that maximum number of MACs in one command, 1836255736Sdavidch * the current command will be enqueued to the tail of the 1837255736Sdavidch * pending commands list. 1838255736Sdavidch * 1839255736Sdavidch * Return: 0 is operation was successfull and there are no pending completions, 1840255736Sdavidch * negative if there were errors, positive if there are pending 1841255736Sdavidch * completions. 1842255736Sdavidch */ 1843255736Sdavidchint ecore_config_mcast(struct bxe_softc *sc, 1844255736Sdavidch struct ecore_mcast_ramrod_params *p, 1845255736Sdavidch enum ecore_mcast_cmd cmd); 1846255736Sdavidch 1847255736Sdavidch/****************** CREDIT POOL ****************/ 1848255736Sdavidchvoid ecore_init_mac_credit_pool(struct bxe_softc *sc, 1849255736Sdavidch struct ecore_credit_pool_obj *p, uint8_t func_id, 1850255736Sdavidch uint8_t func_num); 1851255736Sdavidchvoid ecore_init_vlan_credit_pool(struct bxe_softc *sc, 1852255736Sdavidch struct ecore_credit_pool_obj *p, uint8_t func_id, 1853255736Sdavidch uint8_t func_num); 1854255736Sdavidch 1855255736Sdavidch/****************** RSS CONFIGURATION ****************/ 1856255736Sdavidchvoid ecore_init_rss_config_obj(struct bxe_softc *sc, 1857255736Sdavidch struct ecore_rss_config_obj *rss_obj, 1858255736Sdavidch uint8_t cl_id, uint32_t cid, uint8_t func_id, uint8_t engine_id, 1859255736Sdavidch void *rdata, ecore_dma_addr_t rdata_mapping, 
1860255736Sdavidch int state, unsigned long *pstate, 1861255736Sdavidch ecore_obj_type type); 1862255736Sdavidch 1863255736Sdavidch/** 1864255736Sdavidch * ecore_config_rss - Updates RSS configuration according to provided parameters 1865255736Sdavidch * 1866255736Sdavidch * Return: 0 in case of success 1867255736Sdavidch */ 1868255736Sdavidchint ecore_config_rss(struct bxe_softc *sc, 1869255736Sdavidch struct ecore_config_rss_params *p); 1870255736Sdavidch 1871255736Sdavidch/** 1872255736Sdavidch * ecore_get_rss_ind_table - Return the current ind_table configuration. 1873255736Sdavidch * 1874255736Sdavidch * @ind_table: buffer to fill with the current indirection 1875255736Sdavidch * table content. Should be at least 1876255736Sdavidch * T_ETH_INDIRECTION_TABLE_SIZE bytes long. 1877255736Sdavidch */ 1878255736Sdavidchvoid ecore_get_rss_ind_table(struct ecore_rss_config_obj *rss_obj, 1879255736Sdavidch uint8_t *ind_table); 1880255736Sdavidch 1881258203Sedavis/* set as inline so printout will show the offending function */ 1882258203Sedavisint validate_vlan_mac(struct bxe_softc *sc, 1883258203Sedavis struct ecore_vlan_mac_obj *vlan_mac); 1884255736Sdavidch 1885255736Sdavidch#endif /* ECORE_SP_H */ 1886255736Sdavidch 1887