/*-
 * Copyright (c) 2007-2013 Broadcom Corporation. All rights reserved.
 *
 * Eric Davis        <edavis@broadcom.com>
 * David Christensen <davidch@broadcom.com>
 * Gary Zambrano     <zambrano@broadcom.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written consent.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS 25255736Sdavidch * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26255736Sdavidch * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27255736Sdavidch * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28255736Sdavidch * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29255736Sdavidch * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30255736Sdavidch * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 31255736Sdavidch * THE POSSIBILITY OF SUCH DAMAGE. 32255736Sdavidch */ 33255736Sdavidch 34255736Sdavidch#include <sys/cdefs.h> 35255736Sdavidch__FBSDID("$FreeBSD$"); 36255736Sdavidch 37255736Sdavidch#ifndef ECORE_SP_H 38255736Sdavidch#define ECORE_SP_H 39255736Sdavidch 40255736Sdavidch 41255736Sdavidch#include <sys/types.h> 42255736Sdavidch#include <sys/endian.h> 43255736Sdavidch#include <sys/param.h> 44255736Sdavidch#include <sys/lock.h> 45255736Sdavidch#include <sys/mutex.h> 46255736Sdavidch#include <sys/malloc.h> 47255736Sdavidch#include <sys/kernel.h> 48255736Sdavidch#include <machine/bus.h> 49255736Sdavidch#include <net/ethernet.h> 50255736Sdavidch 51255736Sdavidch#if _BYTE_ORDER == _LITTLE_ENDIAN 52255736Sdavidch#ifndef LITTLE_ENDIAN 53255736Sdavidch#define LITTLE_ENDIAN 54255736Sdavidch#endif 55255736Sdavidch#ifndef __LITTLE_ENDIAN 56255736Sdavidch#define __LITTLE_ENDIAN 57255736Sdavidch#endif 58255736Sdavidch#undef BIG_ENDIAN 59255736Sdavidch#undef __BIG_ENDIAN 60255736Sdavidch#else /* _BIG_ENDIAN */ 61255736Sdavidch#ifndef BIG_ENDIAN 62255736Sdavidch#define BIG_ENDIAN 63255736Sdavidch#endif 64255736Sdavidch#ifndef __BIG_ENDIAN 65255736Sdavidch#define __BIG_ENDIAN 66255736Sdavidch#endif 67255736Sdavidch#undef LITTLE_ENDIAN 68255736Sdavidch#undef __LITTLE_ENDIAN 69255736Sdavidch#endif 70255736Sdavidch 71255736Sdavidch#include "ecore_mfw_req.h" 72255736Sdavidch#include 
"ecore_fw_defs.h" 73255736Sdavidch#include "ecore_hsi.h" 74255736Sdavidch#include "ecore_reg.h" 75255736Sdavidch 76255736Sdavidchstruct bxe_softc; 77255736Sdavidchtypedef bus_addr_t ecore_dma_addr_t; /* expected to be 64 bit wide */ 78255736Sdavidchtypedef volatile int ecore_atomic_t; 79255736Sdavidch 80256319Sedavis#ifndef __bool_true_false_are_defined 81256319Sedavis#ifndef __cplusplus 82256319Sedavis#define bool _Bool 83256319Sedavis#if __STDC_VERSION__ < 199901L && __GNUC__ < 3 && !defined(__INTEL_COMPILER) 84256319Sedavistypedef _Bool bool; 85255736Sdavidch#endif 86256319Sedavis#endif /* !__cplusplus */ 87256319Sedavis#endif /* !__bool_true_false_are_defined$ */ 88255736Sdavidch 89255736Sdavidch#define ETH_ALEN ETHER_ADDR_LEN /* 6 */ 90255736Sdavidch 91255736Sdavidch#define ECORE_SWCID_SHIFT 17 92255736Sdavidch#define ECORE_SWCID_MASK ((0x1 << ECORE_SWCID_SHIFT) - 1) 93255736Sdavidch 94255736Sdavidch#define ECORE_MC_HASH_SIZE 8 95255736Sdavidch#define ECORE_MC_HASH_OFFSET(sc, i) \ 96255736Sdavidch (BAR_TSTRORM_INTMEM + \ 97255736Sdavidch TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(FUNC_ID(sc)) + i*4) 98255736Sdavidch 99255736Sdavidch#define ECORE_MAX_MULTICAST 64 100255736Sdavidch#define ECORE_MAX_EMUL_MULTI 1 101255736Sdavidch 102255736Sdavidch#define IRO sc->iro_array 103255736Sdavidch 104255736Sdavidchtypedef struct mtx ECORE_MUTEX; 105255736Sdavidch#define ECORE_MUTEX_INIT(_mutex) \ 106255736Sdavidch mtx_init(_mutex, "ecore_lock", "ECORE Lock", MTX_DEF) 107255736Sdavidch#define ECORE_MUTEX_LOCK(_mutex) mtx_lock(_mutex) 108255736Sdavidch#define ECORE_MUTEX_UNLOCK(_mutex) mtx_unlock(_mutex) 109255736Sdavidch 110255736Sdavidchtypedef struct mtx ECORE_MUTEX_SPIN; 111255736Sdavidch#define ECORE_SPIN_LOCK_INIT(_spin, _sc) \ 112255736Sdavidch mtx_init(_spin, "ecore_lock", "ECORE Lock", MTX_DEF) 113255736Sdavidch#define ECORE_SPIN_LOCK_BH(_spin) mtx_lock(_spin) /* bh = bottom-half */ 114255736Sdavidch#define ECORE_SPIN_UNLOCK_BH(_spin) mtx_unlock(_spin) /* 
bh = bottom-half */ 115255736Sdavidch 116255736Sdavidch#define ECORE_SMP_MB_AFTER_CLEAR_BIT() mb() 117255736Sdavidch#define ECORE_SMP_MB_BEFORE_CLEAR_BIT() mb() 118255736Sdavidch#define ECORE_SMP_MB() mb() 119255736Sdavidch#define ECORE_SMP_RMB() rmb() 120255736Sdavidch#define ECORE_SMP_WMB() wmb() 121255736Sdavidch#define ECORE_MMIOWB() wmb() 122255736Sdavidch 123255736Sdavidch#define ECORE_SET_BIT_NA(bit, var) bit_set(var, bit) /* non-atomic */ 124255736Sdavidch#define ECORE_CLEAR_BIT_NA(bit, var) bit_clear(var, bit) /* non-atomic */ 125255736Sdavidch#define ECORE_TEST_BIT(bit, var) bxe_test_bit(bit, var) 126255736Sdavidch#define ECORE_SET_BIT(bit, var) bxe_set_bit(bit, var) 127255736Sdavidch#define ECORE_CLEAR_BIT(bit, var) bxe_clear_bit(bit, var) 128255736Sdavidch#define ECORE_TEST_AND_CLEAR_BIT(bit, var) bxe_test_and_clear_bit(bit, var) 129255736Sdavidch 130255736Sdavidch#define ECORE_ATOMIC_READ(a) atomic_load_acq_int((volatile int *)a) 131255736Sdavidch#define ECORE_ATOMIC_SET(a, v) atomic_store_rel_int((volatile int *)a, v) 132255736Sdavidch#define ECORE_ATOMIC_CMPXCHG(a, o, n) bxe_cmpxchg((volatile int *)a, o, n) 133255736Sdavidch 134255736Sdavidch#define ECORE_RET_PENDING(pending_bit, pending) \ 135255736Sdavidch (ECORE_TEST_BIT(pending_bit, pending) ? 
ECORE_PENDING : ECORE_SUCCESS) 136255736Sdavidch 137255736Sdavidch#define ECORE_SET_FLAG(value, mask, flag) \ 138255736Sdavidch do { \ 139255736Sdavidch (value) &= ~(mask); \ 140255736Sdavidch (value) |= ((flag) << (mask##_SHIFT)); \ 141255736Sdavidch } while (0) 142255736Sdavidch 143255736Sdavidch#define ECORE_GET_FLAG(value, mask) \ 144255736Sdavidch (((value) &= (mask)) >> (mask##_SHIFT)) 145255736Sdavidch 146255736Sdavidch#define ECORE_MIGHT_SLEEP() 147255736Sdavidch 148255736Sdavidch#define ECORE_FCOE_CID(sc) ((sc)->fp[FCOE_IDX(sc)].cl_id) 149255736Sdavidch 150255736Sdavidch#define ECORE_MEMCMP(_a, _b, _s) memcmp(_a, _b, _s) 151255736Sdavidch#define ECORE_MEMCPY(_a, _b, _s) memcpy(_a, _b, _s) 152255736Sdavidch#define ECORE_MEMSET(_a, _c, _s) memset(_a, _c, _s) 153255736Sdavidch 154255736Sdavidch#define ECORE_CPU_TO_LE16(x) htole16(x) 155255736Sdavidch#define ECORE_CPU_TO_LE32(x) htole32(x) 156255736Sdavidch 157255736Sdavidch#define ECORE_WAIT(_s, _t) DELAY(1000) 158255736Sdavidch#define ECORE_MSLEEP(_t) DELAY((_t) * 1000) 159255736Sdavidch 160255736Sdavidch#define ECORE_LIKELY(x) __predict_true(x) 161255736Sdavidch#define ECORE_UNLIKELY(x) __predict_false(x) 162255736Sdavidch 163255736Sdavidch#define ECORE_ZALLOC(_size, _flags, _sc) \ 164255736Sdavidch malloc(_size, M_TEMP, (M_NOWAIT | M_ZERO)) 165255736Sdavidch 166255736Sdavidch#define ECORE_CALLOC(_len, _size, _flags, _sc) \ 167255736Sdavidch malloc(_len * _size, M_TEMP, (M_NOWAIT | M_ZERO)) 168255736Sdavidch 169255736Sdavidch#define ECORE_FREE(_s, _buf, _size) free(_buf, M_TEMP) 170255736Sdavidch 171255736Sdavidch#define SC_ILT(sc) ((sc)->ilt) 172255736Sdavidch#define ILOG2(x) bxe_ilog2(x) 173255736Sdavidch 174255736Sdavidch#define ECORE_ILT_ZALLOC(x, y, size) \ 175255736Sdavidch do { \ 176255736Sdavidch x = malloc(sizeof(struct bxe_dma), M_DEVBUF, (M_NOWAIT | M_ZERO)); \ 177255736Sdavidch if (x) { \ 178255736Sdavidch if (bxe_dma_alloc((struct bxe_softc *)sc, \ 179255736Sdavidch size, (struct bxe_dma *)x, \ 
180255736Sdavidch "ECORE_ILT") != 0) { \ 181255736Sdavidch free(x, M_DEVBUF); \ 182255736Sdavidch x = NULL; \ 183255736Sdavidch *y = 0; \ 184255736Sdavidch } else { \ 185255736Sdavidch *y = ((struct bxe_dma *)x)->paddr; \ 186255736Sdavidch } \ 187255736Sdavidch } \ 188255736Sdavidch } while (0) 189255736Sdavidch 190255736Sdavidch#define ECORE_ILT_FREE(x, y, size) \ 191255736Sdavidch do { \ 192255736Sdavidch if (x) { \ 193255736Sdavidch bxe_dma_free((struct bxe_softc *)sc, x); \ 194255736Sdavidch free(x, M_DEVBUF); \ 195255736Sdavidch x = NULL; \ 196255736Sdavidch y = 0; \ 197255736Sdavidch } \ 198255736Sdavidch } while (0) 199255736Sdavidch 200255736Sdavidch#define ECORE_IS_VALID_ETHER_ADDR(_mac) TRUE 201255736Sdavidch 202255736Sdavidch#define ECORE_IS_MF_SD_MODE IS_MF_SD_MODE 203255736Sdavidch#define ECORE_IS_MF_SI_MODE IS_MF_SI_MODE 204255736Sdavidch#define ECORE_IS_MF_AFEX_MODE IS_MF_AFEX_MODE 205255736Sdavidch 206255736Sdavidch#define ECORE_SET_CTX_VALIDATION bxe_set_ctx_validation 207255736Sdavidch 208255736Sdavidch#define ECORE_UPDATE_COALESCE_SB_INDEX bxe_update_coalesce_sb_index 209255736Sdavidch 210255736Sdavidch#define ECORE_ALIGN(x, a) ((((x) + (a) - 1) / (a)) * (a)) 211255736Sdavidch 212255736Sdavidch#define ECORE_REG_WR_DMAE_LEN REG_WR_DMAE_LEN 213255736Sdavidch 214255736Sdavidch#define ECORE_PATH_ID SC_PATH 215255736Sdavidch#define ECORE_PORT_ID SC_PORT 216255736Sdavidch#define ECORE_FUNC_ID SC_FUNC 217255736Sdavidch#define ECORE_ABS_FUNC_ID SC_ABS_FUNC 218255736Sdavidch 219255736Sdavidchuint32_t calc_crc32(uint8_t *crc32_packet, uint32_t crc32_length, 220255736Sdavidch uint32_t crc32_seed, uint8_t complement); 221255736Sdavidchstatic inline uint32_t 222255736SdavidchECORE_CRC32_LE(uint32_t seed, uint8_t *mac, uint32_t len) 223255736Sdavidch{ 224255736Sdavidch uint32_t packet_buf[2] = {0}; 225255736Sdavidch memcpy(((uint8_t *)(&packet_buf[0]))+2, &mac[0], 2); 226255736Sdavidch memcpy(&packet_buf[1], &mac[2], 4); 227255736Sdavidch return 
bswap32(calc_crc32((uint8_t *)packet_buf, 8, seed, 0)); 228255736Sdavidch} 229255736Sdavidch 230255736Sdavidch#define ecore_sp_post(_sc, _a, _b, _c, _d) \ 231255736Sdavidch bxe_sp_post(_sc, _a, _b, U64_HI(_c), U64_LO(_c), _d) 232255736Sdavidch 233255736Sdavidch#define ECORE_DBG_BREAK_IF(exp) \ 234255736Sdavidch do { \ 235255736Sdavidch if (__predict_false(exp)) { \ 236255736Sdavidch panic("ECORE"); \ 237255736Sdavidch } \ 238255736Sdavidch } while (0) 239255736Sdavidch 240255736Sdavidch#define ECORE_BUG() \ 241255736Sdavidch do { \ 242255736Sdavidch panic("BUG (%s:%d)", __FILE__, __LINE__); \ 243255736Sdavidch } while(0); 244255736Sdavidch 245255736Sdavidch#define ECORE_BUG_ON(exp) \ 246255736Sdavidch do { \ 247255736Sdavidch if (__predict_true(exp)) { \ 248255736Sdavidch panic("BUG_ON (%s:%d)", __FILE__, __LINE__); \ 249255736Sdavidch } \ 250255736Sdavidch } while (0) 251255736Sdavidch 252255736Sdavidch#define ECORE_ERR(str, ...) \ 253255736Sdavidch BLOGE(sc, "ECORE: " str, ##__VA_ARGS__) 254255736Sdavidch 255255736Sdavidch#define DBG_SP 0x00000004 /* defined in bxe.h */ 256255736Sdavidch 257255736Sdavidch#define ECORE_MSG(sc, m, ...) 
\ 258255736Sdavidch BLOGD(sc, DBG_SP, "ECORE: " m, ##__VA_ARGS__) 259255736Sdavidch 260255736Sdavidchtypedef struct _ecore_list_entry_t 261255736Sdavidch{ 262255736Sdavidch struct _ecore_list_entry_t *next, *prev; 263255736Sdavidch} ecore_list_entry_t; 264255736Sdavidch 265255736Sdavidchtypedef struct ecore_list_t 266255736Sdavidch{ 267255736Sdavidch ecore_list_entry_t *head, *tail; 268255736Sdavidch unsigned long cnt; 269255736Sdavidch} ecore_list_t; 270255736Sdavidch 271255736Sdavidch/* initialize the list */ 272255736Sdavidch#define ECORE_LIST_INIT(_list) \ 273255736Sdavidch do { \ 274255736Sdavidch (_list)->head = NULL; \ 275255736Sdavidch (_list)->tail = NULL; \ 276255736Sdavidch (_list)->cnt = 0; \ 277255736Sdavidch } while (0) 278255736Sdavidch 279255736Sdavidch/* return TRUE if the element is the last on the list */ 280255736Sdavidch#define ECORE_LIST_IS_LAST(_elem, _list) \ 281255736Sdavidch (_elem == (_list)->tail) 282255736Sdavidch 283255736Sdavidch/* return TRUE if the list is empty */ 284255736Sdavidch#define ECORE_LIST_IS_EMPTY(_list) \ 285255736Sdavidch ((_list)->cnt == 0) 286255736Sdavidch 287255736Sdavidch/* return the first element */ 288255736Sdavidch#define ECORE_LIST_FIRST_ENTRY(_list, cast, _link) \ 289255736Sdavidch (cast *)((_list)->head) 290255736Sdavidch 291255736Sdavidch/* return the next element */ 292255736Sdavidch#define ECORE_LIST_NEXT(_elem, _link, cast) \ 293255736Sdavidch (cast *)((&((_elem)->_link))->next) 294255736Sdavidch 295255736Sdavidch/* push an element on the head of the list */ 296255736Sdavidch#define ECORE_LIST_PUSH_HEAD(_elem, _list) \ 297255736Sdavidch do { \ 298255736Sdavidch (_elem)->prev = (ecore_list_entry_t *)0; \ 299255736Sdavidch (_elem)->next = (_list)->head; \ 300255736Sdavidch if ((_list)->tail == (ecore_list_entry_t *)0) { \ 301255736Sdavidch (_list)->tail = (_elem); \ 302255736Sdavidch } else { \ 303255736Sdavidch (_list)->head->prev = (_elem); \ 304255736Sdavidch } \ 305255736Sdavidch (_list)->head = 
(_elem); \ 306255736Sdavidch (_list)->cnt++; \ 307255736Sdavidch } while (0) 308255736Sdavidch 309255736Sdavidch/* push an element on the tail of the list */ 310255736Sdavidch#define ECORE_LIST_PUSH_TAIL(_elem, _list) \ 311255736Sdavidch do { \ 312255736Sdavidch (_elem)->next = (ecore_list_entry_t *)0; \ 313255736Sdavidch (_elem)->prev = (_list)->tail; \ 314255736Sdavidch if ((_list)->tail) { \ 315255736Sdavidch (_list)->tail->next = (_elem); \ 316255736Sdavidch } else { \ 317255736Sdavidch (_list)->head = (_elem); \ 318255736Sdavidch } \ 319255736Sdavidch (_list)->tail = (_elem); \ 320255736Sdavidch (_list)->cnt++; \ 321255736Sdavidch } while (0) 322255736Sdavidch 323255736Sdavidch/* push list1 on the head of list2 and return with list1 as empty */ 324255736Sdavidch#define ECORE_LIST_SPLICE_INIT(_list1, _list2) \ 325255736Sdavidch do { \ 326255736Sdavidch (_list1)->tail->next = (_list2)->head; \ 327255736Sdavidch if ((_list2)->head) { \ 328255736Sdavidch (_list2)->head->prev = (_list1)->tail; \ 329255736Sdavidch } else { \ 330255736Sdavidch (_list2)->tail = (_list1)->tail; \ 331255736Sdavidch } \ 332255736Sdavidch (_list2)->head = (_list1)->head; \ 333255736Sdavidch (_list2)->cnt += (_list1)->cnt; \ 334255736Sdavidch (_list1)->head = NULL; \ 335255736Sdavidch (_list1)->tail = NULL; \ 336255736Sdavidch (_list1)->cnt = 0; \ 337255736Sdavidch } while (0) 338255736Sdavidch 339255736Sdavidch/* remove an element from the list */ 340255736Sdavidch#define ECORE_LIST_REMOVE_ENTRY(_elem, _list) \ 341255736Sdavidch do { \ 342255736Sdavidch if ((_list)->head == (_elem)) { \ 343255736Sdavidch if ((_list)->head) { \ 344255736Sdavidch (_list)->head = (_list)->head->next; \ 345255736Sdavidch if ((_list)->head) { \ 346255736Sdavidch (_list)->head->prev = (ecore_list_entry_t *)0; \ 347255736Sdavidch } else { \ 348255736Sdavidch (_list)->tail = (ecore_list_entry_t *)0; \ 349255736Sdavidch } \ 350255736Sdavidch (_list)->cnt--; \ 351255736Sdavidch } \ 352255736Sdavidch } else if 
((_list)->tail == (_elem)) { \ 353255736Sdavidch if ((_list)->tail) { \ 354255736Sdavidch (_list)->tail = (_list)->tail->prev; \ 355255736Sdavidch if ((_list)->tail) { \ 356255736Sdavidch (_list)->tail->next = (ecore_list_entry_t *)0; \ 357255736Sdavidch } else { \ 358255736Sdavidch (_list)->head = (ecore_list_entry_t *)0; \ 359255736Sdavidch } \ 360255736Sdavidch (_list)->cnt--; \ 361255736Sdavidch } \ 362255736Sdavidch } else { \ 363255736Sdavidch (_elem)->prev->next = (_elem)->next; \ 364255736Sdavidch (_elem)->next->prev = (_elem)->prev; \ 365255736Sdavidch (_list)->cnt--; \ 366255736Sdavidch } \ 367255736Sdavidch } while (0) 368255736Sdavidch 369255736Sdavidch/* walk the list */ 370255736Sdavidch#define ECORE_LIST_FOR_EACH_ENTRY(pos, _list, _link, cast) \ 371255736Sdavidch for (pos = ECORE_LIST_FIRST_ENTRY(_list, cast, _link); \ 372255736Sdavidch pos; \ 373255736Sdavidch pos = ECORE_LIST_NEXT(pos, _link, cast)) 374255736Sdavidch 375255736Sdavidch/* walk the list (safely) */ 376255736Sdavidch#define ECORE_LIST_FOR_EACH_ENTRY_SAFE(pos, n, _list, _link, cast) \ 377255736Sdavidch for (pos = ECORE_LIST_FIRST_ENTRY(_list, cast, _lint), \ 378255736Sdavidch n = (pos) ? ECORE_LIST_NEXT(pos, _link, cast) : NULL; \ 379255736Sdavidch pos != NULL; \ 380255736Sdavidch pos = (cast *)n, \ 381255736Sdavidch n = (pos) ? 
ECORE_LIST_NEXT(pos, _link, cast) : NULL) 382255736Sdavidch 383255736Sdavidch 384255736Sdavidch/* Manipulate a bit vector defined as an array of uint64_t */ 385255736Sdavidch 386255736Sdavidch/* Number of bits in one sge_mask array element */ 387255736Sdavidch#define BIT_VEC64_ELEM_SZ 64 388255736Sdavidch#define BIT_VEC64_ELEM_SHIFT 6 389255736Sdavidch#define BIT_VEC64_ELEM_MASK ((uint64_t)BIT_VEC64_ELEM_SZ - 1) 390255736Sdavidch 391255736Sdavidch#define __BIT_VEC64_SET_BIT(el, bit) \ 392255736Sdavidch do { \ 393255736Sdavidch el = ((el) | ((uint64_t)0x1 << (bit))); \ 394255736Sdavidch } while (0) 395255736Sdavidch 396255736Sdavidch#define __BIT_VEC64_CLEAR_BIT(el, bit) \ 397255736Sdavidch do { \ 398255736Sdavidch el = ((el) & (~((uint64_t)0x1 << (bit)))); \ 399255736Sdavidch } while (0) 400255736Sdavidch 401255736Sdavidch#define BIT_VEC64_SET_BIT(vec64, idx) \ 402255736Sdavidch __BIT_VEC64_SET_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \ 403255736Sdavidch (idx) & BIT_VEC64_ELEM_MASK) 404255736Sdavidch 405255736Sdavidch#define BIT_VEC64_CLEAR_BIT(vec64, idx) \ 406255736Sdavidch __BIT_VEC64_CLEAR_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \ 407255736Sdavidch (idx) & BIT_VEC64_ELEM_MASK) 408255736Sdavidch 409255736Sdavidch#define BIT_VEC64_TEST_BIT(vec64, idx) \ 410255736Sdavidch (((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT] >> \ 411255736Sdavidch ((idx) & BIT_VEC64_ELEM_MASK)) & 0x1) 412255736Sdavidch 413255736Sdavidch/* 414255736Sdavidch * Creates a bitmask of all ones in less significant bits. 
415255736Sdavidch * idx - index of the most significant bit in the created mask 416255736Sdavidch */ 417255736Sdavidch#define BIT_VEC64_ONES_MASK(idx) \ 418255736Sdavidch (((uint64_t)0x1 << (((idx) & BIT_VEC64_ELEM_MASK) + 1)) - 1) 419255736Sdavidch#define BIT_VEC64_ELEM_ONE_MASK ((uint64_t)(~0)) 420255736Sdavidch 421255736Sdavidch/* fill in a MAC address the way the FW likes it */ 422255736Sdavidchstatic inline void 423255736Sdavidchecore_set_fw_mac_addr(uint16_t *fw_hi, 424255736Sdavidch uint16_t *fw_mid, 425255736Sdavidch uint16_t *fw_lo, 426255736Sdavidch uint8_t *mac) 427255736Sdavidch{ 428255736Sdavidch ((uint8_t *)fw_hi)[0] = mac[1]; 429255736Sdavidch ((uint8_t *)fw_hi)[1] = mac[0]; 430255736Sdavidch ((uint8_t *)fw_mid)[0] = mac[3]; 431255736Sdavidch ((uint8_t *)fw_mid)[1] = mac[2]; 432255736Sdavidch ((uint8_t *)fw_lo)[0] = mac[5]; 433255736Sdavidch ((uint8_t *)fw_lo)[1] = mac[4]; 434255736Sdavidch} 435255736Sdavidch 436255736Sdavidch 437255736Sdavidchenum ecore_status_t { 438255736Sdavidch ECORE_EXISTS = -6, 439255736Sdavidch ECORE_IO = -5, 440255736Sdavidch ECORE_TIMEOUT = -4, 441255736Sdavidch ECORE_INVAL = -3, 442255736Sdavidch ECORE_BUSY = -2, 443255736Sdavidch ECORE_NOMEM = -1, 444255736Sdavidch ECORE_SUCCESS = 0, 445255736Sdavidch /* PENDING is not an error and should be positive */ 446255736Sdavidch ECORE_PENDING = 1, 447255736Sdavidch}; 448255736Sdavidch 449255736Sdavidchenum { 450255736Sdavidch SWITCH_UPDATE, 451255736Sdavidch AFEX_UPDATE, 452255736Sdavidch}; 453255736Sdavidch 454255736Sdavidch 455255736Sdavidch 456255736Sdavidch 457255736Sdavidchstruct bxe_softc; 458255736Sdavidchstruct eth_context; 459255736Sdavidch 460255736Sdavidch/* Bits representing general command's configuration */ 461255736Sdavidchenum { 462255736Sdavidch RAMROD_TX, 463255736Sdavidch RAMROD_RX, 464255736Sdavidch /* Wait until all pending commands complete */ 465255736Sdavidch RAMROD_COMP_WAIT, 466255736Sdavidch /* Don't send a ramrod, only update a registry */ 
467255736Sdavidch RAMROD_DRV_CLR_ONLY, 468255736Sdavidch /* Configure HW according to the current object state */ 469255736Sdavidch RAMROD_RESTORE, 470255736Sdavidch /* Execute the next command now */ 471255736Sdavidch RAMROD_EXEC, 472255736Sdavidch /* Don't add a new command and continue execution of posponed 473255736Sdavidch * commands. If not set a new command will be added to the 474255736Sdavidch * pending commands list. 475255736Sdavidch */ 476255736Sdavidch RAMROD_CONT, 477255736Sdavidch /* If there is another pending ramrod, wait until it finishes and 478255736Sdavidch * re-try to submit this one. This flag can be set only in sleepable 479255736Sdavidch * context, and should not be set from the context that completes the 480255736Sdavidch * ramrods as deadlock will occur. 481255736Sdavidch */ 482255736Sdavidch RAMROD_RETRY, 483255736Sdavidch}; 484255736Sdavidch 485255736Sdavidchtypedef enum { 486255736Sdavidch ECORE_OBJ_TYPE_RX, 487255736Sdavidch ECORE_OBJ_TYPE_TX, 488255736Sdavidch ECORE_OBJ_TYPE_RX_TX, 489255736Sdavidch} ecore_obj_type; 490255736Sdavidch 491255736Sdavidch/* Public slow path states */ 492255736Sdavidchenum { 493255736Sdavidch ECORE_FILTER_MAC_PENDING, 494255736Sdavidch ECORE_FILTER_VLAN_PENDING, 495255736Sdavidch ECORE_FILTER_VLAN_MAC_PENDING, 496255736Sdavidch ECORE_FILTER_RX_MODE_PENDING, 497255736Sdavidch ECORE_FILTER_RX_MODE_SCHED, 498255736Sdavidch ECORE_FILTER_ISCSI_ETH_START_SCHED, 499255736Sdavidch ECORE_FILTER_ISCSI_ETH_STOP_SCHED, 500255736Sdavidch ECORE_FILTER_FCOE_ETH_START_SCHED, 501255736Sdavidch ECORE_FILTER_FCOE_ETH_STOP_SCHED, 502255736Sdavidch ECORE_FILTER_BYPASS_RX_MODE_PENDING, 503255736Sdavidch ECORE_FILTER_BYPASS_MAC_PENDING, 504255736Sdavidch ECORE_FILTER_BYPASS_RSS_CONF_PENDING, 505255736Sdavidch ECORE_FILTER_MCAST_PENDING, 506255736Sdavidch ECORE_FILTER_MCAST_SCHED, 507255736Sdavidch ECORE_FILTER_RSS_CONF_PENDING, 508255736Sdavidch ECORE_AFEX_FCOE_Q_UPDATE_PENDING, 509255736Sdavidch 
ECORE_AFEX_PENDING_VIFSET_MCP_ACK 510255736Sdavidch}; 511255736Sdavidch 512255736Sdavidchstruct ecore_raw_obj { 513255736Sdavidch uint8_t func_id; 514255736Sdavidch 515255736Sdavidch /* Queue params */ 516255736Sdavidch uint8_t cl_id; 517255736Sdavidch uint32_t cid; 518255736Sdavidch 519255736Sdavidch /* Ramrod data buffer params */ 520255736Sdavidch void *rdata; 521255736Sdavidch ecore_dma_addr_t rdata_mapping; 522255736Sdavidch 523255736Sdavidch /* Ramrod state params */ 524255736Sdavidch int state; /* "ramrod is pending" state bit */ 525255736Sdavidch unsigned long *pstate; /* pointer to state buffer */ 526255736Sdavidch 527255736Sdavidch ecore_obj_type obj_type; 528255736Sdavidch 529255736Sdavidch int (*wait_comp)(struct bxe_softc *sc, 530255736Sdavidch struct ecore_raw_obj *o); 531255736Sdavidch 532255736Sdavidch bool (*check_pending)(struct ecore_raw_obj *o); 533255736Sdavidch void (*clear_pending)(struct ecore_raw_obj *o); 534255736Sdavidch void (*set_pending)(struct ecore_raw_obj *o); 535255736Sdavidch}; 536255736Sdavidch 537255736Sdavidch/************************* VLAN-MAC commands related parameters ***************/ 538255736Sdavidchstruct ecore_mac_ramrod_data { 539255736Sdavidch uint8_t mac[ETH_ALEN]; 540255736Sdavidch uint8_t is_inner_mac; 541255736Sdavidch}; 542255736Sdavidch 543255736Sdavidchstruct ecore_vlan_ramrod_data { 544255736Sdavidch uint16_t vlan; 545255736Sdavidch}; 546255736Sdavidch 547255736Sdavidchstruct ecore_vlan_mac_ramrod_data { 548255736Sdavidch uint8_t mac[ETH_ALEN]; 549255736Sdavidch uint8_t is_inner_mac; 550255736Sdavidch uint16_t vlan; 551255736Sdavidch}; 552255736Sdavidch 553255736Sdavidchunion ecore_classification_ramrod_data { 554255736Sdavidch struct ecore_mac_ramrod_data mac; 555255736Sdavidch struct ecore_vlan_ramrod_data vlan; 556255736Sdavidch struct ecore_vlan_mac_ramrod_data vlan_mac; 557255736Sdavidch}; 558255736Sdavidch 559255736Sdavidch/* VLAN_MAC commands */ 560255736Sdavidchenum ecore_vlan_mac_cmd { 
561255736Sdavidch ECORE_VLAN_MAC_ADD, 562255736Sdavidch ECORE_VLAN_MAC_DEL, 563255736Sdavidch ECORE_VLAN_MAC_MOVE, 564255736Sdavidch}; 565255736Sdavidch 566255736Sdavidchstruct ecore_vlan_mac_data { 567255736Sdavidch /* Requested command: ECORE_VLAN_MAC_XX */ 568255736Sdavidch enum ecore_vlan_mac_cmd cmd; 569255736Sdavidch /* used to contain the data related vlan_mac_flags bits from 570255736Sdavidch * ramrod parameters. 571255736Sdavidch */ 572255736Sdavidch unsigned long vlan_mac_flags; 573255736Sdavidch 574255736Sdavidch /* Needed for MOVE command */ 575255736Sdavidch struct ecore_vlan_mac_obj *target_obj; 576255736Sdavidch 577255736Sdavidch union ecore_classification_ramrod_data u; 578255736Sdavidch}; 579255736Sdavidch 580255736Sdavidch/*************************** Exe Queue obj ************************************/ 581255736Sdavidchunion ecore_exe_queue_cmd_data { 582255736Sdavidch struct ecore_vlan_mac_data vlan_mac; 583255736Sdavidch 584255736Sdavidch struct { 585255736Sdavidch /* TODO */ 586255736Sdavidch } mcast; 587255736Sdavidch}; 588255736Sdavidch 589255736Sdavidchstruct ecore_exeq_elem { 590255736Sdavidch ecore_list_entry_t link; 591255736Sdavidch 592255736Sdavidch /* Length of this element in the exe_chunk. 
*/ 593255736Sdavidch int cmd_len; 594255736Sdavidch 595255736Sdavidch union ecore_exe_queue_cmd_data cmd_data; 596255736Sdavidch}; 597255736Sdavidch 598255736Sdavidchunion ecore_qable_obj; 599255736Sdavidch 600255736Sdavidchunion ecore_exeq_comp_elem { 601255736Sdavidch union event_ring_elem *elem; 602255736Sdavidch}; 603255736Sdavidch 604255736Sdavidchstruct ecore_exe_queue_obj; 605255736Sdavidch 606255736Sdavidchtypedef int (*exe_q_validate)(struct bxe_softc *sc, 607255736Sdavidch union ecore_qable_obj *o, 608255736Sdavidch struct ecore_exeq_elem *elem); 609255736Sdavidch 610255736Sdavidchtypedef int (*exe_q_remove)(struct bxe_softc *sc, 611255736Sdavidch union ecore_qable_obj *o, 612255736Sdavidch struct ecore_exeq_elem *elem); 613255736Sdavidch 614255736Sdavidch/* Return positive if entry was optimized, 0 - if not, negative 615255736Sdavidch * in case of an error. 616255736Sdavidch */ 617255736Sdavidchtypedef int (*exe_q_optimize)(struct bxe_softc *sc, 618255736Sdavidch union ecore_qable_obj *o, 619255736Sdavidch struct ecore_exeq_elem *elem); 620255736Sdavidchtypedef int (*exe_q_execute)(struct bxe_softc *sc, 621255736Sdavidch union ecore_qable_obj *o, 622255736Sdavidch ecore_list_t *exe_chunk, 623255736Sdavidch unsigned long *ramrod_flags); 624255736Sdavidchtypedef struct ecore_exeq_elem * 625255736Sdavidch (*exe_q_get)(struct ecore_exe_queue_obj *o, 626255736Sdavidch struct ecore_exeq_elem *elem); 627255736Sdavidch 628255736Sdavidchstruct ecore_exe_queue_obj { 629255736Sdavidch /* Commands pending for an execution. */ 630255736Sdavidch ecore_list_t exe_queue; 631255736Sdavidch 632255736Sdavidch /* Commands pending for an completion. 
*/ 633255736Sdavidch ecore_list_t pending_comp; 634255736Sdavidch 635255736Sdavidch ECORE_MUTEX_SPIN lock; 636255736Sdavidch 637255736Sdavidch /* Maximum length of commands' list for one execution */ 638255736Sdavidch int exe_chunk_len; 639255736Sdavidch 640255736Sdavidch union ecore_qable_obj *owner; 641255736Sdavidch 642255736Sdavidch /****** Virtual functions ******/ 643255736Sdavidch /** 644255736Sdavidch * Called before commands execution for commands that are really 645255736Sdavidch * going to be executed (after 'optimize'). 646255736Sdavidch * 647255736Sdavidch * Must run under exe_queue->lock 648255736Sdavidch */ 649255736Sdavidch exe_q_validate validate; 650255736Sdavidch 651255736Sdavidch /** 652255736Sdavidch * Called before removing pending commands, cleaning allocated 653255736Sdavidch * resources (e.g., credits from validate) 654255736Sdavidch */ 655255736Sdavidch exe_q_remove remove; 656255736Sdavidch 657255736Sdavidch /** 658255736Sdavidch * This will try to cancel the current pending commands list 659255736Sdavidch * considering the new command. 660255736Sdavidch * 661255736Sdavidch * Returns the number of optimized commands or a negative error code 662255736Sdavidch * 663255736Sdavidch * Must run under exe_queue->lock 664255736Sdavidch */ 665255736Sdavidch exe_q_optimize optimize; 666255736Sdavidch 667255736Sdavidch /** 668255736Sdavidch * Run the next commands chunk (owner specific). 669255736Sdavidch */ 670255736Sdavidch exe_q_execute execute; 671255736Sdavidch 672255736Sdavidch /** 673255736Sdavidch * Return the exe_queue element containing the specific command 674255736Sdavidch * if any. Otherwise return NULL. 675255736Sdavidch */ 676255736Sdavidch exe_q_get get; 677255736Sdavidch}; 678255736Sdavidch/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/ 679255736Sdavidch/* 680255736Sdavidch * Element in the VLAN_MAC registry list having all current configured 681255736Sdavidch * rules. 
682255736Sdavidch */ 683255736Sdavidchstruct ecore_vlan_mac_registry_elem { 684255736Sdavidch ecore_list_entry_t link; 685255736Sdavidch 686255736Sdavidch /* Used to store the cam offset used for the mac/vlan/vlan-mac. 687255736Sdavidch * Relevant for 57710 and 57711 only. VLANs and MACs share the 688255736Sdavidch * same CAM for these chips. 689255736Sdavidch */ 690255736Sdavidch int cam_offset; 691255736Sdavidch 692255736Sdavidch /* Needed for DEL and RESTORE flows */ 693255736Sdavidch unsigned long vlan_mac_flags; 694255736Sdavidch 695255736Sdavidch union ecore_classification_ramrod_data u; 696255736Sdavidch}; 697255736Sdavidch 698255736Sdavidch/* Bits representing VLAN_MAC commands specific flags */ 699255736Sdavidchenum { 700255736Sdavidch ECORE_UC_LIST_MAC, 701255736Sdavidch ECORE_ETH_MAC, 702255736Sdavidch ECORE_ISCSI_ETH_MAC, 703255736Sdavidch ECORE_NETQ_ETH_MAC, 704255736Sdavidch ECORE_DONT_CONSUME_CAM_CREDIT, 705255736Sdavidch ECORE_DONT_CONSUME_CAM_CREDIT_DEST, 706255736Sdavidch}; 707255736Sdavidch 708255736Sdavidchstruct ecore_vlan_mac_ramrod_params { 709255736Sdavidch /* Object to run the command from */ 710255736Sdavidch struct ecore_vlan_mac_obj *vlan_mac_obj; 711255736Sdavidch 712255736Sdavidch /* General command flags: COMP_WAIT, etc. */ 713255736Sdavidch unsigned long ramrod_flags; 714255736Sdavidch 715255736Sdavidch /* Command specific configuration request */ 716255736Sdavidch struct ecore_vlan_mac_data user_req; 717255736Sdavidch}; 718255736Sdavidch 719255736Sdavidchstruct ecore_vlan_mac_obj { 720255736Sdavidch struct ecore_raw_obj raw; 721255736Sdavidch 722255736Sdavidch /* Bookkeeping list: will prevent the addition of already existing 723255736Sdavidch * entries. 724255736Sdavidch */ 725255736Sdavidch ecore_list_t head; 726255736Sdavidch /* Implement a simple reader/writer lock on the head list. 
727255736Sdavidch * all these fields should only be accessed under the exe_queue lock 728255736Sdavidch */ 729255736Sdavidch uint8_t head_reader; /* Num. of readers accessing head list */ 730255736Sdavidch bool head_exe_request; /* Pending execution request. */ 731255736Sdavidch unsigned long saved_ramrod_flags; /* Ramrods of pending execution */ 732255736Sdavidch 733255736Sdavidch /* Execution queue interface instance */ 734255736Sdavidch struct ecore_exe_queue_obj exe_queue; 735255736Sdavidch 736255736Sdavidch /* MACs credit pool */ 737255736Sdavidch struct ecore_credit_pool_obj *macs_pool; 738255736Sdavidch 739255736Sdavidch /* VLANs credit pool */ 740255736Sdavidch struct ecore_credit_pool_obj *vlans_pool; 741255736Sdavidch 742255736Sdavidch /* RAMROD command to be used */ 743255736Sdavidch int ramrod_cmd; 744255736Sdavidch 745255736Sdavidch /* copy first n elements onto preallocated buffer 746255736Sdavidch * 747255736Sdavidch * @param n number of elements to get 748255736Sdavidch * @param buf buffer preallocated by caller into which elements 749255736Sdavidch * will be copied. Note elements are 4-byte aligned 750255736Sdavidch * so buffer size must be able to accommodate the 751255736Sdavidch * aligned elements. 752255736Sdavidch * 753255736Sdavidch * @return number of copied bytes 754255736Sdavidch */ 755255736Sdavidch 756255736Sdavidch int (*get_n_elements)(struct bxe_softc *sc, 757255736Sdavidch struct ecore_vlan_mac_obj *o, int n, uint8_t *base, 758255736Sdavidch uint8_t stride, uint8_t size); 759255736Sdavidch 760255736Sdavidch /** 761255736Sdavidch * Checks if ADD-ramrod with the given params may be performed. 
	 *
	 * @return zero if the element may be added
	 */

	int (*check_add)(struct bxe_softc *sc,
			 struct ecore_vlan_mac_obj *o,
			 union ecore_classification_ramrod_data *data);

	/**
	 * Checks if DEL-ramrod with the given params may be performed.
	 *
	 * @return TRUE if the element may be deleted
	 */
	struct ecore_vlan_mac_registry_elem *
		(*check_del)(struct bxe_softc *sc,
			     struct ecore_vlan_mac_obj *o,
			     union ecore_classification_ramrod_data *data);

	/**
	 * Checks if MOVE-ramrod with the given params may be performed.
	 *
	 * @return TRUE if the element may be moved
	 */
	bool (*check_move)(struct bxe_softc *sc,
			   struct ecore_vlan_mac_obj *src_o,
			   struct ecore_vlan_mac_obj *dst_o,
			   union ecore_classification_ramrod_data *data);

	/**
	 * Update the relevant credit object(s) (consume/return
	 * correspondingly).
	 */
	bool (*get_credit)(struct ecore_vlan_mac_obj *o);
	bool (*put_credit)(struct ecore_vlan_mac_obj *o);
	bool (*get_cam_offset)(struct ecore_vlan_mac_obj *o, int *offset);
	bool (*put_cam_offset)(struct ecore_vlan_mac_obj *o, int offset);

	/**
	 * Configures one rule in the ramrod data buffer.
	 */
	void (*set_one_rule)(struct bxe_softc *sc,
			     struct ecore_vlan_mac_obj *o,
			     struct ecore_exeq_elem *elem, int rule_idx,
			     int cam_offset);

	/**
	 * Delete all configured elements having the given
	 * vlan_mac_flags specification. Assumes no pending for
	 * execution commands. Will schedule all currently
	 * configured MACs/VLANs/VLAN-MACs matching the vlan_mac_flags
	 * specification for deletion and will use the given
	 * ramrod_flags for the last DEL operation.
	 *
	 * @param sc
	 * @param o
	 * @param ramrod_flags RAMROD_XX flags
	 *
	 * @return 0 if the last operation has completed successfully
	 *         and there are no more elements left, positive value
	 *         if there are pending for completion commands,
	 *         negative value in case of failure.
	 */
	int (*delete_all)(struct bxe_softc *sc,
			  struct ecore_vlan_mac_obj *o,
			  unsigned long *vlan_mac_flags,
			  unsigned long *ramrod_flags);

	/**
	 * Reconfigures the next MAC/VLAN/VLAN-MAC element from the previously
	 * configured elements list.
	 *
	 * @param sc
	 * @param p Command parameters (RAMROD_COMP_WAIT bit in
	 *          ramrod_flags is only taken into an account)
	 * @param ppos a pointer to the cookie that should be given back in the
	 *        next call to make function handle the next element. If
	 *        *ppos is set to NULL it will restart the iterator.
	 *        If returned *ppos == NULL this means that the last
	 *        element has been handled.
	 *
	 * @return int
	 */
	int (*restore)(struct bxe_softc *sc,
		       struct ecore_vlan_mac_ramrod_params *p,
		       struct ecore_vlan_mac_registry_elem **ppos);

	/**
	 * Should be called on a completion arrival.
	 *
	 * @param sc
	 * @param o
	 * @param cqe Completion element we are handling
	 * @param ramrod_flags if RAMROD_CONT is set the next bulk of
	 *		      pending commands will be executed.
	 *		      RAMROD_DRV_CLR_ONLY and RAMROD_RESTORE
	 *		      may also be set if needed.
	 *
	 * @return 0 if there are neither pending nor waiting for
	 *         completion commands. Positive value if there are
	 *         pending for execution or for completion commands.
	 *         Negative value in case of an error (including an
	 *         error in the cqe).
	 */
	int (*complete)(struct bxe_softc *sc, struct ecore_vlan_mac_obj *o,
			union event_ring_elem *cqe,
			unsigned long *ramrod_flags);

	/**
	 * Wait for completion of all commands. Don't schedule new ones,
	 * just wait. It assumes that the completion code will schedule
	 * for new commands.
	 */
	int (*wait)(struct bxe_softc *sc, struct ecore_vlan_mac_obj *o);
};

/* NIG LLH (lookup/filter) CAM line assignment */
enum {
	ECORE_LLH_CAM_ISCSI_ETH_LINE = 0,
	ECORE_LLH_CAM_ETH_LINE,
	ECORE_LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
};

void ecore_set_mac_in_nig(struct bxe_softc *sc,
			  bool add, unsigned char *dev_addr, int index);

/** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */

/* RX_MODE ramrod special flags: set in rx_mode_flags field in
 * a ecore_rx_mode_ramrod_params.
 */
enum {
	ECORE_RX_MODE_FCOE_ETH,
	ECORE_RX_MODE_ISCSI_ETH,
};

/* RX_MODE accept-filter bits, used in rx_accept_flags/tx_accept_flags */
enum {
	ECORE_ACCEPT_UNICAST,
	ECORE_ACCEPT_MULTICAST,
	ECORE_ACCEPT_ALL_UNICAST,
	ECORE_ACCEPT_ALL_MULTICAST,
	ECORE_ACCEPT_BROADCAST,
	ECORE_ACCEPT_UNMATCHED,
	ECORE_ACCEPT_ANY_VLAN
};

/* Parameters for an RX_MODE (receive filtering) ramrod */
struct ecore_rx_mode_ramrod_params {
	struct ecore_rx_mode_obj *rx_mode_obj;
	unsigned long *pstate;
	int state;
	uint8_t cl_id;
	uint32_t cid;
	uint8_t func_id;
	unsigned long ramrod_flags;
	unsigned long rx_mode_flags;

	/* rdata is either a pointer to eth_filter_rules_ramrod_data(e2) or to
	 * a tstorm_eth_mac_filter_config (e1x).
	 */
	void *rdata;
	ecore_dma_addr_t rdata_mapping;

	/* Rx mode settings */
	unsigned long rx_accept_flags;

	/* internal switching settings */
	unsigned long tx_accept_flags;
};

/* Driver-side object implementing the RX_MODE configuration flow */
struct ecore_rx_mode_obj {
	int (*config_rx_mode)(struct bxe_softc *sc,
			      struct ecore_rx_mode_ramrod_params *p);

	int (*wait_comp)(struct bxe_softc *sc,
			 struct ecore_rx_mode_ramrod_params *p);
};

/********************** Set multicast group ***********************************/

/* One multicast MAC address in a caller-provided list */
struct ecore_mcast_list_elem {
	ecore_list_entry_t link;
	uint8_t *mac;
};

union ecore_mcast_config_data {
	uint8_t *mac;
	uint8_t bin; /* used in a RESTORE flow */
};

/* Parameters for a multicast configuration ramrod */
struct ecore_mcast_ramrod_params {
	struct ecore_mcast_obj *mcast_obj;

	/* Relevant options are RAMROD_COMP_WAIT and RAMROD_DRV_CLR_ONLY */
	unsigned long ramrod_flags;

	ecore_list_t mcast_list; /* list of struct ecore_mcast_list_elem */
	/** TODO:
	 * - rename it to macs_num.
	 * - Add a new command type for handling pending commands
	 *   (remove "zero semantics").
	 *
	 * Length of mcast_list. If zero and ADD_CONT command - post
	 * pending commands.
	 */
	int mcast_list_len;
};

/* Multicast configuration commands */
enum ecore_mcast_cmd {
	ECORE_MCAST_CMD_ADD,
	ECORE_MCAST_CMD_CONT,
	ECORE_MCAST_CMD_DEL,
	ECORE_MCAST_CMD_RESTORE,
};

/* State object managing the multicast filter configuration. The registry
 * is either an approximate-match bin vector (E2+) or an exact-match MAC
 * list (E1x).
 */
struct ecore_mcast_obj {
	struct ecore_raw_obj raw;

	union {
		struct {
		#define ECORE_MCAST_BINS_NUM	256
		#define ECORE_MCAST_VEC_SZ	(ECORE_MCAST_BINS_NUM / 64)
			uint64_t vec[ECORE_MCAST_VEC_SZ];

			/** Number of BINs to clear. Should be updated
			 *  immediately when a command arrives in order to
			 *  properly create DEL commands.
			 */
			int num_bins_set;
		} aprox_match;

		struct {
			ecore_list_t macs;
			int num_macs_set;
		} exact_match;
	} registry;

	/* Pending commands */
	ecore_list_t pending_cmds_head;

	/* A state that is set in raw.pstate, when there are pending commands */
	int sched_state;

	/* Maximal number of mcast MACs configured in one command */
	int max_cmd_len;

	/* Total number of currently pending MACs to configure: both
	 * in the pending commands list and in the current command.
	 */
	int total_pending_num;

	uint8_t engine_id;

	/**
	 * @param cmd command to execute (ECORE_MCAST_CMD_X, see above)
	 */
	int (*config_mcast)(struct bxe_softc *sc,
			    struct ecore_mcast_ramrod_params *p,
			    enum ecore_mcast_cmd cmd);

	/**
	 * Fills the ramrod data during the RESTORE flow.
	 *
	 * @param sc
	 * @param o
	 * @param start_idx Registry index to start from
	 * @param rdata_idx Index in the ramrod data to start from
	 *
	 * @return -1 if we handled the whole registry or index of the last
	 *         handled registry element.
	 */
	int (*hdl_restore)(struct bxe_softc *sc, struct ecore_mcast_obj *o,
			   int start_bin, int *rdata_idx);

	/* Queue a command for later execution (e.g. when one is already
	 * pending).
	 */
	int (*enqueue_cmd)(struct bxe_softc *sc, struct ecore_mcast_obj *o,
			   struct ecore_mcast_ramrod_params *p,
			   enum ecore_mcast_cmd cmd);

	/* Write a single rule into the ramrod data buffer at index idx */
	void (*set_one_rule)(struct bxe_softc *sc,
			     struct ecore_mcast_obj *o, int idx,
			     union ecore_mcast_config_data *cfg_data,
			     enum ecore_mcast_cmd cmd);

	/** Checks if there are more mcast MACs to be set or a previous
	 * command is still pending.
	 */
	bool (*check_pending)(struct ecore_mcast_obj *o);

	/**
	 * Set/Clear/Check SCHEDULED state of the object
	 */
	void (*set_sched)(struct ecore_mcast_obj *o);
	void (*clear_sched)(struct ecore_mcast_obj *o);
	bool (*check_sched)(struct ecore_mcast_obj *o);

	/* Wait until all pending commands complete */
	int (*wait_comp)(struct bxe_softc *sc, struct ecore_mcast_obj *o);

	/**
	 * Handle the internal object counters needed for proper
	 * commands handling. Checks that the provided parameters are
	 * feasible.
	 */
	int (*validate)(struct bxe_softc *sc,
			struct ecore_mcast_ramrod_params *p,
			enum ecore_mcast_cmd cmd);

	/**
	 * Restore the values of internal counters in case of a failure.
	 */
	void (*revert)(struct bxe_softc *sc,
		       struct ecore_mcast_ramrod_params *p,
		       int old_num_bins);

	int (*get_registry_size)(struct ecore_mcast_obj *o);
	void (*set_registry_size)(struct ecore_mcast_obj *o, int n);
};

/*************************** Credit handling **********************************/

/* Pool of CAM credits / offsets shared between classification objects */
struct ecore_credit_pool_obj {

	/* Current amount of credit in the pool */
	ecore_atomic_t credit;

	/* Maximum allowed credit. put() will check against it. */
	int pool_sz;

	/* Allocate a pool table statically.
	 *
	 * Currently the maximum allowed size is MAX_MAC_CREDIT_E2(272)
	 *
	 * The set bit in the table will mean that the entry is available.
	 */
#define ECORE_POOL_VEC_SIZE	(MAX_MAC_CREDIT_E2 / 64)
	uint64_t pool_mirror[ECORE_POOL_VEC_SIZE];

	/* Base pool offset (initialized differently) */
	int base_pool_offset;

	/**
	 * Get the next free pool entry.
	 *
	 * @return TRUE if there was a free entry in the pool
	 */
	bool (*get_entry)(struct ecore_credit_pool_obj *o, int *entry);

	/**
	 * Return the entry back to the pool.
	 *
	 * @return TRUE if entry is legal and has been successfully
	 *         returned to the pool.
	 */
	bool (*put_entry)(struct ecore_credit_pool_obj *o, int entry);

	/**
	 * Get the requested amount of credit from the pool.
	 *
	 * @param cnt Amount of requested credit
	 * @return TRUE if the operation is successful
	 */
	bool (*get)(struct ecore_credit_pool_obj *o, int cnt);

	/**
	 * Returns the credit to the pool.
	 *
	 * @param cnt Amount of credit to return
	 * @return TRUE if the operation is successful
	 */
	bool (*put)(struct ecore_credit_pool_obj *o, int cnt);

	/**
	 * Reads the current amount of credit.
	 */
	int (*check)(struct ecore_credit_pool_obj *o);
};

/*************************** RSS configuration ********************************/
enum {
	/* RSS_MODE bits are mutually exclusive */
	ECORE_RSS_MODE_DISABLED,
	ECORE_RSS_MODE_REGULAR,

	ECORE_RSS_SET_SRCH, /* Setup searcher, E1x specific flag */

	ECORE_RSS_IPV4,
	ECORE_RSS_IPV4_TCP,
	ECORE_RSS_IPV4_UDP,
	ECORE_RSS_IPV6,
	ECORE_RSS_IPV6_TCP,
	ECORE_RSS_IPV6_UDP,

	ECORE_RSS_TUNNELING,
#if defined(__VMKLNX__) && (VMWARE_ESX_DDK_VERSION < 55000) /* ! BNX2X_UPSTREAM */
	ECORE_RSS_MODE_ESX51,
#endif
};

/* Parameters for an RSS configuration ramrod */
struct ecore_config_rss_params {
	struct ecore_rss_config_obj *rss_obj;

	/* may have RAMROD_COMP_WAIT set only */
	unsigned long	ramrod_flags;

	/* ECORE_RSS_X bits */
	unsigned long	rss_flags;

	/* Number hash bits to take into an account */
	uint8_t		rss_result_mask;

	/* Indirection table */
	uint8_t		ind_table[T_ETH_INDIRECTION_TABLE_SIZE];

	/* RSS hash values */
	uint32_t	rss_key[10];

	/* valid only iff ECORE_RSS_UPDATE_TOE is set */
	uint16_t	toe_rss_bitmap;

	/* valid iff ECORE_RSS_TUNNELING is set */
	uint16_t	tunnel_value;
	uint16_t	tunnel_mask;
};

/* Driver-side object implementing the RSS configuration flow */
struct ecore_rss_config_obj {
	struct ecore_raw_obj	raw;

	/* RSS engine to use */
	uint8_t			engine_id;

	/* Last configured indirection table */
	uint8_t			ind_table[T_ETH_INDIRECTION_TABLE_SIZE];

	/* flags for enabling 4-tuple hash on UDP */
	uint8_t			udp_rss_v4;
	uint8_t			udp_rss_v6;

	int (*config_rss)(struct bxe_softc *sc,
			  struct ecore_config_rss_params *p);
};

/*********************** Queue state update ***********************************/

/* UPDATE command options */
enum {
	ECORE_Q_UPDATE_IN_VLAN_REM,
	ECORE_Q_UPDATE_IN_VLAN_REM_CHNG,
	ECORE_Q_UPDATE_OUT_VLAN_REM,
	ECORE_Q_UPDATE_OUT_VLAN_REM_CHNG,
	ECORE_Q_UPDATE_ANTI_SPOOF,
	ECORE_Q_UPDATE_ANTI_SPOOF_CHNG,
	ECORE_Q_UPDATE_ACTIVATE,
	ECORE_Q_UPDATE_ACTIVATE_CHNG,
	ECORE_Q_UPDATE_DEF_VLAN_EN,
	ECORE_Q_UPDATE_DEF_VLAN_EN_CHNG,
	ECORE_Q_UPDATE_SILENT_VLAN_REM_CHNG,
	ECORE_Q_UPDATE_SILENT_VLAN_REM,
	ECORE_Q_UPDATE_TX_SWITCHING_CHNG,
	ECORE_Q_UPDATE_TX_SWITCHING,
};

/* Allowed Queue states */
enum ecore_q_state {
	ECORE_Q_STATE_RESET,
	ECORE_Q_STATE_INITIALIZED,
	ECORE_Q_STATE_ACTIVE,
	ECORE_Q_STATE_MULTI_COS,
	ECORE_Q_STATE_MCOS_TERMINATED,
	ECORE_Q_STATE_INACTIVE,
	ECORE_Q_STATE_STOPPED,
	ECORE_Q_STATE_TERMINATED,
	ECORE_Q_STATE_FLRED,
	ECORE_Q_STATE_MAX,
};

/* Allowed Queue states */
enum ecore_q_logical_state {
	ECORE_Q_LOGICAL_STATE_ACTIVE,
	ECORE_Q_LOGICAL_STATE_STOPPED,
};

/* Allowed commands */
enum ecore_queue_cmd {
	ECORE_Q_CMD_INIT,
	ECORE_Q_CMD_SETUP,
	ECORE_Q_CMD_SETUP_TX_ONLY,
	ECORE_Q_CMD_DEACTIVATE,
	ECORE_Q_CMD_ACTIVATE,
	ECORE_Q_CMD_UPDATE,
	ECORE_Q_CMD_UPDATE_TPA,
	ECORE_Q_CMD_HALT,
	ECORE_Q_CMD_CFC_DEL,
	ECORE_Q_CMD_TERMINATE,
	ECORE_Q_CMD_EMPTY,
	ECORE_Q_CMD_MAX,
};

/* queue SETUP + INIT flags */
enum {
	ECORE_Q_FLG_TPA,
	ECORE_Q_FLG_TPA_IPV6,
	ECORE_Q_FLG_TPA_GRO,
	ECORE_Q_FLG_STATS,
	ECORE_Q_FLG_ZERO_STATS,
	ECORE_Q_FLG_ACTIVE,
	ECORE_Q_FLG_OV,
	ECORE_Q_FLG_VLAN,
	ECORE_Q_FLG_COS,
	ECORE_Q_FLG_HC,
	ECORE_Q_FLG_HC_EN,
	ECORE_Q_FLG_DHC,
	ECORE_Q_FLG_OOO,
	ECORE_Q_FLG_FCOE,
	ECORE_Q_FLG_LEADING_RSS,
	ECORE_Q_FLG_MCAST,
	ECORE_Q_FLG_DEF_VLAN,
	ECORE_Q_FLG_TX_SWITCH,
	ECORE_Q_FLG_TX_SEC,
	ECORE_Q_FLG_ANTI_SPOOF,
	ECORE_Q_FLG_SILENT_VLAN_REM,
	ECORE_Q_FLG_FORCE_DEFAULT_PRI,
	ECORE_Q_FLG_REFUSE_OUTBAND_VLAN,
	ECORE_Q_FLG_PCSUM_ON_PKT,
	ECORE_Q_FLG_TUN_INC_INNER_IP_ID
};

/* Queue type options: queue type may be a combination of below. */
enum ecore_q_type {
	ECORE_Q_TYPE_FWD,
	/** TODO: Consider moving both these flags into the init()
	 *  ramrod params.
	 */
	ECORE_Q_TYPE_HAS_RX,
	ECORE_Q_TYPE_HAS_TX,
};

#define ECORE_PRIMARY_CID_INDEX		0
#define ECORE_MULTI_TX_COS_E1X		3 /* QM only */
#define ECORE_MULTI_TX_COS_E2_E3A0	2
#define ECORE_MULTI_TX_COS_E3B0		3
#define ECORE_MULTI_TX_COS		3 /* Maximum possible */
#define MAC_PAD (ECORE_ALIGN(ETH_ALEN, sizeof(uint32_t)) - ETH_ALEN)

/* Parameters for the INIT command (per-direction HC settings + contexts) */
struct ecore_queue_init_params {
	struct {
		unsigned long	flags;
		uint16_t	hc_rate;
		uint8_t		fw_sb_id;
		uint8_t		sb_cq_index;
	} tx;

	struct {
		unsigned long	flags;
		uint16_t	hc_rate;
		uint8_t		fw_sb_id;
		uint8_t		sb_cq_index;
	} rx;

	/* CID context in the host memory */
	struct eth_context *cxts[ECORE_MULTI_TX_COS];

	/* maximum number of cos supported by hardware */
	uint8_t max_cos;
};

struct ecore_queue_terminate_params {
	/* index within the tx_only cids of this queue object */
	uint8_t cid_index;
};

struct ecore_queue_cfc_del_params {
	/* index within the tx_only cids of this queue object */
	uint8_t cid_index;
};

struct ecore_queue_update_params {
	unsigned long	update_flags; /* ECORE_Q_UPDATE_XX bits */
	uint16_t	def_vlan;
	uint16_t	silent_removal_value;
	uint16_t	silent_removal_mask;
/* index within the tx_only cids of this queue object */
	uint8_t		cid_index;
};

/* Rx queue pause thresholds */
struct rxq_pause_params {
	uint16_t		bd_th_lo;
	uint16_t		bd_th_hi;
	uint16_t		rcq_th_lo;
	uint16_t		rcq_th_hi;
	uint16_t		sge_th_lo; /* valid iff ECORE_Q_FLG_TPA */
	uint16_t		sge_th_hi; /* valid iff ECORE_Q_FLG_TPA */
	uint16_t		pri_map;
};

/* general */
struct ecore_general_setup_params {
	/* valid iff ECORE_Q_FLG_STATS */
	uint8_t		stat_id;

	uint8_t		spcl_id;
	uint16_t	mtu;
	uint8_t		cos;
};

/* Rx queue SETUP parameters */
struct ecore_rxq_setup_params {
	/* dma */
	ecore_dma_addr_t	dscr_map;
	ecore_dma_addr_t	sge_map;
	ecore_dma_addr_t	rcq_map;
	ecore_dma_addr_t	rcq_np_map;

	uint16_t		drop_flags;
	uint16_t		buf_sz;
	uint8_t			fw_sb_id;
	uint8_t			cl_qzone_id;

	/* valid iff ECORE_Q_FLG_TPA */
	uint16_t		tpa_agg_sz;
	uint16_t		sge_buf_sz;
	uint8_t			max_sges_pkt;
	uint8_t			max_tpa_queues;
	uint8_t			rss_engine_id;

	/* valid iff ECORE_Q_FLG_MCAST */
	uint8_t			mcast_engine_id;

	uint8_t			cache_line_log;

	uint8_t			sb_cq_index;

	/* valid iff ECORE_Q_FLG_SILENT_VLAN_REM */
	uint16_t		silent_removal_value;
	uint16_t		silent_removal_mask;
};

/* Tx queue SETUP parameters */
struct ecore_txq_setup_params {
	/* dma */
	ecore_dma_addr_t	dscr_map;

	uint8_t			fw_sb_id;
	uint8_t			sb_cq_index;
	uint8_t			cos;	/* valid iff ECORE_Q_FLG_COS */
	uint16_t		traffic_type;
	/* equals to the leading rss client id, used for TX classification*/
	uint8_t			tss_leading_cl_id;

	/* valid iff ECORE_Q_FLG_DEF_VLAN */
	uint16_t		default_vlan;
};

struct ecore_queue_setup_params {
	struct ecore_general_setup_params gen_params;
	struct ecore_txq_setup_params txq_params;
	struct ecore_rxq_setup_params rxq_params;
	struct rxq_pause_params pause_params;
	unsigned long flags;
};

struct ecore_queue_setup_tx_only_params {
	struct ecore_general_setup_params	gen_params;
	struct ecore_txq_setup_params		txq_params;
	unsigned long				flags;
	/* index within the tx_only cids of this queue object */
	uint8_t					cid_index;
};

/* Parameters for a queue state transition request */
struct ecore_queue_state_params {
	struct ecore_queue_sp_obj *q_obj;

	/* Current command */
	enum ecore_queue_cmd cmd;

	/* may have RAMROD_COMP_WAIT set only */
	unsigned long ramrod_flags;

	/* Params according to the current command */
	union {
		struct ecore_queue_update_params	update;
		struct ecore_queue_setup_params		setup;
		struct ecore_queue_init_params		init;
		struct ecore_queue_setup_tx_only_params	tx_only;
		struct ecore_queue_terminate_params	terminate;
		struct ecore_queue_cfc_del_params	cfc_del;
	} params;
};

struct ecore_viflist_params {
	uint8_t echo_res;
	uint8_t func_bit_map_res;
};

/* State-machine object tracking a single L2 queue (with its tx-only CIDs) */
struct ecore_queue_sp_obj {
	uint32_t	cids[ECORE_MULTI_TX_COS];
	uint8_t		cl_id;
	uint8_t		func_id;

	/* number of traffic classes supported by queue.
	 * The primary connection of the queue supports the first traffic
	 * class. Any further traffic class is supported by a tx-only
	 * connection.
	 *
	 * Therefore max_cos is also a number of valid entries in the cids
	 * array.
	 */
	uint8_t max_cos;
	uint8_t num_tx_only, next_tx_only;

	enum ecore_q_state state, next_state;

	/* bits from enum ecore_q_type */
	unsigned long	type;

	/* ECORE_Q_CMD_XX bits.
This object implements "one
	 * pending" paradigm but for debug and tracing purposes it's
	 * more convenient to have different bits for different
	 * commands.
	 */
	unsigned long	pending;

	/* Buffer to use as a ramrod data and its mapping */
	void		*rdata;
	ecore_dma_addr_t rdata_mapping;

	/**
	 * Performs one state change according to the given parameters.
	 *
	 * @return 0 in case of success and negative value otherwise.
	 */
	int (*send_cmd)(struct bxe_softc *sc,
			struct ecore_queue_state_params *params);

	/**
	 * Sets the pending bit according to the requested transition.
	 */
	int (*set_pending)(struct ecore_queue_sp_obj *o,
			   struct ecore_queue_state_params *params);

	/**
	 * Checks that the requested state transition is legal.
	 */
	int (*check_transition)(struct bxe_softc *sc,
				struct ecore_queue_sp_obj *o,
				struct ecore_queue_state_params *params);

	/**
	 * Completes the pending command.
	 */
	int (*complete_cmd)(struct bxe_softc *sc,
			    struct ecore_queue_sp_obj *o,
			    enum ecore_queue_cmd);

	int (*wait_comp)(struct bxe_softc *sc,
			 struct ecore_queue_sp_obj *o,
			 enum ecore_queue_cmd cmd);
};

/********************** Function state update *********************************/
/* Allowed Function states */
enum ecore_func_state {
	ECORE_F_STATE_RESET,
	ECORE_F_STATE_INITIALIZED,
	ECORE_F_STATE_STARTED,
	ECORE_F_STATE_TX_STOPPED,
	ECORE_F_STATE_MAX,
};

/* Allowed Function commands */
enum ecore_func_cmd {
	ECORE_F_CMD_HW_INIT,
	ECORE_F_CMD_START,
	ECORE_F_CMD_STOP,
	ECORE_F_CMD_HW_RESET,
	ECORE_F_CMD_AFEX_UPDATE,
	ECORE_F_CMD_AFEX_VIFLISTS,
	ECORE_F_CMD_TX_STOP,
	ECORE_F_CMD_TX_START,
	ECORE_F_CMD_SWITCH_UPDATE,
	ECORE_F_CMD_MAX,
};

struct ecore_func_hw_init_params {
	/* A load phase returned by MCP.
	 *
	 * May be:
	 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
	 * FW_MSG_CODE_DRV_LOAD_COMMON
	 * FW_MSG_CODE_DRV_LOAD_PORT
	 * FW_MSG_CODE_DRV_LOAD_FUNCTION
	 */
	uint32_t load_phase;
};

struct ecore_func_hw_reset_params {
	/* A load phase returned by MCP.
1552255736Sdavidch * 1553255736Sdavidch * May be: 1554255736Sdavidch * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP 1555255736Sdavidch * FW_MSG_CODE_DRV_LOAD_COMMON 1556255736Sdavidch * FW_MSG_CODE_DRV_LOAD_PORT 1557255736Sdavidch * FW_MSG_CODE_DRV_LOAD_FUNCTION 1558255736Sdavidch */ 1559255736Sdavidch uint32_t reset_phase; 1560255736Sdavidch}; 1561255736Sdavidch 1562255736Sdavidchstruct ecore_func_start_params { 1563255736Sdavidch /* Multi Function mode: 1564255736Sdavidch * - Single Function 1565255736Sdavidch * - Switch Dependent 1566255736Sdavidch * - Switch Independent 1567255736Sdavidch */ 1568255736Sdavidch uint16_t mf_mode; 1569255736Sdavidch 1570255736Sdavidch /* Switch Dependent mode outer VLAN tag */ 1571255736Sdavidch uint16_t sd_vlan_tag; 1572255736Sdavidch 1573255736Sdavidch /* Function cos mode */ 1574255736Sdavidch uint8_t network_cos_mode; 1575255736Sdavidch 1576255736Sdavidch /* NVGRE classification enablement */ 1577255736Sdavidch uint8_t nvgre_clss_en; 1578255736Sdavidch 1579255736Sdavidch /* NO_GRE_TUNNEL/NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */ 1580255736Sdavidch uint8_t gre_tunnel_mode; 1581255736Sdavidch 1582255736Sdavidch /* GRE_OUTER_HEADERS_RSS/GRE_INNER_HEADERS_RSS/NVGRE_KEY_ENTROPY_RSS */ 1583255736Sdavidch uint8_t gre_tunnel_rss; 1584255736Sdavidch 1585255736Sdavidch}; 1586255736Sdavidch 1587255736Sdavidchstruct ecore_func_switch_update_params { 1588255736Sdavidch uint8_t suspend; 1589255736Sdavidch}; 1590255736Sdavidch 1591255736Sdavidchstruct ecore_func_afex_update_params { 1592255736Sdavidch uint16_t vif_id; 1593255736Sdavidch uint16_t afex_default_vlan; 1594255736Sdavidch uint8_t allowed_priorities; 1595255736Sdavidch}; 1596255736Sdavidch 1597255736Sdavidchstruct ecore_func_afex_viflists_params { 1598255736Sdavidch uint16_t vif_list_index; 1599255736Sdavidch uint8_t func_bit_map; 1600255736Sdavidch uint8_t afex_vif_list_command; 1601255736Sdavidch uint8_t func_to_clear; 1602255736Sdavidch}; 1603255736Sdavidchstruct ecore_func_tx_start_params 
{ 1604255736Sdavidch struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES]; 1605255736Sdavidch uint8_t dcb_enabled; 1606255736Sdavidch uint8_t dcb_version; 1607255736Sdavidch uint8_t dont_add_pri_0; 1608255736Sdavidch}; 1609255736Sdavidch 1610255736Sdavidchstruct ecore_func_state_params { 1611255736Sdavidch struct ecore_func_sp_obj *f_obj; 1612255736Sdavidch 1613255736Sdavidch /* Current command */ 1614255736Sdavidch enum ecore_func_cmd cmd; 1615255736Sdavidch 1616255736Sdavidch /* may have RAMROD_COMP_WAIT set only */ 1617255736Sdavidch unsigned long ramrod_flags; 1618255736Sdavidch 1619255736Sdavidch /* Params according to the current command */ 1620255736Sdavidch union { 1621255736Sdavidch struct ecore_func_hw_init_params hw_init; 1622255736Sdavidch struct ecore_func_hw_reset_params hw_reset; 1623255736Sdavidch struct ecore_func_start_params start; 1624255736Sdavidch struct ecore_func_switch_update_params switch_update; 1625255736Sdavidch struct ecore_func_afex_update_params afex_update; 1626255736Sdavidch struct ecore_func_afex_viflists_params afex_viflists; 1627255736Sdavidch struct ecore_func_tx_start_params tx_start; 1628255736Sdavidch } params; 1629255736Sdavidch}; 1630255736Sdavidch 1631255736Sdavidchstruct ecore_func_sp_drv_ops { 1632255736Sdavidch /* Init tool + runtime initialization: 1633255736Sdavidch * - Common Chip 1634255736Sdavidch * - Common (per Path) 1635255736Sdavidch * - Port 1636255736Sdavidch * - Function phases 1637255736Sdavidch */ 1638255736Sdavidch int (*init_hw_cmn_chip)(struct bxe_softc *sc); 1639255736Sdavidch int (*init_hw_cmn)(struct bxe_softc *sc); 1640255736Sdavidch int (*init_hw_port)(struct bxe_softc *sc); 1641255736Sdavidch int (*init_hw_func)(struct bxe_softc *sc); 1642255736Sdavidch 1643255736Sdavidch /* Reset Function HW: Common, Port, Function phases. 
*/ 1644255736Sdavidch void (*reset_hw_cmn)(struct bxe_softc *sc); 1645255736Sdavidch void (*reset_hw_port)(struct bxe_softc *sc); 1646255736Sdavidch void (*reset_hw_func)(struct bxe_softc *sc); 1647255736Sdavidch 1648255736Sdavidch /* Init/Free GUNZIP resources */ 1649255736Sdavidch int (*gunzip_init)(struct bxe_softc *sc); 1650255736Sdavidch void (*gunzip_end)(struct bxe_softc *sc); 1651255736Sdavidch 1652255736Sdavidch /* Prepare/Release FW resources */ 1653255736Sdavidch int (*init_fw)(struct bxe_softc *sc); 1654255736Sdavidch void (*release_fw)(struct bxe_softc *sc); 1655255736Sdavidch}; 1656255736Sdavidch 1657255736Sdavidchstruct ecore_func_sp_obj { 1658255736Sdavidch enum ecore_func_state state, next_state; 1659255736Sdavidch 1660255736Sdavidch /* ECORE_FUNC_CMD_XX bits. This object implements "one 1661255736Sdavidch * pending" paradigm but for debug and tracing purposes it's 1662255736Sdavidch * more convenient to have different bits for different 1663255736Sdavidch * commands. 1664255736Sdavidch */ 1665255736Sdavidch unsigned long pending; 1666255736Sdavidch 1667255736Sdavidch /* Buffer to use as a ramrod data and its mapping */ 1668255736Sdavidch void *rdata; 1669255736Sdavidch ecore_dma_addr_t rdata_mapping; 1670255736Sdavidch 1671255736Sdavidch /* Buffer to use as a afex ramrod data and its mapping. 1672255736Sdavidch * This can't be same rdata as above because afex ramrod requests 1673255736Sdavidch * can arrive to the object in parallel to other ramrod requests. 
1674255736Sdavidch */ 1675255736Sdavidch void *afex_rdata; 1676255736Sdavidch ecore_dma_addr_t afex_rdata_mapping; 1677255736Sdavidch 1678255736Sdavidch /* this mutex validates that when pending flag is taken, the next 1679255736Sdavidch * ramrod to be sent will be the one set the pending bit 1680255736Sdavidch */ 1681255736Sdavidch ECORE_MUTEX one_pending_mutex; 1682255736Sdavidch 1683255736Sdavidch /* Driver interface */ 1684255736Sdavidch struct ecore_func_sp_drv_ops *drv; 1685255736Sdavidch 1686255736Sdavidch /** 1687255736Sdavidch * Performs one state change according to the given parameters. 1688255736Sdavidch * 1689255736Sdavidch * @return 0 in case of success and negative value otherwise. 1690255736Sdavidch */ 1691255736Sdavidch int (*send_cmd)(struct bxe_softc *sc, 1692255736Sdavidch struct ecore_func_state_params *params); 1693255736Sdavidch 1694255736Sdavidch /** 1695255736Sdavidch * Checks that the requested state transition is legal. 1696255736Sdavidch */ 1697255736Sdavidch int (*check_transition)(struct bxe_softc *sc, 1698255736Sdavidch struct ecore_func_sp_obj *o, 1699255736Sdavidch struct ecore_func_state_params *params); 1700255736Sdavidch 1701255736Sdavidch /** 1702255736Sdavidch * Completes the pending command. 
1703255736Sdavidch */ 1704255736Sdavidch int (*complete_cmd)(struct bxe_softc *sc, 1705255736Sdavidch struct ecore_func_sp_obj *o, 1706255736Sdavidch enum ecore_func_cmd cmd); 1707255736Sdavidch 1708255736Sdavidch int (*wait_comp)(struct bxe_softc *sc, struct ecore_func_sp_obj *o, 1709255736Sdavidch enum ecore_func_cmd cmd); 1710255736Sdavidch}; 1711255736Sdavidch 1712255736Sdavidch/********************** Interfaces ********************************************/ 1713255736Sdavidch/* Queueable objects set */ 1714255736Sdavidchunion ecore_qable_obj { 1715255736Sdavidch struct ecore_vlan_mac_obj vlan_mac; 1716255736Sdavidch}; 1717255736Sdavidch/************** Function state update *********/ 1718255736Sdavidchvoid ecore_init_func_obj(struct bxe_softc *sc, 1719255736Sdavidch struct ecore_func_sp_obj *obj, 1720255736Sdavidch void *rdata, ecore_dma_addr_t rdata_mapping, 1721255736Sdavidch void *afex_rdata, ecore_dma_addr_t afex_rdata_mapping, 1722255736Sdavidch struct ecore_func_sp_drv_ops *drv_iface); 1723255736Sdavidch 1724255736Sdavidchint ecore_func_state_change(struct bxe_softc *sc, 1725255736Sdavidch struct ecore_func_state_params *params); 1726255736Sdavidch 1727255736Sdavidchenum ecore_func_state ecore_func_get_state(struct bxe_softc *sc, 1728255736Sdavidch struct ecore_func_sp_obj *o); 1729255736Sdavidch/******************* Queue State **************/ 1730255736Sdavidchvoid ecore_init_queue_obj(struct bxe_softc *sc, 1731255736Sdavidch struct ecore_queue_sp_obj *obj, uint8_t cl_id, uint32_t *cids, 1732255736Sdavidch uint8_t cid_cnt, uint8_t func_id, void *rdata, 1733255736Sdavidch ecore_dma_addr_t rdata_mapping, unsigned long type); 1734255736Sdavidch 1735255736Sdavidchint ecore_queue_state_change(struct bxe_softc *sc, 1736255736Sdavidch struct ecore_queue_state_params *params); 1737255736Sdavidch 1738255736Sdavidchint ecore_get_q_logical_state(struct bxe_softc *sc, 1739255736Sdavidch struct ecore_queue_sp_obj *obj); 1740255736Sdavidch 
1741255736Sdavidch/********************* VLAN-MAC ****************/ 1742255736Sdavidchvoid ecore_init_mac_obj(struct bxe_softc *sc, 1743255736Sdavidch struct ecore_vlan_mac_obj *mac_obj, 1744255736Sdavidch uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata, 1745255736Sdavidch ecore_dma_addr_t rdata_mapping, int state, 1746255736Sdavidch unsigned long *pstate, ecore_obj_type type, 1747255736Sdavidch struct ecore_credit_pool_obj *macs_pool); 1748255736Sdavidch 1749255736Sdavidchvoid ecore_init_vlan_obj(struct bxe_softc *sc, 1750255736Sdavidch struct ecore_vlan_mac_obj *vlan_obj, 1751255736Sdavidch uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata, 1752255736Sdavidch ecore_dma_addr_t rdata_mapping, int state, 1753255736Sdavidch unsigned long *pstate, ecore_obj_type type, 1754255736Sdavidch struct ecore_credit_pool_obj *vlans_pool); 1755255736Sdavidch 1756255736Sdavidchvoid ecore_init_vlan_mac_obj(struct bxe_softc *sc, 1757255736Sdavidch struct ecore_vlan_mac_obj *vlan_mac_obj, 1758255736Sdavidch uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata, 1759255736Sdavidch ecore_dma_addr_t rdata_mapping, int state, 1760255736Sdavidch unsigned long *pstate, ecore_obj_type type, 1761255736Sdavidch struct ecore_credit_pool_obj *macs_pool, 1762255736Sdavidch struct ecore_credit_pool_obj *vlans_pool); 1763255736Sdavidch 1764255736Sdavidchint ecore_vlan_mac_h_read_lock(struct bxe_softc *sc, 1765255736Sdavidch struct ecore_vlan_mac_obj *o); 1766255736Sdavidchvoid ecore_vlan_mac_h_read_unlock(struct bxe_softc *sc, 1767255736Sdavidch struct ecore_vlan_mac_obj *o); 1768255736Sdavidchint ecore_vlan_mac_h_write_lock(struct bxe_softc *sc, 1769255736Sdavidch struct ecore_vlan_mac_obj *o); 1770255736Sdavidchvoid ecore_vlan_mac_h_write_unlock(struct bxe_softc *sc, 1771255736Sdavidch struct ecore_vlan_mac_obj *o); 1772255736Sdavidchint ecore_config_vlan_mac(struct bxe_softc *sc, 1773255736Sdavidch struct ecore_vlan_mac_ramrod_params *p); 1774255736Sdavidch 
1775255736Sdavidchint ecore_vlan_mac_move(struct bxe_softc *sc, 1776255736Sdavidch struct ecore_vlan_mac_ramrod_params *p, 1777255736Sdavidch struct ecore_vlan_mac_obj *dest_o); 1778255736Sdavidch 1779255736Sdavidch/********************* RX MODE ****************/ 1780255736Sdavidch 1781255736Sdavidchvoid ecore_init_rx_mode_obj(struct bxe_softc *sc, 1782255736Sdavidch struct ecore_rx_mode_obj *o); 1783255736Sdavidch 1784255736Sdavidch/** 1785255736Sdavidch * ecore_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters. 1786255736Sdavidch * 1787255736Sdavidch * @p: Command parameters 1788255736Sdavidch * 1789255736Sdavidch * Return: 0 - if operation was successful and there is no pending completions, 1790255736Sdavidch * positive number - if there are pending completions, 1791255736Sdavidch * negative - if there were errors 1792255736Sdavidch */ 1793255736Sdavidchint ecore_config_rx_mode(struct bxe_softc *sc, 1794255736Sdavidch struct ecore_rx_mode_ramrod_params *p); 1795255736Sdavidch 1796255736Sdavidch/****************** MULTICASTS ****************/ 1797255736Sdavidch 1798255736Sdavidchvoid ecore_init_mcast_obj(struct bxe_softc *sc, 1799255736Sdavidch struct ecore_mcast_obj *mcast_obj, 1800255736Sdavidch uint8_t mcast_cl_id, uint32_t mcast_cid, uint8_t func_id, 1801255736Sdavidch uint8_t engine_id, void *rdata, ecore_dma_addr_t rdata_mapping, 1802255736Sdavidch int state, unsigned long *pstate, 1803255736Sdavidch ecore_obj_type type); 1804255736Sdavidch 1805255736Sdavidch/** 1806255736Sdavidch * ecore_config_mcast - Configure multicast MACs list. 
1807255736Sdavidch * 1808255736Sdavidch * @cmd: command to execute: BNX2X_MCAST_CMD_X 1809255736Sdavidch * 1810255736Sdavidch * May configure a new list 1811255736Sdavidch * provided in p->mcast_list (ECORE_MCAST_CMD_ADD), clean up 1812255736Sdavidch * (ECORE_MCAST_CMD_DEL) or restore (ECORE_MCAST_CMD_RESTORE) a current 1813255736Sdavidch * configuration, continue to execute the pending commands 1814255736Sdavidch * (ECORE_MCAST_CMD_CONT). 1815255736Sdavidch * 1816255736Sdavidch * If previous command is still pending or if number of MACs to 1817255736Sdavidch * configure is more that maximum number of MACs in one command, 1818255736Sdavidch * the current command will be enqueued to the tail of the 1819255736Sdavidch * pending commands list. 1820255736Sdavidch * 1821255736Sdavidch * Return: 0 is operation was successfull and there are no pending completions, 1822255736Sdavidch * negative if there were errors, positive if there are pending 1823255736Sdavidch * completions. 1824255736Sdavidch */ 1825255736Sdavidchint ecore_config_mcast(struct bxe_softc *sc, 1826255736Sdavidch struct ecore_mcast_ramrod_params *p, 1827255736Sdavidch enum ecore_mcast_cmd cmd); 1828255736Sdavidch 1829255736Sdavidch/****************** CREDIT POOL ****************/ 1830255736Sdavidchvoid ecore_init_mac_credit_pool(struct bxe_softc *sc, 1831255736Sdavidch struct ecore_credit_pool_obj *p, uint8_t func_id, 1832255736Sdavidch uint8_t func_num); 1833255736Sdavidchvoid ecore_init_vlan_credit_pool(struct bxe_softc *sc, 1834255736Sdavidch struct ecore_credit_pool_obj *p, uint8_t func_id, 1835255736Sdavidch uint8_t func_num); 1836255736Sdavidch 1837255736Sdavidch/****************** RSS CONFIGURATION ****************/ 1838255736Sdavidchvoid ecore_init_rss_config_obj(struct bxe_softc *sc, 1839255736Sdavidch struct ecore_rss_config_obj *rss_obj, 1840255736Sdavidch uint8_t cl_id, uint32_t cid, uint8_t func_id, uint8_t engine_id, 1841255736Sdavidch void *rdata, ecore_dma_addr_t rdata_mapping, 
1842255736Sdavidch int state, unsigned long *pstate, 1843255736Sdavidch ecore_obj_type type); 1844255736Sdavidch 1845255736Sdavidch/** 1846255736Sdavidch * ecore_config_rss - Updates RSS configuration according to provided parameters 1847255736Sdavidch * 1848255736Sdavidch * Return: 0 in case of success 1849255736Sdavidch */ 1850255736Sdavidchint ecore_config_rss(struct bxe_softc *sc, 1851255736Sdavidch struct ecore_config_rss_params *p); 1852255736Sdavidch 1853255736Sdavidch/** 1854255736Sdavidch * ecore_get_rss_ind_table - Return the current ind_table configuration. 1855255736Sdavidch * 1856255736Sdavidch * @ind_table: buffer to fill with the current indirection 1857255736Sdavidch * table content. Should be at least 1858255736Sdavidch * T_ETH_INDIRECTION_TABLE_SIZE bytes long. 1859255736Sdavidch */ 1860255736Sdavidchvoid ecore_get_rss_ind_table(struct ecore_rss_config_obj *rss_obj, 1861255736Sdavidch uint8_t *ind_table); 1862255736Sdavidch 1863258203Sedavis/* set as inline so printout will show the offending function */ 1864258203Sedavisint validate_vlan_mac(struct bxe_softc *sc, 1865258203Sedavis struct ecore_vlan_mac_obj *vlan_mac); 1866255736Sdavidch 1867255736Sdavidch#endif /* ECORE_SP_H */ 1868255736Sdavidch 1869