/* efsys.h — stable/11 revision 350409 */
1/*- 2 * Copyright (c) 2010-2016 Solarflare Communications Inc. 3 * All rights reserved. 4 * 5 * This software was developed in part by Philip Paeps under contract for 6 * Solarflare Communications, Inc. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright notice, 12 * this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright notice, 14 * this list of conditions and the following disclaimer in the documentation 15 * and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 18 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 19 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 20 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR 21 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 22 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 23 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 24 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 25 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 26 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, 27 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 * 29 * The views and conclusions contained in the software and documentation are 30 * those of the authors and should not be interpreted as representing official 31 * policies, either expressed or implied, of the FreeBSD Project. 
32 * 33 * $FreeBSD: stable/11/sys/dev/sfxge/common/efsys.h 350409 2019-07-29 10:41:21Z arybchik $ 34 */ 35 36#ifndef _SYS_EFSYS_H 37#define _SYS_EFSYS_H 38 39#ifdef __cplusplus 40extern "C" { 41#endif 42 43#include <sys/param.h> 44#include <sys/bus.h> 45#include <sys/endian.h> 46#include <sys/lock.h> 47#include <sys/malloc.h> 48#include <sys/mbuf.h> 49#include <sys/mutex.h> 50#include <sys/rwlock.h> 51#include <sys/sdt.h> 52#include <sys/systm.h> 53 54#include <machine/bus.h> 55#include <machine/endian.h> 56 57#define EFSYS_HAS_UINT64 1 58#if defined(__x86_64__) 59#define EFSYS_USE_UINT64 1 60#else 61#define EFSYS_USE_UINT64 0 62#endif 63#define EFSYS_HAS_SSE2_M128 0 64#if _BYTE_ORDER == _BIG_ENDIAN 65#define EFSYS_IS_BIG_ENDIAN 1 66#define EFSYS_IS_LITTLE_ENDIAN 0 67#elif _BYTE_ORDER == _LITTLE_ENDIAN 68#define EFSYS_IS_BIG_ENDIAN 0 69#define EFSYS_IS_LITTLE_ENDIAN 1 70#endif 71#include "efx_types.h" 72 73/* Common code requires this */ 74#if __FreeBSD_version < 800068 75#define memmove(d, s, l) bcopy(s, d, l) 76#endif 77 78/* FreeBSD equivalents of Solaris things */ 79#ifndef _NOTE 80#define _NOTE(s) 81#endif 82 83#ifndef B_FALSE 84#define B_FALSE FALSE 85#endif 86#ifndef B_TRUE 87#define B_TRUE TRUE 88#endif 89 90#ifndef IS_P2ALIGNED 91#define IS_P2ALIGNED(v, a) ((((uintptr_t)(v)) & ((uintptr_t)(a) - 1)) == 0) 92#endif 93 94#ifndef P2ALIGN 95#define P2ALIGN(_x, _a) ((_x) & -(_a)) 96#endif 97 98#ifndef IS2P 99#define ISP2(x) (((x) & ((x) - 1)) == 0) 100#endif 101 102#if defined(__x86_64__) && __FreeBSD_version >= 1000000 103 104#define SFXGE_USE_BUS_SPACE_8 1 105 106#if !defined(bus_space_read_stream_8) 107 108#define bus_space_read_stream_8(t, h, o) \ 109 bus_space_read_8((t), (h), (o)) 110 111#define bus_space_write_stream_8(t, h, o, v) \ 112 bus_space_write_8((t), (h), (o), (v)) 113 114#endif 115 116#endif 117 118#define ENOTACTIVE EINVAL 119 120/* Memory type to use on FreeBSD */ 121MALLOC_DECLARE(M_SFXGE); 122 123/* Machine dependend prefetch wrappers */ 
124#if defined(__i386__) || defined(__amd64__) 125static __inline void 126prefetch_read_many(void *addr) 127{ 128 129 __asm__( 130 "prefetcht0 (%0)" 131 : 132 : "r" (addr)); 133} 134 135static __inline void 136prefetch_read_once(void *addr) 137{ 138 139 __asm__( 140 "prefetchnta (%0)" 141 : 142 : "r" (addr)); 143} 144#elif defined(__sparc64__) 145static __inline void 146prefetch_read_many(void *addr) 147{ 148 149 __asm__( 150 "prefetch [%0], 0" 151 : 152 : "r" (addr)); 153} 154 155static __inline void 156prefetch_read_once(void *addr) 157{ 158 159 __asm__( 160 "prefetch [%0], 1" 161 : 162 : "r" (addr)); 163} 164#else 165static __inline void 166prefetch_read_many(void *addr) 167{ 168 169} 170 171static __inline void 172prefetch_read_once(void *addr) 173{ 174 175} 176#endif 177 178#if defined(__i386__) || defined(__amd64__) 179#include <vm/vm.h> 180#include <vm/pmap.h> 181#endif 182static __inline void 183sfxge_map_mbuf_fast(bus_dma_tag_t tag, bus_dmamap_t map, 184 struct mbuf *m, bus_dma_segment_t *seg) 185{ 186#if defined(__i386__) || defined(__amd64__) 187 seg->ds_addr = pmap_kextract(mtod(m, vm_offset_t)); 188 seg->ds_len = m->m_len; 189#else 190 int nsegstmp; 191 192 bus_dmamap_load_mbuf_sg(tag, map, m, seg, &nsegstmp, 0); 193#endif 194} 195 196/* Modifiers used for Windows builds */ 197#define __in 198#define __in_opt 199#define __in_ecount(_n) 200#define __in_ecount_opt(_n) 201#define __in_bcount(_n) 202#define __in_bcount_opt(_n) 203 204#define __out 205#define __out_opt 206#define __out_ecount(_n) 207#define __out_ecount_opt(_n) 208#define __out_bcount(_n) 209#define __out_bcount_opt(_n) 210#define __out_bcount_part(_n, _l) 211#define __out_bcount_part_opt(_n, _l) 212 213#define __deref_out 214 215#define __inout 216#define __inout_opt 217#define __inout_ecount(_n) 218#define __inout_ecount_opt(_n) 219#define __inout_bcount(_n) 220#define __inout_bcount_opt(_n) 221#define __inout_bcount_full_opt(_n) 222 223#define __deref_out_bcount_opt(n) 224 225#define 
__checkReturn 226#define __success(_x) 227 228#define __drv_when(_p, _c) 229 230/* Code inclusion options */ 231 232 233#define EFSYS_OPT_NAMES 1 234 235#define EFSYS_OPT_SIENA 1 236#define EFSYS_OPT_HUNTINGTON 1 237#define EFSYS_OPT_MEDFORD 1 238#ifdef DEBUG 239#define EFSYS_OPT_CHECK_REG 1 240#else 241#define EFSYS_OPT_CHECK_REG 0 242#endif 243 244#define EFSYS_OPT_MCDI 1 245#define EFSYS_OPT_MCDI_LOGGING 0 246#define EFSYS_OPT_MCDI_PROXY_AUTH 0 247 248#define EFSYS_OPT_MAC_STATS 1 249 250#define EFSYS_OPT_LOOPBACK 0 251 252#define EFSYS_OPT_MON_MCDI 0 253#define EFSYS_OPT_MON_STATS 0 254 255#define EFSYS_OPT_PHY_STATS 1 256#define EFSYS_OPT_BIST 1 257#define EFSYS_OPT_PHY_LED_CONTROL 1 258#define EFSYS_OPT_PHY_FLAGS 0 259 260#define EFSYS_OPT_VPD 1 261#define EFSYS_OPT_NVRAM 1 262#define EFSYS_OPT_BOOTCFG 0 263 264#define EFSYS_OPT_DIAG 0 265#define EFSYS_OPT_RX_SCALE 1 266#define EFSYS_OPT_QSTATS 1 267#define EFSYS_OPT_FILTER 1 268#define EFSYS_OPT_RX_SCATTER 0 269 270#define EFSYS_OPT_EV_PREFETCH 0 271 272#define EFSYS_OPT_DECODE_INTR_FATAL 1 273 274#define EFSYS_OPT_LICENSING 0 275 276#define EFSYS_OPT_ALLOW_UNCONFIGURED_NIC 0 277 278/* ID */ 279 280typedef struct __efsys_identifier_s efsys_identifier_t; 281 282/* PROBE */ 283 284#ifndef DTRACE_PROBE 285 286#define EFSYS_PROBE(_name) 287 288#define EFSYS_PROBE1(_name, _type1, _arg1) 289 290#define EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2) 291 292#define EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2, \ 293 _type3, _arg3) 294 295#define EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2, \ 296 _type3, _arg3, _type4, _arg4) 297 298#define EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \ 299 _type3, _arg3, _type4, _arg4, _type5, _arg5) 300 301#define EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \ 302 _type3, _arg3, _type4, _arg4, _type5, _arg5, \ 303 _type6, _arg6) 304 305#define EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2, \ 306 _type3, _arg3, _type4, _arg4, _type5, _arg5, \ 307 _type6, 
_arg6, _type7, _arg7) 308 309#else /* DTRACE_PROBE */ 310 311#define EFSYS_PROBE(_name) \ 312 DTRACE_PROBE(_name) 313 314#define EFSYS_PROBE1(_name, _type1, _arg1) \ 315 DTRACE_PROBE1(_name, _type1, _arg1) 316 317#define EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2) \ 318 DTRACE_PROBE2(_name, _type1, _arg1, _type2, _arg2) 319 320#define EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2, \ 321 _type3, _arg3) \ 322 DTRACE_PROBE3(_name, _type1, _arg1, _type2, _arg2, \ 323 _type3, _arg3) 324 325#define EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2, \ 326 _type3, _arg3, _type4, _arg4) \ 327 DTRACE_PROBE4(_name, _type1, _arg1, _type2, _arg2, \ 328 _type3, _arg3, _type4, _arg4) 329 330#ifdef DTRACE_PROBE5 331#define EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \ 332 _type3, _arg3, _type4, _arg4, _type5, _arg5) \ 333 DTRACE_PROBE5(_name, _type1, _arg1, _type2, _arg2, \ 334 _type3, _arg3, _type4, _arg4, _type5, _arg5) 335#else 336#define EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \ 337 _type3, _arg3, _type4, _arg4, _type5, _arg5) \ 338 DTRACE_PROBE4(_name, _type1, _arg1, _type2, _arg2, \ 339 _type3, _arg3, _type4, _arg4) 340#endif 341 342#ifdef DTRACE_PROBE6 343#define EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \ 344 _type3, _arg3, _type4, _arg4, _type5, _arg5, \ 345 _type6, _arg6) \ 346 DTRACE_PROBE6(_name, _type1, _arg1, _type2, _arg2, \ 347 _type3, _arg3, _type4, _arg4, _type5, _arg5, \ 348 _type6, _arg6) 349#else 350#define EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \ 351 _type3, _arg3, _type4, _arg4, _type5, _arg5, \ 352 _type6, _arg6) \ 353 EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \ 354 _type3, _arg3, _type4, _arg4, _type5, _arg5) 355#endif 356 357#ifdef DTRACE_PROBE7 358#define EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2, \ 359 _type3, _arg3, _type4, _arg4, _type5, _arg5, \ 360 _type6, _arg6, _type7, _arg7) \ 361 DTRACE_PROBE7(_name, _type1, _arg1, _type2, _arg2, \ 362 _type3, _arg3, _type4, _arg4, _type5, _arg5, \ 363 
_type6, _arg6, _type7, _arg7) 364#else 365#define EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2, \ 366 _type3, _arg3, _type4, _arg4, _type5, _arg5, \ 367 _type6, _arg6, _type7, _arg7) \ 368 EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \ 369 _type3, _arg3, _type4, _arg4, _type5, _arg5, \ 370 _type6, _arg6) 371#endif 372 373#endif /* DTRACE_PROBE */ 374 375/* DMA */ 376 377typedef uint64_t efsys_dma_addr_t; 378 379typedef struct efsys_mem_s { 380 bus_dma_tag_t esm_tag; 381 bus_dmamap_t esm_map; 382 caddr_t esm_base; 383 efsys_dma_addr_t esm_addr; 384} efsys_mem_t; 385 386 387#define EFSYS_MEM_ZERO(_esmp, _size) \ 388 do { \ 389 (void) memset((_esmp)->esm_base, 0, (_size)); \ 390 \ 391 _NOTE(CONSTANTCONDITION) \ 392 } while (B_FALSE) 393 394#define EFSYS_MEM_READD(_esmp, _offset, _edp) \ 395 do { \ 396 uint32_t *addr; \ 397 \ 398 _NOTE(CONSTANTCONDITION) \ 399 KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)), \ 400 ("not power of 2 aligned")); \ 401 \ 402 addr = (void *)((_esmp)->esm_base + (_offset)); \ 403 \ 404 (_edp)->ed_u32[0] = *addr; \ 405 \ 406 EFSYS_PROBE2(mem_readd, unsigned int, (_offset), \ 407 uint32_t, (_edp)->ed_u32[0]); \ 408 \ 409 _NOTE(CONSTANTCONDITION) \ 410 } while (B_FALSE) 411 412#if defined(__x86_64__) 413#define EFSYS_MEM_READQ(_esmp, _offset, _eqp) \ 414 do { \ 415 uint64_t *addr; \ 416 \ 417 _NOTE(CONSTANTCONDITION) \ 418 KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)), \ 419 ("not power of 2 aligned")); \ 420 \ 421 addr = (void *)((_esmp)->esm_base + (_offset)); \ 422 \ 423 (_eqp)->eq_u64[0] = *addr; \ 424 \ 425 EFSYS_PROBE3(mem_readq, unsigned int, (_offset), \ 426 uint32_t, (_eqp)->eq_u32[1], \ 427 uint32_t, (_eqp)->eq_u32[0]); \ 428 \ 429 _NOTE(CONSTANTCONDITION) \ 430 } while (B_FALSE) 431#else 432#define EFSYS_MEM_READQ(_esmp, _offset, _eqp) \ 433 do { \ 434 uint32_t *addr; \ 435 \ 436 _NOTE(CONSTANTCONDITION) \ 437 KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)), \ 438 ("not power of 2 aligned")); \ 439 \ 440 addr = 
(void *)((_esmp)->esm_base + (_offset)); \ 441 \ 442 (_eqp)->eq_u32[0] = *addr++; \ 443 (_eqp)->eq_u32[1] = *addr; \ 444 \ 445 EFSYS_PROBE3(mem_readq, unsigned int, (_offset), \ 446 uint32_t, (_eqp)->eq_u32[1], \ 447 uint32_t, (_eqp)->eq_u32[0]); \ 448 \ 449 _NOTE(CONSTANTCONDITION) \ 450 } while (B_FALSE) 451#endif 452 453#if defined(__x86_64__) 454#define EFSYS_MEM_READO(_esmp, _offset, _eop) \ 455 do { \ 456 uint64_t *addr; \ 457 \ 458 _NOTE(CONSTANTCONDITION) \ 459 KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)), \ 460 ("not power of 2 aligned")); \ 461 \ 462 addr = (void *)((_esmp)->esm_base + (_offset)); \ 463 \ 464 (_eop)->eo_u64[0] = *addr++; \ 465 (_eop)->eo_u64[1] = *addr; \ 466 \ 467 EFSYS_PROBE5(mem_reado, unsigned int, (_offset), \ 468 uint32_t, (_eop)->eo_u32[3], \ 469 uint32_t, (_eop)->eo_u32[2], \ 470 uint32_t, (_eop)->eo_u32[1], \ 471 uint32_t, (_eop)->eo_u32[0]); \ 472 \ 473 _NOTE(CONSTANTCONDITION) \ 474 } while (B_FALSE) 475#else 476#define EFSYS_MEM_READO(_esmp, _offset, _eop) \ 477 do { \ 478 uint32_t *addr; \ 479 \ 480 _NOTE(CONSTANTCONDITION) \ 481 KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)), \ 482 ("not power of 2 aligned")); \ 483 \ 484 addr = (void *)((_esmp)->esm_base + (_offset)); \ 485 \ 486 (_eop)->eo_u32[0] = *addr++; \ 487 (_eop)->eo_u32[1] = *addr++; \ 488 (_eop)->eo_u32[2] = *addr++; \ 489 (_eop)->eo_u32[3] = *addr; \ 490 \ 491 EFSYS_PROBE5(mem_reado, unsigned int, (_offset), \ 492 uint32_t, (_eop)->eo_u32[3], \ 493 uint32_t, (_eop)->eo_u32[2], \ 494 uint32_t, (_eop)->eo_u32[1], \ 495 uint32_t, (_eop)->eo_u32[0]); \ 496 \ 497 _NOTE(CONSTANTCONDITION) \ 498 } while (B_FALSE) 499#endif 500 501#define EFSYS_MEM_WRITED(_esmp, _offset, _edp) \ 502 do { \ 503 uint32_t *addr; \ 504 \ 505 _NOTE(CONSTANTCONDITION) \ 506 KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)), \ 507 ("not power of 2 aligned")); \ 508 \ 509 EFSYS_PROBE2(mem_writed, unsigned int, (_offset), \ 510 uint32_t, (_edp)->ed_u32[0]); \ 511 \ 512 addr = (void 
*)((_esmp)->esm_base + (_offset)); \ 513 \ 514 *addr = (_edp)->ed_u32[0]; \ 515 \ 516 _NOTE(CONSTANTCONDITION) \ 517 } while (B_FALSE) 518 519#if defined(__x86_64__) 520#define EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp) \ 521 do { \ 522 uint64_t *addr; \ 523 \ 524 _NOTE(CONSTANTCONDITION) \ 525 KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)), \ 526 ("not power of 2 aligned")); \ 527 \ 528 EFSYS_PROBE3(mem_writeq, unsigned int, (_offset), \ 529 uint32_t, (_eqp)->eq_u32[1], \ 530 uint32_t, (_eqp)->eq_u32[0]); \ 531 \ 532 addr = (void *)((_esmp)->esm_base + (_offset)); \ 533 \ 534 *addr = (_eqp)->eq_u64[0]; \ 535 \ 536 _NOTE(CONSTANTCONDITION) \ 537 } while (B_FALSE) 538 539#else 540#define EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp) \ 541 do { \ 542 uint32_t *addr; \ 543 \ 544 _NOTE(CONSTANTCONDITION) \ 545 KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)), \ 546 ("not power of 2 aligned")); \ 547 \ 548 EFSYS_PROBE3(mem_writeq, unsigned int, (_offset), \ 549 uint32_t, (_eqp)->eq_u32[1], \ 550 uint32_t, (_eqp)->eq_u32[0]); \ 551 \ 552 addr = (void *)((_esmp)->esm_base + (_offset)); \ 553 \ 554 *addr++ = (_eqp)->eq_u32[0]; \ 555 *addr = (_eqp)->eq_u32[1]; \ 556 \ 557 _NOTE(CONSTANTCONDITION) \ 558 } while (B_FALSE) 559#endif 560 561#if defined(__x86_64__) 562#define EFSYS_MEM_WRITEO(_esmp, _offset, _eop) \ 563 do { \ 564 uint64_t *addr; \ 565 \ 566 _NOTE(CONSTANTCONDITION) \ 567 KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)), \ 568 ("not power of 2 aligned")); \ 569 \ 570 EFSYS_PROBE5(mem_writeo, unsigned int, (_offset), \ 571 uint32_t, (_eop)->eo_u32[3], \ 572 uint32_t, (_eop)->eo_u32[2], \ 573 uint32_t, (_eop)->eo_u32[1], \ 574 uint32_t, (_eop)->eo_u32[0]); \ 575 \ 576 addr = (void *)((_esmp)->esm_base + (_offset)); \ 577 \ 578 *addr++ = (_eop)->eo_u64[0]; \ 579 *addr = (_eop)->eo_u64[1]; \ 580 \ 581 _NOTE(CONSTANTCONDITION) \ 582 } while (B_FALSE) 583#else 584#define EFSYS_MEM_WRITEO(_esmp, _offset, _eop) \ 585 do { \ 586 uint32_t *addr; \ 587 \ 588 
_NOTE(CONSTANTCONDITION) \ 589 KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)), \ 590 ("not power of 2 aligned")); \ 591 \ 592 EFSYS_PROBE5(mem_writeo, unsigned int, (_offset), \ 593 uint32_t, (_eop)->eo_u32[3], \ 594 uint32_t, (_eop)->eo_u32[2], \ 595 uint32_t, (_eop)->eo_u32[1], \ 596 uint32_t, (_eop)->eo_u32[0]); \ 597 \ 598 addr = (void *)((_esmp)->esm_base + (_offset)); \ 599 \ 600 *addr++ = (_eop)->eo_u32[0]; \ 601 *addr++ = (_eop)->eo_u32[1]; \ 602 *addr++ = (_eop)->eo_u32[2]; \ 603 *addr = (_eop)->eo_u32[3]; \ 604 \ 605 _NOTE(CONSTANTCONDITION) \ 606 } while (B_FALSE) 607#endif 608 609#define EFSYS_MEM_ADDR(_esmp) \ 610 ((_esmp)->esm_addr) 611 612#define EFSYS_MEM_IS_NULL(_esmp) \ 613 ((_esmp)->esm_base == NULL) 614 615/* BAR */ 616 617#define SFXGE_LOCK_NAME_MAX 16 618 619typedef struct efsys_bar_s { 620 struct mtx esb_lock; 621 char esb_lock_name[SFXGE_LOCK_NAME_MAX]; 622 bus_space_tag_t esb_tag; 623 bus_space_handle_t esb_handle; 624 int esb_rid; 625 struct resource *esb_res; 626} efsys_bar_t; 627 628#define SFXGE_BAR_LOCK_INIT(_esbp, _ifname) \ 629 do { \ 630 snprintf((_esbp)->esb_lock_name, \ 631 sizeof((_esbp)->esb_lock_name), \ 632 "%s:bar", (_ifname)); \ 633 mtx_init(&(_esbp)->esb_lock, (_esbp)->esb_lock_name, \ 634 NULL, MTX_DEF); \ 635 _NOTE(CONSTANTCONDITION) \ 636 } while (B_FALSE) 637#define SFXGE_BAR_LOCK_DESTROY(_esbp) \ 638 mtx_destroy(&(_esbp)->esb_lock) 639#define SFXGE_BAR_LOCK(_esbp) \ 640 mtx_lock(&(_esbp)->esb_lock) 641#define SFXGE_BAR_UNLOCK(_esbp) \ 642 mtx_unlock(&(_esbp)->esb_lock) 643 644#define EFSYS_BAR_READD(_esbp, _offset, _edp, _lock) \ 645 do { \ 646 _NOTE(CONSTANTCONDITION) \ 647 KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)), \ 648 ("not power of 2 aligned")); \ 649 \ 650 _NOTE(CONSTANTCONDITION) \ 651 if (_lock) \ 652 SFXGE_BAR_LOCK(_esbp); \ 653 \ 654 (_edp)->ed_u32[0] = bus_space_read_stream_4( \ 655 (_esbp)->esb_tag, (_esbp)->esb_handle, \ 656 (_offset)); \ 657 \ 658 EFSYS_PROBE2(bar_readd, unsigned int, 
(_offset), \ 659 uint32_t, (_edp)->ed_u32[0]); \ 660 \ 661 _NOTE(CONSTANTCONDITION) \ 662 if (_lock) \ 663 SFXGE_BAR_UNLOCK(_esbp); \ 664 _NOTE(CONSTANTCONDITION) \ 665 } while (B_FALSE) 666 667#if defined(SFXGE_USE_BUS_SPACE_8) 668#define EFSYS_BAR_READQ(_esbp, _offset, _eqp) \ 669 do { \ 670 _NOTE(CONSTANTCONDITION) \ 671 KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)), \ 672 ("not power of 2 aligned")); \ 673 \ 674 SFXGE_BAR_LOCK(_esbp); \ 675 \ 676 (_eqp)->eq_u64[0] = bus_space_read_stream_8( \ 677 (_esbp)->esb_tag, (_esbp)->esb_handle, \ 678 (_offset)); \ 679 \ 680 EFSYS_PROBE3(bar_readq, unsigned int, (_offset), \ 681 uint32_t, (_eqp)->eq_u32[1], \ 682 uint32_t, (_eqp)->eq_u32[0]); \ 683 \ 684 SFXGE_BAR_UNLOCK(_esbp); \ 685 _NOTE(CONSTANTCONDITION) \ 686 } while (B_FALSE) 687 688#define EFSYS_BAR_READO(_esbp, _offset, _eop, _lock) \ 689 do { \ 690 _NOTE(CONSTANTCONDITION) \ 691 KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)), \ 692 ("not power of 2 aligned")); \ 693 \ 694 _NOTE(CONSTANTCONDITION) \ 695 if (_lock) \ 696 SFXGE_BAR_LOCK(_esbp); \ 697 \ 698 (_eop)->eo_u64[0] = bus_space_read_stream_8( \ 699 (_esbp)->esb_tag, (_esbp)->esb_handle, \ 700 (_offset)); \ 701 (_eop)->eo_u64[1] = bus_space_read_stream_8( \ 702 (_esbp)->esb_tag, (_esbp)->esb_handle, \ 703 (_offset) + 8); \ 704 \ 705 EFSYS_PROBE5(bar_reado, unsigned int, (_offset), \ 706 uint32_t, (_eop)->eo_u32[3], \ 707 uint32_t, (_eop)->eo_u32[2], \ 708 uint32_t, (_eop)->eo_u32[1], \ 709 uint32_t, (_eop)->eo_u32[0]); \ 710 \ 711 _NOTE(CONSTANTCONDITION) \ 712 if (_lock) \ 713 SFXGE_BAR_UNLOCK(_esbp); \ 714 _NOTE(CONSTANTCONDITION) \ 715 } while (B_FALSE) 716 717#else 718#define EFSYS_BAR_READQ(_esbp, _offset, _eqp) \ 719 do { \ 720 _NOTE(CONSTANTCONDITION) \ 721 KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)), \ 722 ("not power of 2 aligned")); \ 723 \ 724 SFXGE_BAR_LOCK(_esbp); \ 725 \ 726 (_eqp)->eq_u32[0] = bus_space_read_stream_4( \ 727 (_esbp)->esb_tag, (_esbp)->esb_handle, \ 728 
(_offset)); \ 729 (_eqp)->eq_u32[1] = bus_space_read_stream_4( \ 730 (_esbp)->esb_tag, (_esbp)->esb_handle, \ 731 (_offset) + 4); \ 732 \ 733 EFSYS_PROBE3(bar_readq, unsigned int, (_offset), \ 734 uint32_t, (_eqp)->eq_u32[1], \ 735 uint32_t, (_eqp)->eq_u32[0]); \ 736 \ 737 SFXGE_BAR_UNLOCK(_esbp); \ 738 _NOTE(CONSTANTCONDITION) \ 739 } while (B_FALSE) 740 741#define EFSYS_BAR_READO(_esbp, _offset, _eop, _lock) \ 742 do { \ 743 _NOTE(CONSTANTCONDITION) \ 744 KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)), \ 745 ("not power of 2 aligned")); \ 746 \ 747 _NOTE(CONSTANTCONDITION) \ 748 if (_lock) \ 749 SFXGE_BAR_LOCK(_esbp); \ 750 \ 751 (_eop)->eo_u32[0] = bus_space_read_stream_4( \ 752 (_esbp)->esb_tag, (_esbp)->esb_handle, \ 753 (_offset)); \ 754 (_eop)->eo_u32[1] = bus_space_read_stream_4( \ 755 (_esbp)->esb_tag, (_esbp)->esb_handle, \ 756 (_offset) + 4); \ 757 (_eop)->eo_u32[2] = bus_space_read_stream_4( \ 758 (_esbp)->esb_tag, (_esbp)->esb_handle, \ 759 (_offset) + 8); \ 760 (_eop)->eo_u32[3] = bus_space_read_stream_4( \ 761 (_esbp)->esb_tag, (_esbp)->esb_handle, \ 762 (_offset) + 12); \ 763 \ 764 EFSYS_PROBE5(bar_reado, unsigned int, (_offset), \ 765 uint32_t, (_eop)->eo_u32[3], \ 766 uint32_t, (_eop)->eo_u32[2], \ 767 uint32_t, (_eop)->eo_u32[1], \ 768 uint32_t, (_eop)->eo_u32[0]); \ 769 \ 770 _NOTE(CONSTANTCONDITION) \ 771 if (_lock) \ 772 SFXGE_BAR_UNLOCK(_esbp); \ 773 _NOTE(CONSTANTCONDITION) \ 774 } while (B_FALSE) 775#endif 776 777#define EFSYS_BAR_WRITED(_esbp, _offset, _edp, _lock) \ 778 do { \ 779 _NOTE(CONSTANTCONDITION) \ 780 KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)), \ 781 ("not power of 2 aligned")); \ 782 \ 783 _NOTE(CONSTANTCONDITION) \ 784 if (_lock) \ 785 SFXGE_BAR_LOCK(_esbp); \ 786 \ 787 EFSYS_PROBE2(bar_writed, unsigned int, (_offset), \ 788 uint32_t, (_edp)->ed_u32[0]); \ 789 \ 790 /* \ 791 * Make sure that previous writes to the dword have \ 792 * been done. It should be cheaper than barrier just \ 793 * after the write below. 
\ 794 */ \ 795 bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\ 796 (_offset), sizeof (efx_dword_t), \ 797 BUS_SPACE_BARRIER_WRITE); \ 798 bus_space_write_stream_4((_esbp)->esb_tag, \ 799 (_esbp)->esb_handle, \ 800 (_offset), (_edp)->ed_u32[0]); \ 801 \ 802 _NOTE(CONSTANTCONDITION) \ 803 if (_lock) \ 804 SFXGE_BAR_UNLOCK(_esbp); \ 805 _NOTE(CONSTANTCONDITION) \ 806 } while (B_FALSE) 807 808#if defined(SFXGE_USE_BUS_SPACE_8) 809#define EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp) \ 810 do { \ 811 _NOTE(CONSTANTCONDITION) \ 812 KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)), \ 813 ("not power of 2 aligned")); \ 814 \ 815 SFXGE_BAR_LOCK(_esbp); \ 816 \ 817 EFSYS_PROBE3(bar_writeq, unsigned int, (_offset), \ 818 uint32_t, (_eqp)->eq_u32[1], \ 819 uint32_t, (_eqp)->eq_u32[0]); \ 820 \ 821 /* \ 822 * Make sure that previous writes to the qword have \ 823 * been done. It should be cheaper than barrier just \ 824 * after the write below. \ 825 */ \ 826 bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\ 827 (_offset), sizeof (efx_qword_t), \ 828 BUS_SPACE_BARRIER_WRITE); \ 829 bus_space_write_stream_8((_esbp)->esb_tag, \ 830 (_esbp)->esb_handle, \ 831 (_offset), (_eqp)->eq_u64[0]); \ 832 \ 833 SFXGE_BAR_UNLOCK(_esbp); \ 834 _NOTE(CONSTANTCONDITION) \ 835 } while (B_FALSE) 836#else 837#define EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp) \ 838 do { \ 839 _NOTE(CONSTANTCONDITION) \ 840 KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)), \ 841 ("not power of 2 aligned")); \ 842 \ 843 SFXGE_BAR_LOCK(_esbp); \ 844 \ 845 EFSYS_PROBE3(bar_writeq, unsigned int, (_offset), \ 846 uint32_t, (_eqp)->eq_u32[1], \ 847 uint32_t, (_eqp)->eq_u32[0]); \ 848 \ 849 /* \ 850 * Make sure that previous writes to the qword have \ 851 * been done. It should be cheaper than barrier just \ 852 * after the last write below. 
\ 853 */ \ 854 bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\ 855 (_offset), sizeof (efx_qword_t), \ 856 BUS_SPACE_BARRIER_WRITE); \ 857 bus_space_write_stream_4((_esbp)->esb_tag, \ 858 (_esbp)->esb_handle, \ 859 (_offset), (_eqp)->eq_u32[0]); \ 860 /* \ 861 * It should be guaranteed that the last dword comes \ 862 * the last, so barrier entire qword to be sure that \ 863 * neither above nor below writes are reordered. \ 864 */ \ 865 bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\ 866 (_offset), sizeof (efx_qword_t), \ 867 BUS_SPACE_BARRIER_WRITE); \ 868 bus_space_write_stream_4((_esbp)->esb_tag, \ 869 (_esbp)->esb_handle, \ 870 (_offset) + 4, (_eqp)->eq_u32[1]); \ 871 \ 872 SFXGE_BAR_UNLOCK(_esbp); \ 873 _NOTE(CONSTANTCONDITION) \ 874 } while (B_FALSE) 875#endif 876 877/* 878 * Guarantees 64bit aligned 64bit writes to write combined BAR mapping 879 * (required by PIO hardware) 880 */ 881#define EFSYS_BAR_WC_WRITEQ(_esbp, _offset, _eqp) \ 882 do { \ 883 _NOTE(CONSTANTCONDITION) \ 884 KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)), \ 885 ("not power of 2 aligned")); \ 886 \ 887 (void) (_esbp); \ 888 \ 889 /* FIXME: Perform a 64-bit write */ \ 890 KASSERT(0, ("not implemented")); \ 891 \ 892 _NOTE(CONSTANTCONDITION) \ 893 } while (B_FALSE) 894 895#if defined(SFXGE_USE_BUS_SPACE_8) 896#define EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock) \ 897 do { \ 898 _NOTE(CONSTANTCONDITION) \ 899 KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)), \ 900 ("not power of 2 aligned")); \ 901 \ 902 _NOTE(CONSTANTCONDITION) \ 903 if (_lock) \ 904 SFXGE_BAR_LOCK(_esbp); \ 905 \ 906 EFSYS_PROBE5(bar_writeo, unsigned int, (_offset), \ 907 uint32_t, (_eop)->eo_u32[3], \ 908 uint32_t, (_eop)->eo_u32[2], \ 909 uint32_t, (_eop)->eo_u32[1], \ 910 uint32_t, (_eop)->eo_u32[0]); \ 911 \ 912 /* \ 913 * Make sure that previous writes to the oword have \ 914 * been done. It should be cheaper than barrier just \ 915 * after the last write below. 
\ 916 */ \ 917 bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\ 918 (_offset), sizeof (efx_oword_t), \ 919 BUS_SPACE_BARRIER_WRITE); \ 920 bus_space_write_stream_8((_esbp)->esb_tag, \ 921 (_esbp)->esb_handle, \ 922 (_offset), (_eop)->eo_u64[0]); \ 923 /* \ 924 * It should be guaranteed that the last qword comes \ 925 * the last, so barrier entire oword to be sure that \ 926 * neither above nor below writes are reordered. \ 927 */ \ 928 bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\ 929 (_offset), sizeof (efx_oword_t), \ 930 BUS_SPACE_BARRIER_WRITE); \ 931 bus_space_write_stream_8((_esbp)->esb_tag, \ 932 (_esbp)->esb_handle, \ 933 (_offset) + 8, (_eop)->eo_u64[1]); \ 934 \ 935 _NOTE(CONSTANTCONDITION) \ 936 if (_lock) \ 937 SFXGE_BAR_UNLOCK(_esbp); \ 938 _NOTE(CONSTANTCONDITION) \ 939 } while (B_FALSE) 940 941#else 942#define EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock) \ 943 do { \ 944 _NOTE(CONSTANTCONDITION) \ 945 KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)), \ 946 ("not power of 2 aligned")); \ 947 \ 948 _NOTE(CONSTANTCONDITION) \ 949 if (_lock) \ 950 SFXGE_BAR_LOCK(_esbp); \ 951 \ 952 EFSYS_PROBE5(bar_writeo, unsigned int, (_offset), \ 953 uint32_t, (_eop)->eo_u32[3], \ 954 uint32_t, (_eop)->eo_u32[2], \ 955 uint32_t, (_eop)->eo_u32[1], \ 956 uint32_t, (_eop)->eo_u32[0]); \ 957 \ 958 /* \ 959 * Make sure that previous writes to the oword have \ 960 * been done. It should be cheaper than barrier just \ 961 * after the last write below. 
\ 962 */ \ 963 bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\ 964 (_offset), sizeof (efx_oword_t), \ 965 BUS_SPACE_BARRIER_WRITE); \ 966 bus_space_write_stream_4((_esbp)->esb_tag, \ 967 (_esbp)->esb_handle, \ 968 (_offset), (_eop)->eo_u32[0]); \ 969 bus_space_write_stream_4((_esbp)->esb_tag, \ 970 (_esbp)->esb_handle, \ 971 (_offset) + 4, (_eop)->eo_u32[1]); \ 972 bus_space_write_stream_4((_esbp)->esb_tag, \ 973 (_esbp)->esb_handle, \ 974 (_offset) + 8, (_eop)->eo_u32[2]); \ 975 /* \ 976 * It should be guaranteed that the last dword comes \ 977 * the last, so barrier entire oword to be sure that \ 978 * neither above nor below writes are reordered. \ 979 */ \ 980 bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\ 981 (_offset), sizeof (efx_oword_t), \ 982 BUS_SPACE_BARRIER_WRITE); \ 983 bus_space_write_stream_4((_esbp)->esb_tag, \ 984 (_esbp)->esb_handle, \ 985 (_offset) + 12, (_eop)->eo_u32[3]); \ 986 \ 987 _NOTE(CONSTANTCONDITION) \ 988 if (_lock) \ 989 SFXGE_BAR_UNLOCK(_esbp); \ 990 _NOTE(CONSTANTCONDITION) \ 991 } while (B_FALSE) 992#endif 993 994/* Use the standard octo-word write for doorbell writes */ 995#define EFSYS_BAR_DOORBELL_WRITEO(_esbp, _offset, _eop) \ 996 do { \ 997 EFSYS_BAR_WRITEO((_esbp), (_offset), (_eop), B_FALSE); \ 998 _NOTE(CONSTANTCONDITION) \ 999 } while (B_FALSE) 1000 1001/* SPIN */ 1002 1003#define EFSYS_SPIN(_us) \ 1004 do { \ 1005 DELAY(_us); \ 1006 _NOTE(CONSTANTCONDITION) \ 1007 } while (B_FALSE) 1008 1009#define EFSYS_SLEEP EFSYS_SPIN 1010 1011/* BARRIERS */ 1012 1013#define EFSYS_MEM_READ_BARRIER() rmb() 1014#define EFSYS_PIO_WRITE_BARRIER() 1015 1016/* DMA SYNC */ 1017#define EFSYS_DMA_SYNC_FOR_KERNEL(_esmp, _offset, _size) \ 1018 do { \ 1019 bus_dmamap_sync((_esmp)->esm_tag, \ 1020 (_esmp)->esm_map, \ 1021 BUS_DMASYNC_POSTREAD); \ 1022 _NOTE(CONSTANTCONDITION) \ 1023 } while (B_FALSE) 1024 1025#define EFSYS_DMA_SYNC_FOR_DEVICE(_esmp, _offset, _size) \ 1026 do { \ 1027 bus_dmamap_sync((_esmp)->esm_tag, \ 1028 
(_esmp)->esm_map, \ 1029 BUS_DMASYNC_PREWRITE); \ 1030 _NOTE(CONSTANTCONDITION) \ 1031 } while (B_FALSE) 1032 1033/* TIMESTAMP */ 1034 1035typedef clock_t efsys_timestamp_t; 1036 1037#define EFSYS_TIMESTAMP(_usp) \ 1038 do { \ 1039 clock_t now; \ 1040 \ 1041 now = ticks; \ 1042 *(_usp) = now * hz / 1000000; \ 1043 _NOTE(CONSTANTCONDITION) \ 1044 } while (B_FALSE) 1045 1046/* KMEM */ 1047 1048#define EFSYS_KMEM_ALLOC(_esip, _size, _p) \ 1049 do { \ 1050 (_esip) = (_esip); \ 1051 /* \ 1052 * The macro is used in non-sleepable contexts, for \ 1053 * example, holding a mutex. \ 1054 */ \ 1055 (_p) = malloc((_size), M_SFXGE, M_NOWAIT|M_ZERO); \ 1056 _NOTE(CONSTANTCONDITION) \ 1057 } while (B_FALSE) 1058 1059#define EFSYS_KMEM_FREE(_esip, _size, _p) \ 1060 do { \ 1061 (void) (_esip); \ 1062 (void) (_size); \ 1063 free((_p), M_SFXGE); \ 1064 _NOTE(CONSTANTCONDITION) \ 1065 } while (B_FALSE) 1066 1067/* LOCK */ 1068 1069typedef struct efsys_lock_s { 1070 struct mtx lock; 1071 char lock_name[SFXGE_LOCK_NAME_MAX]; 1072} efsys_lock_t; 1073 1074#define SFXGE_EFSYS_LOCK_INIT(_eslp, _ifname, _label) \ 1075 do { \ 1076 efsys_lock_t *__eslp = (_eslp); \ 1077 \ 1078 snprintf((__eslp)->lock_name, \ 1079 sizeof((__eslp)->lock_name), \ 1080 "%s:%s", (_ifname), (_label)); \ 1081 mtx_init(&(__eslp)->lock, (__eslp)->lock_name, \ 1082 NULL, MTX_DEF); \ 1083 } while (B_FALSE) 1084#define SFXGE_EFSYS_LOCK_DESTROY(_eslp) \ 1085 mtx_destroy(&(_eslp)->lock) 1086#define SFXGE_EFSYS_LOCK(_eslp) \ 1087 mtx_lock(&(_eslp)->lock) 1088#define SFXGE_EFSYS_UNLOCK(_eslp) \ 1089 mtx_unlock(&(_eslp)->lock) 1090#define SFXGE_EFSYS_LOCK_ASSERT_OWNED(_eslp) \ 1091 mtx_assert(&(_eslp)->lock, MA_OWNED) 1092 1093typedef int efsys_lock_state_t; 1094 1095#define EFSYS_LOCK_MAGIC 0x000010c4 1096 1097#define EFSYS_LOCK(_lockp, _state) \ 1098 do { \ 1099 SFXGE_EFSYS_LOCK(_lockp); \ 1100 (_state) = EFSYS_LOCK_MAGIC; \ 1101 _NOTE(CONSTANTCONDITION) \ 1102 } while (B_FALSE) 1103 1104#define EFSYS_UNLOCK(_lockp, _state) 
\ 1105 do { \ 1106 if ((_state) != EFSYS_LOCK_MAGIC) \ 1107 KASSERT(B_FALSE, ("not locked")); \ 1108 SFXGE_EFSYS_UNLOCK(_lockp); \ 1109 _NOTE(CONSTANTCONDITION) \ 1110 } while (B_FALSE) 1111 1112/* STAT */ 1113 1114typedef uint64_t efsys_stat_t; 1115 1116#define EFSYS_STAT_INCR(_knp, _delta) \ 1117 do { \ 1118 *(_knp) += (_delta); \ 1119 _NOTE(CONSTANTCONDITION) \ 1120 } while (B_FALSE) 1121 1122#define EFSYS_STAT_DECR(_knp, _delta) \ 1123 do { \ 1124 *(_knp) -= (_delta); \ 1125 _NOTE(CONSTANTCONDITION) \ 1126 } while (B_FALSE) 1127 1128#define EFSYS_STAT_SET(_knp, _val) \ 1129 do { \ 1130 *(_knp) = (_val); \ 1131 _NOTE(CONSTANTCONDITION) \ 1132 } while (B_FALSE) 1133 1134#define EFSYS_STAT_SET_QWORD(_knp, _valp) \ 1135 do { \ 1136 *(_knp) = le64toh((_valp)->eq_u64[0]); \ 1137 _NOTE(CONSTANTCONDITION) \ 1138 } while (B_FALSE) 1139 1140#define EFSYS_STAT_SET_DWORD(_knp, _valp) \ 1141 do { \ 1142 *(_knp) = le32toh((_valp)->ed_u32[0]); \ 1143 _NOTE(CONSTANTCONDITION) \ 1144 } while (B_FALSE) 1145 1146#define EFSYS_STAT_INCR_QWORD(_knp, _valp) \ 1147 do { \ 1148 *(_knp) += le64toh((_valp)->eq_u64[0]); \ 1149 _NOTE(CONSTANTCONDITION) \ 1150 } while (B_FALSE) 1151 1152#define EFSYS_STAT_SUBR_QWORD(_knp, _valp) \ 1153 do { \ 1154 *(_knp) -= le64toh((_valp)->eq_u64[0]); \ 1155 _NOTE(CONSTANTCONDITION) \ 1156 } while (B_FALSE) 1157 1158/* ERR */ 1159 1160extern void sfxge_err(efsys_identifier_t *, unsigned int, 1161 uint32_t, uint32_t); 1162 1163#if EFSYS_OPT_DECODE_INTR_FATAL 1164#define EFSYS_ERR(_esip, _code, _dword0, _dword1) \ 1165 do { \ 1166 sfxge_err((_esip), (_code), (_dword0), (_dword1)); \ 1167 _NOTE(CONSTANTCONDITION) \ 1168 } while (B_FALSE) 1169#endif 1170 1171/* ASSERT */ 1172 1173#define EFSYS_ASSERT(_exp) do { \ 1174 if (!(_exp)) \ 1175 panic("%s", #_exp); \ 1176 } while (0) 1177 1178#define EFSYS_ASSERT3(_x, _op, _y, _t) do { \ 1179 const _t __x = (_t)(_x); \ 1180 const _t __y = (_t)(_y); \ 1181 if (!(__x _op __y)) \ 1182 panic("assertion failed at %s:%u", 
__FILE__, __LINE__); \ 1183 } while(0) 1184 1185#define EFSYS_ASSERT3U(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uint64_t) 1186#define EFSYS_ASSERT3S(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, int64_t) 1187#define EFSYS_ASSERT3P(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uintptr_t) 1188 1189/* ROTATE */ 1190 1191#define EFSYS_HAS_ROTL_DWORD 0 1192 1193#ifdef __cplusplus 1194} 1195#endif 1196 1197#endif /* _SYS_EFSYS_H */ 1198