/* efsys.h revision 350412 */
1/*- 2 * Copyright (c) 2010-2016 Solarflare Communications Inc. 3 * All rights reserved. 4 * 5 * This software was developed in part by Philip Paeps under contract for 6 * Solarflare Communications, Inc. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright notice, 12 * this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright notice, 14 * this list of conditions and the following disclaimer in the documentation 15 * and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 18 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 19 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 20 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR 21 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 22 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 23 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 24 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 25 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 26 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, 27 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 * 29 * The views and conclusions contained in the software and documentation are 30 * those of the authors and should not be interpreted as representing official 31 * policies, either expressed or implied, of the FreeBSD Project. 
32 * 33 * $FreeBSD: stable/11/sys/dev/sfxge/common/efsys.h 350412 2019-07-29 10:44:04Z arybchik $ 34 */ 35 36#ifndef _SYS_EFSYS_H 37#define _SYS_EFSYS_H 38 39#ifdef __cplusplus 40extern "C" { 41#endif 42 43#include <sys/param.h> 44#include <sys/bus.h> 45#include <sys/endian.h> 46#include <sys/lock.h> 47#include <sys/malloc.h> 48#include <sys/mbuf.h> 49#include <sys/mutex.h> 50#include <sys/rwlock.h> 51#include <sys/sdt.h> 52#include <sys/systm.h> 53 54#include <machine/bus.h> 55#include <machine/endian.h> 56 57#define EFSYS_HAS_UINT64 1 58#if defined(__x86_64__) 59#define EFSYS_USE_UINT64 1 60#else 61#define EFSYS_USE_UINT64 0 62#endif 63#define EFSYS_HAS_SSE2_M128 0 64#if _BYTE_ORDER == _BIG_ENDIAN 65#define EFSYS_IS_BIG_ENDIAN 1 66#define EFSYS_IS_LITTLE_ENDIAN 0 67#elif _BYTE_ORDER == _LITTLE_ENDIAN 68#define EFSYS_IS_BIG_ENDIAN 0 69#define EFSYS_IS_LITTLE_ENDIAN 1 70#endif 71#include "efx_types.h" 72 73/* Common code requires this */ 74#if __FreeBSD_version < 800068 75#define memmove(d, s, l) bcopy(s, d, l) 76#endif 77 78/* FreeBSD equivalents of Solaris things */ 79#ifndef _NOTE 80#define _NOTE(s) 81#endif 82 83#ifndef B_FALSE 84#define B_FALSE FALSE 85#endif 86#ifndef B_TRUE 87#define B_TRUE TRUE 88#endif 89 90#ifndef IS2P 91#define ISP2(x) (((x) & ((x) - 1)) == 0) 92#endif 93 94#if defined(__x86_64__) && __FreeBSD_version >= 1000000 95 96#define SFXGE_USE_BUS_SPACE_8 1 97 98#if !defined(bus_space_read_stream_8) 99 100#define bus_space_read_stream_8(t, h, o) \ 101 bus_space_read_8((t), (h), (o)) 102 103#define bus_space_write_stream_8(t, h, o, v) \ 104 bus_space_write_8((t), (h), (o), (v)) 105 106#endif 107 108#endif 109 110#define ENOTACTIVE EINVAL 111 112/* Memory type to use on FreeBSD */ 113MALLOC_DECLARE(M_SFXGE); 114 115/* Machine dependend prefetch wrappers */ 116#if defined(__i386__) || defined(__amd64__) 117static __inline void 118prefetch_read_many(void *addr) 119{ 120 121 __asm__( 122 "prefetcht0 (%0)" 123 : 124 : "r" (addr)); 125} 126 127static 
__inline void 128prefetch_read_once(void *addr) 129{ 130 131 __asm__( 132 "prefetchnta (%0)" 133 : 134 : "r" (addr)); 135} 136#elif defined(__sparc64__) 137static __inline void 138prefetch_read_many(void *addr) 139{ 140 141 __asm__( 142 "prefetch [%0], 0" 143 : 144 : "r" (addr)); 145} 146 147static __inline void 148prefetch_read_once(void *addr) 149{ 150 151 __asm__( 152 "prefetch [%0], 1" 153 : 154 : "r" (addr)); 155} 156#else 157static __inline void 158prefetch_read_many(void *addr) 159{ 160 161} 162 163static __inline void 164prefetch_read_once(void *addr) 165{ 166 167} 168#endif 169 170#if defined(__i386__) || defined(__amd64__) 171#include <vm/vm.h> 172#include <vm/pmap.h> 173#endif 174static __inline void 175sfxge_map_mbuf_fast(bus_dma_tag_t tag, bus_dmamap_t map, 176 struct mbuf *m, bus_dma_segment_t *seg) 177{ 178#if defined(__i386__) || defined(__amd64__) 179 seg->ds_addr = pmap_kextract(mtod(m, vm_offset_t)); 180 seg->ds_len = m->m_len; 181#else 182 int nsegstmp; 183 184 bus_dmamap_load_mbuf_sg(tag, map, m, seg, &nsegstmp, 0); 185#endif 186} 187 188/* Modifiers used for Windows builds */ 189#define __in 190#define __in_opt 191#define __in_ecount(_n) 192#define __in_ecount_opt(_n) 193#define __in_bcount(_n) 194#define __in_bcount_opt(_n) 195 196#define __out 197#define __out_opt 198#define __out_ecount(_n) 199#define __out_ecount_opt(_n) 200#define __out_bcount(_n) 201#define __out_bcount_opt(_n) 202#define __out_bcount_part(_n, _l) 203#define __out_bcount_part_opt(_n, _l) 204 205#define __deref_out 206 207#define __inout 208#define __inout_opt 209#define __inout_ecount(_n) 210#define __inout_ecount_opt(_n) 211#define __inout_bcount(_n) 212#define __inout_bcount_opt(_n) 213#define __inout_bcount_full_opt(_n) 214 215#define __deref_out_bcount_opt(n) 216 217#define __checkReturn 218#define __success(_x) 219 220#define __drv_when(_p, _c) 221 222/* Code inclusion options */ 223 224 225#define EFSYS_OPT_NAMES 1 226 227#define EFSYS_OPT_SIENA 1 228#define 
EFSYS_OPT_HUNTINGTON 1 229#define EFSYS_OPT_MEDFORD 1 230#ifdef DEBUG 231#define EFSYS_OPT_CHECK_REG 1 232#else 233#define EFSYS_OPT_CHECK_REG 0 234#endif 235 236#define EFSYS_OPT_MCDI 1 237#define EFSYS_OPT_MCDI_LOGGING 0 238#define EFSYS_OPT_MCDI_PROXY_AUTH 0 239 240#define EFSYS_OPT_MAC_STATS 1 241 242#define EFSYS_OPT_LOOPBACK 0 243 244#define EFSYS_OPT_MON_MCDI 0 245#define EFSYS_OPT_MON_STATS 0 246 247#define EFSYS_OPT_PHY_STATS 1 248#define EFSYS_OPT_BIST 1 249#define EFSYS_OPT_PHY_LED_CONTROL 1 250#define EFSYS_OPT_PHY_FLAGS 0 251 252#define EFSYS_OPT_VPD 1 253#define EFSYS_OPT_NVRAM 1 254#define EFSYS_OPT_BOOTCFG 0 255 256#define EFSYS_OPT_DIAG 0 257#define EFSYS_OPT_RX_SCALE 1 258#define EFSYS_OPT_QSTATS 1 259#define EFSYS_OPT_FILTER 1 260#define EFSYS_OPT_RX_SCATTER 0 261 262#define EFSYS_OPT_EV_PREFETCH 0 263 264#define EFSYS_OPT_DECODE_INTR_FATAL 1 265 266#define EFSYS_OPT_LICENSING 0 267 268#define EFSYS_OPT_ALLOW_UNCONFIGURED_NIC 0 269 270/* ID */ 271 272typedef struct __efsys_identifier_s efsys_identifier_t; 273 274/* PROBE */ 275 276#ifndef DTRACE_PROBE 277 278#define EFSYS_PROBE(_name) 279 280#define EFSYS_PROBE1(_name, _type1, _arg1) 281 282#define EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2) 283 284#define EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2, \ 285 _type3, _arg3) 286 287#define EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2, \ 288 _type3, _arg3, _type4, _arg4) 289 290#define EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \ 291 _type3, _arg3, _type4, _arg4, _type5, _arg5) 292 293#define EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \ 294 _type3, _arg3, _type4, _arg4, _type5, _arg5, \ 295 _type6, _arg6) 296 297#define EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2, \ 298 _type3, _arg3, _type4, _arg4, _type5, _arg5, \ 299 _type6, _arg6, _type7, _arg7) 300 301#else /* DTRACE_PROBE */ 302 303#define EFSYS_PROBE(_name) \ 304 DTRACE_PROBE(_name) 305 306#define EFSYS_PROBE1(_name, _type1, _arg1) \ 307 DTRACE_PROBE1(_name, 
_type1, _arg1) 308 309#define EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2) \ 310 DTRACE_PROBE2(_name, _type1, _arg1, _type2, _arg2) 311 312#define EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2, \ 313 _type3, _arg3) \ 314 DTRACE_PROBE3(_name, _type1, _arg1, _type2, _arg2, \ 315 _type3, _arg3) 316 317#define EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2, \ 318 _type3, _arg3, _type4, _arg4) \ 319 DTRACE_PROBE4(_name, _type1, _arg1, _type2, _arg2, \ 320 _type3, _arg3, _type4, _arg4) 321 322#ifdef DTRACE_PROBE5 323#define EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \ 324 _type3, _arg3, _type4, _arg4, _type5, _arg5) \ 325 DTRACE_PROBE5(_name, _type1, _arg1, _type2, _arg2, \ 326 _type3, _arg3, _type4, _arg4, _type5, _arg5) 327#else 328#define EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \ 329 _type3, _arg3, _type4, _arg4, _type5, _arg5) \ 330 DTRACE_PROBE4(_name, _type1, _arg1, _type2, _arg2, \ 331 _type3, _arg3, _type4, _arg4) 332#endif 333 334#ifdef DTRACE_PROBE6 335#define EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \ 336 _type3, _arg3, _type4, _arg4, _type5, _arg5, \ 337 _type6, _arg6) \ 338 DTRACE_PROBE6(_name, _type1, _arg1, _type2, _arg2, \ 339 _type3, _arg3, _type4, _arg4, _type5, _arg5, \ 340 _type6, _arg6) 341#else 342#define EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \ 343 _type3, _arg3, _type4, _arg4, _type5, _arg5, \ 344 _type6, _arg6) \ 345 EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \ 346 _type3, _arg3, _type4, _arg4, _type5, _arg5) 347#endif 348 349#ifdef DTRACE_PROBE7 350#define EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2, \ 351 _type3, _arg3, _type4, _arg4, _type5, _arg5, \ 352 _type6, _arg6, _type7, _arg7) \ 353 DTRACE_PROBE7(_name, _type1, _arg1, _type2, _arg2, \ 354 _type3, _arg3, _type4, _arg4, _type5, _arg5, \ 355 _type6, _arg6, _type7, _arg7) 356#else 357#define EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2, \ 358 _type3, _arg3, _type4, _arg4, _type5, _arg5, \ 359 _type6, _arg6, _type7, _arg7) \ 360 
EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \ 361 _type3, _arg3, _type4, _arg4, _type5, _arg5, \ 362 _type6, _arg6) 363#endif 364 365#endif /* DTRACE_PROBE */ 366 367/* DMA */ 368 369typedef uint64_t efsys_dma_addr_t; 370 371typedef struct efsys_mem_s { 372 bus_dma_tag_t esm_tag; 373 bus_dmamap_t esm_map; 374 caddr_t esm_base; 375 efsys_dma_addr_t esm_addr; 376} efsys_mem_t; 377 378 379#define EFSYS_MEM_ZERO(_esmp, _size) \ 380 do { \ 381 (void) memset((_esmp)->esm_base, 0, (_size)); \ 382 \ 383 _NOTE(CONSTANTCONDITION) \ 384 } while (B_FALSE) 385 386#define EFSYS_MEM_READD(_esmp, _offset, _edp) \ 387 do { \ 388 uint32_t *addr; \ 389 \ 390 _NOTE(CONSTANTCONDITION) \ 391 KASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ 392 sizeof (efx_dword_t)), \ 393 ("not power of 2 aligned")); \ 394 \ 395 addr = (void *)((_esmp)->esm_base + (_offset)); \ 396 \ 397 (_edp)->ed_u32[0] = *addr; \ 398 \ 399 EFSYS_PROBE2(mem_readd, unsigned int, (_offset), \ 400 uint32_t, (_edp)->ed_u32[0]); \ 401 \ 402 _NOTE(CONSTANTCONDITION) \ 403 } while (B_FALSE) 404 405#if defined(__x86_64__) 406#define EFSYS_MEM_READQ(_esmp, _offset, _eqp) \ 407 do { \ 408 uint64_t *addr; \ 409 \ 410 _NOTE(CONSTANTCONDITION) \ 411 KASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ 412 sizeof (efx_qword_t)), \ 413 ("not power of 2 aligned")); \ 414 \ 415 addr = (void *)((_esmp)->esm_base + (_offset)); \ 416 \ 417 (_eqp)->eq_u64[0] = *addr; \ 418 \ 419 EFSYS_PROBE3(mem_readq, unsigned int, (_offset), \ 420 uint32_t, (_eqp)->eq_u32[1], \ 421 uint32_t, (_eqp)->eq_u32[0]); \ 422 \ 423 _NOTE(CONSTANTCONDITION) \ 424 } while (B_FALSE) 425#else 426#define EFSYS_MEM_READQ(_esmp, _offset, _eqp) \ 427 do { \ 428 uint32_t *addr; \ 429 \ 430 _NOTE(CONSTANTCONDITION) \ 431 KASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ 432 sizeof (efx_qword_t)), \ 433 ("not power of 2 aligned")); \ 434 \ 435 addr = (void *)((_esmp)->esm_base + (_offset)); \ 436 \ 437 (_eqp)->eq_u32[0] = *addr++; \ 438 (_eqp)->eq_u32[1] = *addr; \ 439 \ 440 
EFSYS_PROBE3(mem_readq, unsigned int, (_offset), \ 441 uint32_t, (_eqp)->eq_u32[1], \ 442 uint32_t, (_eqp)->eq_u32[0]); \ 443 \ 444 _NOTE(CONSTANTCONDITION) \ 445 } while (B_FALSE) 446#endif 447 448#if defined(__x86_64__) 449#define EFSYS_MEM_READO(_esmp, _offset, _eop) \ 450 do { \ 451 uint64_t *addr; \ 452 \ 453 _NOTE(CONSTANTCONDITION) \ 454 KASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ 455 sizeof (efx_oword_t)), \ 456 ("not power of 2 aligned")); \ 457 \ 458 addr = (void *)((_esmp)->esm_base + (_offset)); \ 459 \ 460 (_eop)->eo_u64[0] = *addr++; \ 461 (_eop)->eo_u64[1] = *addr; \ 462 \ 463 EFSYS_PROBE5(mem_reado, unsigned int, (_offset), \ 464 uint32_t, (_eop)->eo_u32[3], \ 465 uint32_t, (_eop)->eo_u32[2], \ 466 uint32_t, (_eop)->eo_u32[1], \ 467 uint32_t, (_eop)->eo_u32[0]); \ 468 \ 469 _NOTE(CONSTANTCONDITION) \ 470 } while (B_FALSE) 471#else 472#define EFSYS_MEM_READO(_esmp, _offset, _eop) \ 473 do { \ 474 uint32_t *addr; \ 475 \ 476 _NOTE(CONSTANTCONDITION) \ 477 KASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ 478 sizeof (efx_oword_t)), \ 479 ("not power of 2 aligned")); \ 480 \ 481 addr = (void *)((_esmp)->esm_base + (_offset)); \ 482 \ 483 (_eop)->eo_u32[0] = *addr++; \ 484 (_eop)->eo_u32[1] = *addr++; \ 485 (_eop)->eo_u32[2] = *addr++; \ 486 (_eop)->eo_u32[3] = *addr; \ 487 \ 488 EFSYS_PROBE5(mem_reado, unsigned int, (_offset), \ 489 uint32_t, (_eop)->eo_u32[3], \ 490 uint32_t, (_eop)->eo_u32[2], \ 491 uint32_t, (_eop)->eo_u32[1], \ 492 uint32_t, (_eop)->eo_u32[0]); \ 493 \ 494 _NOTE(CONSTANTCONDITION) \ 495 } while (B_FALSE) 496#endif 497 498#define EFSYS_MEM_WRITED(_esmp, _offset, _edp) \ 499 do { \ 500 uint32_t *addr; \ 501 \ 502 _NOTE(CONSTANTCONDITION) \ 503 KASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ 504 sizeof (efx_dword_t)), \ 505 ("not power of 2 aligned")); \ 506 \ 507 EFSYS_PROBE2(mem_writed, unsigned int, (_offset), \ 508 uint32_t, (_edp)->ed_u32[0]); \ 509 \ 510 addr = (void *)((_esmp)->esm_base + (_offset)); \ 511 \ 512 *addr = (_edp)->ed_u32[0]; 
\ 513 \ 514 _NOTE(CONSTANTCONDITION) \ 515 } while (B_FALSE) 516 517#if defined(__x86_64__) 518#define EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp) \ 519 do { \ 520 uint64_t *addr; \ 521 \ 522 _NOTE(CONSTANTCONDITION) \ 523 KASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ 524 sizeof (efx_qword_t)), \ 525 ("not power of 2 aligned")); \ 526 \ 527 EFSYS_PROBE3(mem_writeq, unsigned int, (_offset), \ 528 uint32_t, (_eqp)->eq_u32[1], \ 529 uint32_t, (_eqp)->eq_u32[0]); \ 530 \ 531 addr = (void *)((_esmp)->esm_base + (_offset)); \ 532 \ 533 *addr = (_eqp)->eq_u64[0]; \ 534 \ 535 _NOTE(CONSTANTCONDITION) \ 536 } while (B_FALSE) 537 538#else 539#define EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp) \ 540 do { \ 541 uint32_t *addr; \ 542 \ 543 _NOTE(CONSTANTCONDITION) \ 544 KASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ 545 sizeof (efx_qword_t)), \ 546 ("not power of 2 aligned")); \ 547 \ 548 EFSYS_PROBE3(mem_writeq, unsigned int, (_offset), \ 549 uint32_t, (_eqp)->eq_u32[1], \ 550 uint32_t, (_eqp)->eq_u32[0]); \ 551 \ 552 addr = (void *)((_esmp)->esm_base + (_offset)); \ 553 \ 554 *addr++ = (_eqp)->eq_u32[0]; \ 555 *addr = (_eqp)->eq_u32[1]; \ 556 \ 557 _NOTE(CONSTANTCONDITION) \ 558 } while (B_FALSE) 559#endif 560 561#if defined(__x86_64__) 562#define EFSYS_MEM_WRITEO(_esmp, _offset, _eop) \ 563 do { \ 564 uint64_t *addr; \ 565 \ 566 _NOTE(CONSTANTCONDITION) \ 567 KASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ 568 sizeof (efx_oword_t)), \ 569 ("not power of 2 aligned")); \ 570 \ 571 EFSYS_PROBE5(mem_writeo, unsigned int, (_offset), \ 572 uint32_t, (_eop)->eo_u32[3], \ 573 uint32_t, (_eop)->eo_u32[2], \ 574 uint32_t, (_eop)->eo_u32[1], \ 575 uint32_t, (_eop)->eo_u32[0]); \ 576 \ 577 addr = (void *)((_esmp)->esm_base + (_offset)); \ 578 \ 579 *addr++ = (_eop)->eo_u64[0]; \ 580 *addr = (_eop)->eo_u64[1]; \ 581 \ 582 _NOTE(CONSTANTCONDITION) \ 583 } while (B_FALSE) 584#else 585#define EFSYS_MEM_WRITEO(_esmp, _offset, _eop) \ 586 do { \ 587 uint32_t *addr; \ 588 \ 589 _NOTE(CONSTANTCONDITION) \ 590 
KASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ 591 sizeof (efx_oword_t)), \ 592 ("not power of 2 aligned")); \ 593 \ 594 EFSYS_PROBE5(mem_writeo, unsigned int, (_offset), \ 595 uint32_t, (_eop)->eo_u32[3], \ 596 uint32_t, (_eop)->eo_u32[2], \ 597 uint32_t, (_eop)->eo_u32[1], \ 598 uint32_t, (_eop)->eo_u32[0]); \ 599 \ 600 addr = (void *)((_esmp)->esm_base + (_offset)); \ 601 \ 602 *addr++ = (_eop)->eo_u32[0]; \ 603 *addr++ = (_eop)->eo_u32[1]; \ 604 *addr++ = (_eop)->eo_u32[2]; \ 605 *addr = (_eop)->eo_u32[3]; \ 606 \ 607 _NOTE(CONSTANTCONDITION) \ 608 } while (B_FALSE) 609#endif 610 611#define EFSYS_MEM_ADDR(_esmp) \ 612 ((_esmp)->esm_addr) 613 614#define EFSYS_MEM_IS_NULL(_esmp) \ 615 ((_esmp)->esm_base == NULL) 616 617/* BAR */ 618 619#define SFXGE_LOCK_NAME_MAX 16 620 621typedef struct efsys_bar_s { 622 struct mtx esb_lock; 623 char esb_lock_name[SFXGE_LOCK_NAME_MAX]; 624 bus_space_tag_t esb_tag; 625 bus_space_handle_t esb_handle; 626 int esb_rid; 627 struct resource *esb_res; 628} efsys_bar_t; 629 630#define SFXGE_BAR_LOCK_INIT(_esbp, _ifname) \ 631 do { \ 632 snprintf((_esbp)->esb_lock_name, \ 633 sizeof((_esbp)->esb_lock_name), \ 634 "%s:bar", (_ifname)); \ 635 mtx_init(&(_esbp)->esb_lock, (_esbp)->esb_lock_name, \ 636 NULL, MTX_DEF); \ 637 _NOTE(CONSTANTCONDITION) \ 638 } while (B_FALSE) 639#define SFXGE_BAR_LOCK_DESTROY(_esbp) \ 640 mtx_destroy(&(_esbp)->esb_lock) 641#define SFXGE_BAR_LOCK(_esbp) \ 642 mtx_lock(&(_esbp)->esb_lock) 643#define SFXGE_BAR_UNLOCK(_esbp) \ 644 mtx_unlock(&(_esbp)->esb_lock) 645 646#define EFSYS_BAR_READD(_esbp, _offset, _edp, _lock) \ 647 do { \ 648 _NOTE(CONSTANTCONDITION) \ 649 KASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ 650 sizeof (efx_dword_t)), \ 651 ("not power of 2 aligned")); \ 652 \ 653 _NOTE(CONSTANTCONDITION) \ 654 if (_lock) \ 655 SFXGE_BAR_LOCK(_esbp); \ 656 \ 657 (_edp)->ed_u32[0] = bus_space_read_stream_4( \ 658 (_esbp)->esb_tag, (_esbp)->esb_handle, \ 659 (_offset)); \ 660 \ 661 EFSYS_PROBE2(bar_readd, unsigned int, 
(_offset), \ 662 uint32_t, (_edp)->ed_u32[0]); \ 663 \ 664 _NOTE(CONSTANTCONDITION) \ 665 if (_lock) \ 666 SFXGE_BAR_UNLOCK(_esbp); \ 667 _NOTE(CONSTANTCONDITION) \ 668 } while (B_FALSE) 669 670#if defined(SFXGE_USE_BUS_SPACE_8) 671#define EFSYS_BAR_READQ(_esbp, _offset, _eqp) \ 672 do { \ 673 _NOTE(CONSTANTCONDITION) \ 674 KASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ 675 sizeof (efx_qword_t)), \ 676 ("not power of 2 aligned")); \ 677 \ 678 SFXGE_BAR_LOCK(_esbp); \ 679 \ 680 (_eqp)->eq_u64[0] = bus_space_read_stream_8( \ 681 (_esbp)->esb_tag, (_esbp)->esb_handle, \ 682 (_offset)); \ 683 \ 684 EFSYS_PROBE3(bar_readq, unsigned int, (_offset), \ 685 uint32_t, (_eqp)->eq_u32[1], \ 686 uint32_t, (_eqp)->eq_u32[0]); \ 687 \ 688 SFXGE_BAR_UNLOCK(_esbp); \ 689 _NOTE(CONSTANTCONDITION) \ 690 } while (B_FALSE) 691 692#define EFSYS_BAR_READO(_esbp, _offset, _eop, _lock) \ 693 do { \ 694 _NOTE(CONSTANTCONDITION) \ 695 KASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ 696 sizeof (efx_oword_t)), \ 697 ("not power of 2 aligned")); \ 698 \ 699 _NOTE(CONSTANTCONDITION) \ 700 if (_lock) \ 701 SFXGE_BAR_LOCK(_esbp); \ 702 \ 703 (_eop)->eo_u64[0] = bus_space_read_stream_8( \ 704 (_esbp)->esb_tag, (_esbp)->esb_handle, \ 705 (_offset)); \ 706 (_eop)->eo_u64[1] = bus_space_read_stream_8( \ 707 (_esbp)->esb_tag, (_esbp)->esb_handle, \ 708 (_offset) + 8); \ 709 \ 710 EFSYS_PROBE5(bar_reado, unsigned int, (_offset), \ 711 uint32_t, (_eop)->eo_u32[3], \ 712 uint32_t, (_eop)->eo_u32[2], \ 713 uint32_t, (_eop)->eo_u32[1], \ 714 uint32_t, (_eop)->eo_u32[0]); \ 715 \ 716 _NOTE(CONSTANTCONDITION) \ 717 if (_lock) \ 718 SFXGE_BAR_UNLOCK(_esbp); \ 719 _NOTE(CONSTANTCONDITION) \ 720 } while (B_FALSE) 721 722#else 723#define EFSYS_BAR_READQ(_esbp, _offset, _eqp) \ 724 do { \ 725 _NOTE(CONSTANTCONDITION) \ 726 KASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ 727 sizeof (efx_qword_t)), \ 728 ("not power of 2 aligned")); \ 729 \ 730 SFXGE_BAR_LOCK(_esbp); \ 731 \ 732 (_eqp)->eq_u32[0] = bus_space_read_stream_4( \ 
733 (_esbp)->esb_tag, (_esbp)->esb_handle, \ 734 (_offset)); \ 735 (_eqp)->eq_u32[1] = bus_space_read_stream_4( \ 736 (_esbp)->esb_tag, (_esbp)->esb_handle, \ 737 (_offset) + 4); \ 738 \ 739 EFSYS_PROBE3(bar_readq, unsigned int, (_offset), \ 740 uint32_t, (_eqp)->eq_u32[1], \ 741 uint32_t, (_eqp)->eq_u32[0]); \ 742 \ 743 SFXGE_BAR_UNLOCK(_esbp); \ 744 _NOTE(CONSTANTCONDITION) \ 745 } while (B_FALSE) 746 747#define EFSYS_BAR_READO(_esbp, _offset, _eop, _lock) \ 748 do { \ 749 _NOTE(CONSTANTCONDITION) \ 750 KASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ 751 sizeof (efx_oword_t)), \ 752 ("not power of 2 aligned")); \ 753 \ 754 _NOTE(CONSTANTCONDITION) \ 755 if (_lock) \ 756 SFXGE_BAR_LOCK(_esbp); \ 757 \ 758 (_eop)->eo_u32[0] = bus_space_read_stream_4( \ 759 (_esbp)->esb_tag, (_esbp)->esb_handle, \ 760 (_offset)); \ 761 (_eop)->eo_u32[1] = bus_space_read_stream_4( \ 762 (_esbp)->esb_tag, (_esbp)->esb_handle, \ 763 (_offset) + 4); \ 764 (_eop)->eo_u32[2] = bus_space_read_stream_4( \ 765 (_esbp)->esb_tag, (_esbp)->esb_handle, \ 766 (_offset) + 8); \ 767 (_eop)->eo_u32[3] = bus_space_read_stream_4( \ 768 (_esbp)->esb_tag, (_esbp)->esb_handle, \ 769 (_offset) + 12); \ 770 \ 771 EFSYS_PROBE5(bar_reado, unsigned int, (_offset), \ 772 uint32_t, (_eop)->eo_u32[3], \ 773 uint32_t, (_eop)->eo_u32[2], \ 774 uint32_t, (_eop)->eo_u32[1], \ 775 uint32_t, (_eop)->eo_u32[0]); \ 776 \ 777 _NOTE(CONSTANTCONDITION) \ 778 if (_lock) \ 779 SFXGE_BAR_UNLOCK(_esbp); \ 780 _NOTE(CONSTANTCONDITION) \ 781 } while (B_FALSE) 782#endif 783 784#define EFSYS_BAR_WRITED(_esbp, _offset, _edp, _lock) \ 785 do { \ 786 _NOTE(CONSTANTCONDITION) \ 787 KASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ 788 sizeof (efx_dword_t)), \ 789 ("not power of 2 aligned")); \ 790 \ 791 _NOTE(CONSTANTCONDITION) \ 792 if (_lock) \ 793 SFXGE_BAR_LOCK(_esbp); \ 794 \ 795 EFSYS_PROBE2(bar_writed, unsigned int, (_offset), \ 796 uint32_t, (_edp)->ed_u32[0]); \ 797 \ 798 /* \ 799 * Make sure that previous writes to the dword have \ 
800 * been done. It should be cheaper than barrier just \ 801 * after the write below. \ 802 */ \ 803 bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\ 804 (_offset), sizeof (efx_dword_t), \ 805 BUS_SPACE_BARRIER_WRITE); \ 806 bus_space_write_stream_4((_esbp)->esb_tag, \ 807 (_esbp)->esb_handle, \ 808 (_offset), (_edp)->ed_u32[0]); \ 809 \ 810 _NOTE(CONSTANTCONDITION) \ 811 if (_lock) \ 812 SFXGE_BAR_UNLOCK(_esbp); \ 813 _NOTE(CONSTANTCONDITION) \ 814 } while (B_FALSE) 815 816#if defined(SFXGE_USE_BUS_SPACE_8) 817#define EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp) \ 818 do { \ 819 _NOTE(CONSTANTCONDITION) \ 820 KASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ 821 sizeof (efx_qword_t)), \ 822 ("not power of 2 aligned")); \ 823 \ 824 SFXGE_BAR_LOCK(_esbp); \ 825 \ 826 EFSYS_PROBE3(bar_writeq, unsigned int, (_offset), \ 827 uint32_t, (_eqp)->eq_u32[1], \ 828 uint32_t, (_eqp)->eq_u32[0]); \ 829 \ 830 /* \ 831 * Make sure that previous writes to the qword have \ 832 * been done. It should be cheaper than barrier just \ 833 * after the write below. \ 834 */ \ 835 bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\ 836 (_offset), sizeof (efx_qword_t), \ 837 BUS_SPACE_BARRIER_WRITE); \ 838 bus_space_write_stream_8((_esbp)->esb_tag, \ 839 (_esbp)->esb_handle, \ 840 (_offset), (_eqp)->eq_u64[0]); \ 841 \ 842 SFXGE_BAR_UNLOCK(_esbp); \ 843 _NOTE(CONSTANTCONDITION) \ 844 } while (B_FALSE) 845#else 846#define EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp) \ 847 do { \ 848 _NOTE(CONSTANTCONDITION) \ 849 KASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ 850 sizeof (efx_qword_t)), \ 851 ("not power of 2 aligned")); \ 852 \ 853 SFXGE_BAR_LOCK(_esbp); \ 854 \ 855 EFSYS_PROBE3(bar_writeq, unsigned int, (_offset), \ 856 uint32_t, (_eqp)->eq_u32[1], \ 857 uint32_t, (_eqp)->eq_u32[0]); \ 858 \ 859 /* \ 860 * Make sure that previous writes to the qword have \ 861 * been done. It should be cheaper than barrier just \ 862 * after the last write below. 
\ 863 */ \ 864 bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\ 865 (_offset), sizeof (efx_qword_t), \ 866 BUS_SPACE_BARRIER_WRITE); \ 867 bus_space_write_stream_4((_esbp)->esb_tag, \ 868 (_esbp)->esb_handle, \ 869 (_offset), (_eqp)->eq_u32[0]); \ 870 /* \ 871 * It should be guaranteed that the last dword comes \ 872 * the last, so barrier entire qword to be sure that \ 873 * neither above nor below writes are reordered. \ 874 */ \ 875 bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\ 876 (_offset), sizeof (efx_qword_t), \ 877 BUS_SPACE_BARRIER_WRITE); \ 878 bus_space_write_stream_4((_esbp)->esb_tag, \ 879 (_esbp)->esb_handle, \ 880 (_offset) + 4, (_eqp)->eq_u32[1]); \ 881 \ 882 SFXGE_BAR_UNLOCK(_esbp); \ 883 _NOTE(CONSTANTCONDITION) \ 884 } while (B_FALSE) 885#endif 886 887/* 888 * Guarantees 64bit aligned 64bit writes to write combined BAR mapping 889 * (required by PIO hardware) 890 */ 891#define EFSYS_BAR_WC_WRITEQ(_esbp, _offset, _eqp) \ 892 do { \ 893 _NOTE(CONSTANTCONDITION) \ 894 KASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ 895 sizeof (efx_qword_t)), \ 896 ("not power of 2 aligned")); \ 897 \ 898 (void) (_esbp); \ 899 \ 900 /* FIXME: Perform a 64-bit write */ \ 901 KASSERT(0, ("not implemented")); \ 902 \ 903 _NOTE(CONSTANTCONDITION) \ 904 } while (B_FALSE) 905 906#if defined(SFXGE_USE_BUS_SPACE_8) 907#define EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock) \ 908 do { \ 909 _NOTE(CONSTANTCONDITION) \ 910 KASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ 911 sizeof (efx_oword_t)), \ 912 ("not power of 2 aligned")); \ 913 \ 914 _NOTE(CONSTANTCONDITION) \ 915 if (_lock) \ 916 SFXGE_BAR_LOCK(_esbp); \ 917 \ 918 EFSYS_PROBE5(bar_writeo, unsigned int, (_offset), \ 919 uint32_t, (_eop)->eo_u32[3], \ 920 uint32_t, (_eop)->eo_u32[2], \ 921 uint32_t, (_eop)->eo_u32[1], \ 922 uint32_t, (_eop)->eo_u32[0]); \ 923 \ 924 /* \ 925 * Make sure that previous writes to the oword have \ 926 * been done. 
It should be cheaper than barrier just \ 927 * after the last write below. \ 928 */ \ 929 bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\ 930 (_offset), sizeof (efx_oword_t), \ 931 BUS_SPACE_BARRIER_WRITE); \ 932 bus_space_write_stream_8((_esbp)->esb_tag, \ 933 (_esbp)->esb_handle, \ 934 (_offset), (_eop)->eo_u64[0]); \ 935 /* \ 936 * It should be guaranteed that the last qword comes \ 937 * the last, so barrier entire oword to be sure that \ 938 * neither above nor below writes are reordered. \ 939 */ \ 940 bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\ 941 (_offset), sizeof (efx_oword_t), \ 942 BUS_SPACE_BARRIER_WRITE); \ 943 bus_space_write_stream_8((_esbp)->esb_tag, \ 944 (_esbp)->esb_handle, \ 945 (_offset) + 8, (_eop)->eo_u64[1]); \ 946 \ 947 _NOTE(CONSTANTCONDITION) \ 948 if (_lock) \ 949 SFXGE_BAR_UNLOCK(_esbp); \ 950 _NOTE(CONSTANTCONDITION) \ 951 } while (B_FALSE) 952 953#else 954#define EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock) \ 955 do { \ 956 _NOTE(CONSTANTCONDITION) \ 957 KASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ 958 sizeof (efx_oword_t)), \ 959 ("not power of 2 aligned")); \ 960 \ 961 _NOTE(CONSTANTCONDITION) \ 962 if (_lock) \ 963 SFXGE_BAR_LOCK(_esbp); \ 964 \ 965 EFSYS_PROBE5(bar_writeo, unsigned int, (_offset), \ 966 uint32_t, (_eop)->eo_u32[3], \ 967 uint32_t, (_eop)->eo_u32[2], \ 968 uint32_t, (_eop)->eo_u32[1], \ 969 uint32_t, (_eop)->eo_u32[0]); \ 970 \ 971 /* \ 972 * Make sure that previous writes to the oword have \ 973 * been done. It should be cheaper than barrier just \ 974 * after the last write below. 
\ 975 */ \ 976 bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\ 977 (_offset), sizeof (efx_oword_t), \ 978 BUS_SPACE_BARRIER_WRITE); \ 979 bus_space_write_stream_4((_esbp)->esb_tag, \ 980 (_esbp)->esb_handle, \ 981 (_offset), (_eop)->eo_u32[0]); \ 982 bus_space_write_stream_4((_esbp)->esb_tag, \ 983 (_esbp)->esb_handle, \ 984 (_offset) + 4, (_eop)->eo_u32[1]); \ 985 bus_space_write_stream_4((_esbp)->esb_tag, \ 986 (_esbp)->esb_handle, \ 987 (_offset) + 8, (_eop)->eo_u32[2]); \ 988 /* \ 989 * It should be guaranteed that the last dword comes \ 990 * the last, so barrier entire oword to be sure that \ 991 * neither above nor below writes are reordered. \ 992 */ \ 993 bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\ 994 (_offset), sizeof (efx_oword_t), \ 995 BUS_SPACE_BARRIER_WRITE); \ 996 bus_space_write_stream_4((_esbp)->esb_tag, \ 997 (_esbp)->esb_handle, \ 998 (_offset) + 12, (_eop)->eo_u32[3]); \ 999 \ 1000 _NOTE(CONSTANTCONDITION) \ 1001 if (_lock) \ 1002 SFXGE_BAR_UNLOCK(_esbp); \ 1003 _NOTE(CONSTANTCONDITION) \ 1004 } while (B_FALSE) 1005#endif 1006 1007/* Use the standard octo-word write for doorbell writes */ 1008#define EFSYS_BAR_DOORBELL_WRITEO(_esbp, _offset, _eop) \ 1009 do { \ 1010 EFSYS_BAR_WRITEO((_esbp), (_offset), (_eop), B_FALSE); \ 1011 _NOTE(CONSTANTCONDITION) \ 1012 } while (B_FALSE) 1013 1014/* SPIN */ 1015 1016#define EFSYS_SPIN(_us) \ 1017 do { \ 1018 DELAY(_us); \ 1019 _NOTE(CONSTANTCONDITION) \ 1020 } while (B_FALSE) 1021 1022#define EFSYS_SLEEP EFSYS_SPIN 1023 1024/* BARRIERS */ 1025 1026#define EFSYS_MEM_READ_BARRIER() rmb() 1027#define EFSYS_PIO_WRITE_BARRIER() 1028 1029/* DMA SYNC */ 1030#define EFSYS_DMA_SYNC_FOR_KERNEL(_esmp, _offset, _size) \ 1031 do { \ 1032 bus_dmamap_sync((_esmp)->esm_tag, \ 1033 (_esmp)->esm_map, \ 1034 BUS_DMASYNC_POSTREAD); \ 1035 _NOTE(CONSTANTCONDITION) \ 1036 } while (B_FALSE) 1037 1038#define EFSYS_DMA_SYNC_FOR_DEVICE(_esmp, _offset, _size) \ 1039 do { \ 1040 
bus_dmamap_sync((_esmp)->esm_tag, \ 1041 (_esmp)->esm_map, \ 1042 BUS_DMASYNC_PREWRITE); \ 1043 _NOTE(CONSTANTCONDITION) \ 1044 } while (B_FALSE) 1045 1046/* TIMESTAMP */ 1047 1048typedef clock_t efsys_timestamp_t; 1049 1050#define EFSYS_TIMESTAMP(_usp) \ 1051 do { \ 1052 clock_t now; \ 1053 \ 1054 now = ticks; \ 1055 *(_usp) = now * hz / 1000000; \ 1056 _NOTE(CONSTANTCONDITION) \ 1057 } while (B_FALSE) 1058 1059/* KMEM */ 1060 1061#define EFSYS_KMEM_ALLOC(_esip, _size, _p) \ 1062 do { \ 1063 (_esip) = (_esip); \ 1064 /* \ 1065 * The macro is used in non-sleepable contexts, for \ 1066 * example, holding a mutex. \ 1067 */ \ 1068 (_p) = malloc((_size), M_SFXGE, M_NOWAIT|M_ZERO); \ 1069 _NOTE(CONSTANTCONDITION) \ 1070 } while (B_FALSE) 1071 1072#define EFSYS_KMEM_FREE(_esip, _size, _p) \ 1073 do { \ 1074 (void) (_esip); \ 1075 (void) (_size); \ 1076 free((_p), M_SFXGE); \ 1077 _NOTE(CONSTANTCONDITION) \ 1078 } while (B_FALSE) 1079 1080/* LOCK */ 1081 1082typedef struct efsys_lock_s { 1083 struct mtx lock; 1084 char lock_name[SFXGE_LOCK_NAME_MAX]; 1085} efsys_lock_t; 1086 1087#define SFXGE_EFSYS_LOCK_INIT(_eslp, _ifname, _label) \ 1088 do { \ 1089 efsys_lock_t *__eslp = (_eslp); \ 1090 \ 1091 snprintf((__eslp)->lock_name, \ 1092 sizeof((__eslp)->lock_name), \ 1093 "%s:%s", (_ifname), (_label)); \ 1094 mtx_init(&(__eslp)->lock, (__eslp)->lock_name, \ 1095 NULL, MTX_DEF); \ 1096 } while (B_FALSE) 1097#define SFXGE_EFSYS_LOCK_DESTROY(_eslp) \ 1098 mtx_destroy(&(_eslp)->lock) 1099#define SFXGE_EFSYS_LOCK(_eslp) \ 1100 mtx_lock(&(_eslp)->lock) 1101#define SFXGE_EFSYS_UNLOCK(_eslp) \ 1102 mtx_unlock(&(_eslp)->lock) 1103#define SFXGE_EFSYS_LOCK_ASSERT_OWNED(_eslp) \ 1104 mtx_assert(&(_eslp)->lock, MA_OWNED) 1105 1106typedef int efsys_lock_state_t; 1107 1108#define EFSYS_LOCK_MAGIC 0x000010c4 1109 1110#define EFSYS_LOCK(_lockp, _state) \ 1111 do { \ 1112 SFXGE_EFSYS_LOCK(_lockp); \ 1113 (_state) = EFSYS_LOCK_MAGIC; \ 1114 _NOTE(CONSTANTCONDITION) \ 1115 } while (B_FALSE) 1116 
1117#define EFSYS_UNLOCK(_lockp, _state) \ 1118 do { \ 1119 if ((_state) != EFSYS_LOCK_MAGIC) \ 1120 KASSERT(B_FALSE, ("not locked")); \ 1121 SFXGE_EFSYS_UNLOCK(_lockp); \ 1122 _NOTE(CONSTANTCONDITION) \ 1123 } while (B_FALSE) 1124 1125/* STAT */ 1126 1127typedef uint64_t efsys_stat_t; 1128 1129#define EFSYS_STAT_INCR(_knp, _delta) \ 1130 do { \ 1131 *(_knp) += (_delta); \ 1132 _NOTE(CONSTANTCONDITION) \ 1133 } while (B_FALSE) 1134 1135#define EFSYS_STAT_DECR(_knp, _delta) \ 1136 do { \ 1137 *(_knp) -= (_delta); \ 1138 _NOTE(CONSTANTCONDITION) \ 1139 } while (B_FALSE) 1140 1141#define EFSYS_STAT_SET(_knp, _val) \ 1142 do { \ 1143 *(_knp) = (_val); \ 1144 _NOTE(CONSTANTCONDITION) \ 1145 } while (B_FALSE) 1146 1147#define EFSYS_STAT_SET_QWORD(_knp, _valp) \ 1148 do { \ 1149 *(_knp) = le64toh((_valp)->eq_u64[0]); \ 1150 _NOTE(CONSTANTCONDITION) \ 1151 } while (B_FALSE) 1152 1153#define EFSYS_STAT_SET_DWORD(_knp, _valp) \ 1154 do { \ 1155 *(_knp) = le32toh((_valp)->ed_u32[0]); \ 1156 _NOTE(CONSTANTCONDITION) \ 1157 } while (B_FALSE) 1158 1159#define EFSYS_STAT_INCR_QWORD(_knp, _valp) \ 1160 do { \ 1161 *(_knp) += le64toh((_valp)->eq_u64[0]); \ 1162 _NOTE(CONSTANTCONDITION) \ 1163 } while (B_FALSE) 1164 1165#define EFSYS_STAT_SUBR_QWORD(_knp, _valp) \ 1166 do { \ 1167 *(_knp) -= le64toh((_valp)->eq_u64[0]); \ 1168 _NOTE(CONSTANTCONDITION) \ 1169 } while (B_FALSE) 1170 1171/* ERR */ 1172 1173extern void sfxge_err(efsys_identifier_t *, unsigned int, 1174 uint32_t, uint32_t); 1175 1176#if EFSYS_OPT_DECODE_INTR_FATAL 1177#define EFSYS_ERR(_esip, _code, _dword0, _dword1) \ 1178 do { \ 1179 sfxge_err((_esip), (_code), (_dword0), (_dword1)); \ 1180 _NOTE(CONSTANTCONDITION) \ 1181 } while (B_FALSE) 1182#endif 1183 1184/* ASSERT */ 1185 1186#define EFSYS_ASSERT(_exp) do { \ 1187 if (!(_exp)) \ 1188 panic("%s", #_exp); \ 1189 } while (0) 1190 1191#define EFSYS_ASSERT3(_x, _op, _y, _t) do { \ 1192 const _t __x = (_t)(_x); \ 1193 const _t __y = (_t)(_y); \ 1194 if (!(__x _op __y)) 
\ 1195 panic("assertion failed at %s:%u", __FILE__, __LINE__); \ 1196 } while(0) 1197 1198#define EFSYS_ASSERT3U(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uint64_t) 1199#define EFSYS_ASSERT3S(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, int64_t) 1200#define EFSYS_ASSERT3P(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uintptr_t) 1201 1202/* ROTATE */ 1203 1204#define EFSYS_HAS_ROTL_DWORD 0 1205 1206#ifdef __cplusplus 1207} 1208#endif 1209 1210#endif /* _SYS_EFSYS_H */ 1211