/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2010-2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was developed in part by Philip Paeps under contract for
 * Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the FreeBSD Project.
 *
 * $FreeBSD$
 */

#ifndef	_SYS_EFSYS_H
#define	_SYS_EFSYS_H

#ifdef	__cplusplus
extern "C" {
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sdt.h>
#include <sys/systm.h>

#include <machine/bus.h>
#include <machine/endian.h>

#define	EFSYS_HAS_UINT64 1
#if defined(__x86_64__)
#define	EFSYS_USE_UINT64 1
#else
#define	EFSYS_USE_UINT64 0
#endif
#define	EFSYS_HAS_SSE2_M128 0
#if _BYTE_ORDER == _BIG_ENDIAN
#define	EFSYS_IS_BIG_ENDIAN 1
#define	EFSYS_IS_LITTLE_ENDIAN 0
#elif _BYTE_ORDER == _LITTLE_ENDIAN
#define	EFSYS_IS_BIG_ENDIAN 0
#define	EFSYS_IS_LITTLE_ENDIAN 1
#endif
#include "efx_types.h"

#ifndef B_FALSE
#define	B_FALSE	FALSE
#endif
#ifndef B_TRUE
#define	B_TRUE	TRUE
#endif

#ifndef ISP2
#define	ISP2(x)			(((x) & ((x) - 1)) == 0)
#endif

#if defined(__x86_64__)

#define	SFXGE_USE_BUS_SPACE_8		1

#if !defined(bus_space_read_stream_8)

#define	bus_space_read_stream_8(t, h, o)				\
	bus_space_read_8((t), (h), (o))

#define	bus_space_write_stream_8(t, h, o, v)				\
	bus_space_write_8((t), (h), (o), (v))

#endif

#endif

#define	ENOTACTIVE EINVAL

/* Memory type to use on FreeBSD */
MALLOC_DECLARE(M_SFXGE);

/* Machine-dependent prefetch wrappers */
#if defined(__i386__) || defined(__amd64__)
static __inline void
prefetch_read_many(void *addr)
{

	__asm__(
	    "prefetcht0 (%0)"
	    :
	    : "r" (addr));
}

static __inline void
prefetch_read_once(void *addr)
{

	__asm__(
	    "prefetchnta (%0)"
	    :
	    : "r" (addr));
}
#else
static __inline void
prefetch_read_many(void *addr)
{

}

static __inline void
prefetch_read_once(void *addr)
{

}
#endif
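
/*
 * Illustrative use (not part of this header): a receive event loop might
 * call prefetch_read_many(next_desc) before parsing the next descriptor,
 * where "next_desc" is a hypothetical descriptor pointer.  On architectures
 * other than i386/amd64 these wrappers compile to nothing.
 */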

#if defined(__i386__) || defined(__amd64__)
#include <vm/vm.h>
#include <vm/pmap.h>
#endif
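/*
 * Fast path for mapping an mbuf for DMA.  On i386/amd64 the bus address
 * is taken directly from the kernel map (pmap_kextract), which assumes a
 * 1:1 bus-to-physical mapping and a single physically contiguous segment;
 * other architectures fall back to bus_dmamap_load_mbuf_sg().
 */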
static __inline void
sfxge_map_mbuf_fast(bus_dma_tag_t tag, bus_dmamap_t map,
		    struct mbuf *m, bus_dma_segment_t *seg)
{
#if defined(__i386__) || defined(__amd64__)
	seg->ds_addr = pmap_kextract(mtod(m, vm_offset_t));
	seg->ds_len = m->m_len;
#else
	int nsegstmp;

	bus_dmamap_load_mbuf_sg(tag, map, m, seg, &nsegstmp, 0);
#endif
}

/* Code inclusion options */

#define	EFSYS_OPT_NAMES 1

#define	EFSYS_OPT_SIENA 1
#define	EFSYS_OPT_HUNTINGTON 1
#define	EFSYS_OPT_MEDFORD 1
#define	EFSYS_OPT_MEDFORD2 1
#ifdef DEBUG
#define	EFSYS_OPT_CHECK_REG 1
#else
#define	EFSYS_OPT_CHECK_REG 0
#endif

#define	EFSYS_OPT_MCDI 1
#define	EFSYS_OPT_MCDI_LOGGING 0
#define	EFSYS_OPT_MCDI_PROXY_AUTH 0

#define	EFSYS_OPT_MAC_STATS 1

#define	EFSYS_OPT_LOOPBACK 0

#define	EFSYS_OPT_MON_MCDI 0
#define	EFSYS_OPT_MON_STATS 0

#define	EFSYS_OPT_PHY_STATS 1
#define	EFSYS_OPT_BIST 1
#define	EFSYS_OPT_PHY_LED_CONTROL 1
#define	EFSYS_OPT_PHY_FLAGS 0

#define	EFSYS_OPT_VPD 1
#define	EFSYS_OPT_NVRAM 1
#define	EFSYS_OPT_BOOTCFG 0
#define	EFSYS_OPT_IMAGE_LAYOUT 0

#define	EFSYS_OPT_DIAG 0
#define	EFSYS_OPT_RX_SCALE 1
#define	EFSYS_OPT_QSTATS 1
#define	EFSYS_OPT_FILTER 1
#define	EFSYS_OPT_RX_SCATTER 0

#define	EFSYS_OPT_EV_PREFETCH 0

#define	EFSYS_OPT_DECODE_INTR_FATAL 1

#define	EFSYS_OPT_LICENSING 0

#define	EFSYS_OPT_ALLOW_UNCONFIGURED_NIC 0

#define	EFSYS_OPT_RX_PACKED_STREAM 0

#define	EFSYS_OPT_RX_ES_SUPER_BUFFER 0

#define	EFSYS_OPT_TUNNEL 0

#define	EFSYS_OPT_FW_SUBVARIANT_AWARE 0

/* ID */

typedef struct __efsys_identifier_s	efsys_identifier_t;

/* PROBE */

#ifndef DTRACE_PROBE

#define	EFSYS_PROBE(_name)

#define	EFSYS_PROBE1(_name, _type1, _arg1)

#define	EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2)

#define	EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3)

#define	EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4)

#define	EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5)

#define	EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6)

#define	EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6, _type7, _arg7)

#else /* DTRACE_PROBE */

#define	EFSYS_PROBE(_name)						\
	DTRACE_PROBE(_name)

#define	EFSYS_PROBE1(_name, _type1, _arg1)				\
	DTRACE_PROBE1(_name, _type1, _arg1)

#define	EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2)		\
	DTRACE_PROBE2(_name, _type1, _arg1, _type2, _arg2)

#define	EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3)						\
	DTRACE_PROBE3(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3)

#define	EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4)				\
	DTRACE_PROBE4(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4)

#ifdef DTRACE_PROBE5
#define	EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5)		\
	DTRACE_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5)
#else
#define	EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5)		\
	DTRACE_PROBE4(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4)
#endif

#ifdef DTRACE_PROBE6
#define	EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6)						\
	DTRACE_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6)
#else
#define	EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6)						\
	EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5)
#endif

#ifdef DTRACE_PROBE7
#define	EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6, _type7, _arg7)				\
	DTRACE_PROBE7(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6, _type7, _arg7)
#else
#define	EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6, _type7, _arg7)				\
	EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6)
#endif

#endif /* DTRACE_PROBE */

/* DMA */

typedef uint64_t		efsys_dma_addr_t;

typedef struct efsys_mem_s {
	bus_dma_tag_t		esm_tag;
	bus_dmamap_t		esm_map;
	caddr_t			esm_base;
	efsys_dma_addr_t	esm_addr;
	size_t			esm_size;
} efsys_mem_t;
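
/*
 * Illustrative sketch (not part of this header) of how a driver might
 * populate an efsys_mem_t with the standard busdma interfaces; the
 * callback name, "size" variable and omitted error handling below are
 * hypothetical:
 *
 *	static void
 *	sketch_dma_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		*(efsys_dma_addr_t *)arg = segs[0].ds_addr;
 *	}
 *
 *	bus_dmamem_alloc(esmp->esm_tag, (void **)&esmp->esm_base,
 *	    BUS_DMA_WAITOK | BUS_DMA_ZERO, &esmp->esm_map);
 *	bus_dmamap_load(esmp->esm_tag, esmp->esm_map, esmp->esm_base, size,
 *	    sketch_dma_cb, &esmp->esm_addr, 0);
 *	esmp->esm_size = size;
 */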

#define	EFSYS_MEM_SIZE(_esmp)						\
	((_esmp)->esm_size)

#define	EFSYS_MEM_ADDR(_esmp)						\
	((_esmp)->esm_addr)

#define	EFSYS_MEM_IS_NULL(_esmp)					\
	((_esmp)->esm_base == NULL)

#define	EFSYS_MEM_ZERO(_esmp, _size)					\
	do {								\
		(void) memset((_esmp)->esm_base, 0, (_size));		\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_MEM_READD(_esmp, _offset, _edp)				\
	do {								\
		uint32_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,		\
		    sizeof (efx_dword_t)),				\
		    ("not power of 2 aligned"));			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		(_edp)->ed_u32[0] = *addr;				\
									\
		EFSYS_PROBE2(mem_readd, unsigned int, (_offset),	\
		    uint32_t, (_edp)->ed_u32[0]);			\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#if defined(__x86_64__)
#define	EFSYS_MEM_READQ(_esmp, _offset, _eqp)				\
	do {								\
		uint64_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,		\
		    sizeof (efx_qword_t)),				\
		    ("not power of 2 aligned"));			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		(_eqp)->eq_u64[0] = *addr;				\
									\
		EFSYS_PROBE3(mem_readq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],			\
		    uint32_t, (_eqp)->eq_u32[0]);			\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#else
#define	EFSYS_MEM_READQ(_esmp, _offset, _eqp)				\
	do {								\
		uint32_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,		\
		    sizeof (efx_qword_t)),				\
		    ("not power of 2 aligned"));			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		(_eqp)->eq_u32[0] = *addr++;				\
		(_eqp)->eq_u32[1] = *addr;				\
									\
		EFSYS_PROBE3(mem_readq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],			\
		    uint32_t, (_eqp)->eq_u32[0]);			\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#endif

#if defined(__x86_64__)
#define	EFSYS_MEM_READO(_esmp, _offset, _eop)				\
	do {								\
		uint64_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,		\
		    sizeof (efx_oword_t)),				\
		    ("not power of 2 aligned"));			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		(_eop)->eo_u64[0] = *addr++;				\
		(_eop)->eo_u64[1] = *addr;				\
									\
		EFSYS_PROBE5(mem_reado, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],			\
		    uint32_t, (_eop)->eo_u32[2],			\
		    uint32_t, (_eop)->eo_u32[1],			\
		    uint32_t, (_eop)->eo_u32[0]);			\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#else
#define	EFSYS_MEM_READO(_esmp, _offset, _eop)				\
	do {								\
		uint32_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,		\
		    sizeof (efx_oword_t)),				\
		    ("not power of 2 aligned"));			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		(_eop)->eo_u32[0] = *addr++;				\
		(_eop)->eo_u32[1] = *addr++;				\
		(_eop)->eo_u32[2] = *addr++;				\
		(_eop)->eo_u32[3] = *addr;				\
									\
		EFSYS_PROBE5(mem_reado, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],			\
		    uint32_t, (_eop)->eo_u32[2],			\
		    uint32_t, (_eop)->eo_u32[1],			\
		    uint32_t, (_eop)->eo_u32[0]);			\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#endif

#define	EFSYS_MEM_WRITED(_esmp, _offset, _edp)				\
	do {								\
		uint32_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,		\
		    sizeof (efx_dword_t)),				\
		    ("not power of 2 aligned"));			\
									\
		EFSYS_PROBE2(mem_writed, unsigned int, (_offset),	\
		    uint32_t, (_edp)->ed_u32[0]);			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		*addr = (_edp)->ed_u32[0];				\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#if defined(__x86_64__)
#define	EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp)				\
	do {								\
		uint64_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,		\
		    sizeof (efx_qword_t)),				\
		    ("not power of 2 aligned"));			\
									\
		EFSYS_PROBE3(mem_writeq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],			\
		    uint32_t, (_eqp)->eq_u32[0]);			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		*addr   = (_eqp)->eq_u64[0];				\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#else
#define	EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp)				\
	do {								\
		uint32_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,		\
		    sizeof (efx_qword_t)),				\
		    ("not power of 2 aligned"));			\
									\
		EFSYS_PROBE3(mem_writeq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],			\
		    uint32_t, (_eqp)->eq_u32[0]);			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		*addr++ = (_eqp)->eq_u32[0];				\
		*addr   = (_eqp)->eq_u32[1];				\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#endif

#if defined(__x86_64__)
#define	EFSYS_MEM_WRITEO(_esmp, _offset, _eop)				\
	do {								\
		uint64_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,		\
		    sizeof (efx_oword_t)),				\
		    ("not power of 2 aligned"));			\
									\
		EFSYS_PROBE5(mem_writeo, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],			\
		    uint32_t, (_eop)->eo_u32[2],			\
		    uint32_t, (_eop)->eo_u32[1],			\
		    uint32_t, (_eop)->eo_u32[0]);			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		*addr++ = (_eop)->eo_u64[0];				\
		*addr   = (_eop)->eo_u64[1];				\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#else
#define	EFSYS_MEM_WRITEO(_esmp, _offset, _eop)				\
	do {								\
		uint32_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,		\
		    sizeof (efx_oword_t)),				\
		    ("not power of 2 aligned"));			\
									\
		EFSYS_PROBE5(mem_writeo, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],			\
		    uint32_t, (_eop)->eo_u32[2],			\
		    uint32_t, (_eop)->eo_u32[1],			\
		    uint32_t, (_eop)->eo_u32[0]);			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		*addr++ = (_eop)->eo_u32[0];				\
		*addr++ = (_eop)->eo_u32[1];				\
		*addr++ = (_eop)->eo_u32[2];				\
		*addr   = (_eop)->eo_u32[3];				\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#endif

/* BAR */

#define	SFXGE_LOCK_NAME_MAX	16

typedef struct efsys_bar_s {
	struct mtx		esb_lock;
	char			esb_lock_name[SFXGE_LOCK_NAME_MAX];
	bus_space_tag_t		esb_tag;
	bus_space_handle_t	esb_handle;
	int			esb_rid;
	struct resource		*esb_res;
} efsys_bar_t;

#define	SFXGE_BAR_LOCK_INIT(_esbp, _ifname)				\
	do {								\
		snprintf((_esbp)->esb_lock_name,			\
			 sizeof((_esbp)->esb_lock_name),		\
			 "%s:bar", (_ifname));				\
		mtx_init(&(_esbp)->esb_lock, (_esbp)->esb_lock_name,	\
			 NULL, MTX_DEF);				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#define	SFXGE_BAR_LOCK_DESTROY(_esbp)					\
	mtx_destroy(&(_esbp)->esb_lock)
#define	SFXGE_BAR_LOCK(_esbp)						\
	mtx_lock(&(_esbp)->esb_lock)
#define	SFXGE_BAR_UNLOCK(_esbp)						\
	mtx_unlock(&(_esbp)->esb_lock)

#define	EFSYS_BAR_READD(_esbp, _offset, _edp, _lock)			\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,		\
		    sizeof (efx_dword_t)),				\
		    ("not power of 2 aligned"));			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_LOCK(_esbp);				\
									\
		(_edp)->ed_u32[0] = bus_space_read_stream_4(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset));						\
									\
		EFSYS_PROBE2(bar_readd, unsigned int, (_offset),	\
		    uint32_t, (_edp)->ed_u32[0]);			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_UNLOCK(_esbp);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#if defined(SFXGE_USE_BUS_SPACE_8)
#define	EFSYS_BAR_READQ(_esbp, _offset, _eqp)				\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,		\
		    sizeof (efx_qword_t)),				\
		    ("not power of 2 aligned"));			\
									\
		SFXGE_BAR_LOCK(_esbp);					\
									\
		(_eqp)->eq_u64[0] = bus_space_read_stream_8(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset));						\
									\
		EFSYS_PROBE3(bar_readq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],			\
		    uint32_t, (_eqp)->eq_u32[0]);			\
									\
		SFXGE_BAR_UNLOCK(_esbp);				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_BAR_READO(_esbp, _offset, _eop, _lock)			\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,		\
		    sizeof (efx_oword_t)),				\
		    ("not power of 2 aligned"));			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_LOCK(_esbp);				\
									\
		(_eop)->eo_u64[0] = bus_space_read_stream_8(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset));						\
		(_eop)->eo_u64[1] = bus_space_read_stream_8(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset) + 8);					\
									\
		EFSYS_PROBE5(bar_reado, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],			\
		    uint32_t, (_eop)->eo_u32[2],			\
		    uint32_t, (_eop)->eo_u32[1],			\
		    uint32_t, (_eop)->eo_u32[0]);			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_UNLOCK(_esbp);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#else
#define	EFSYS_BAR_READQ(_esbp, _offset, _eqp)				\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,		\
		    sizeof (efx_qword_t)),				\
		    ("not power of 2 aligned"));			\
									\
		SFXGE_BAR_LOCK(_esbp);					\
									\
		(_eqp)->eq_u32[0] = bus_space_read_stream_4(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset));						\
		(_eqp)->eq_u32[1] = bus_space_read_stream_4(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset) + 4);					\
									\
		EFSYS_PROBE3(bar_readq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],			\
		    uint32_t, (_eqp)->eq_u32[0]);			\
									\
		SFXGE_BAR_UNLOCK(_esbp);				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_BAR_READO(_esbp, _offset, _eop, _lock)			\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,		\
		    sizeof (efx_oword_t)),				\
		    ("not power of 2 aligned"));			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_LOCK(_esbp);				\
									\
		(_eop)->eo_u32[0] = bus_space_read_stream_4(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset));						\
		(_eop)->eo_u32[1] = bus_space_read_stream_4(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset) + 4);					\
		(_eop)->eo_u32[2] = bus_space_read_stream_4(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset) + 8);					\
		(_eop)->eo_u32[3] = bus_space_read_stream_4(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset) + 12);					\
									\
		EFSYS_PROBE5(bar_reado, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],			\
		    uint32_t, (_eop)->eo_u32[2],			\
		    uint32_t, (_eop)->eo_u32[1],			\
		    uint32_t, (_eop)->eo_u32[0]);			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_UNLOCK(_esbp);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#endif

#define	EFSYS_BAR_WRITED(_esbp, _offset, _edp, _lock)			\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,		\
		    sizeof (efx_dword_t)),				\
		    ("not power of 2 aligned"));			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_LOCK(_esbp);				\
									\
		EFSYS_PROBE2(bar_writed, unsigned int, (_offset),	\
		    uint32_t, (_edp)->ed_u32[0]);			\
									\
		/*							\
		 * Make sure that previous writes to the dword have	\
		 * completed; a barrier here should be cheaper than	\
		 * one just after the write below.			\
		 */							\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
		    (_offset), sizeof (efx_dword_t),			\
		    BUS_SPACE_BARRIER_WRITE);				\
		bus_space_write_stream_4((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset), (_edp)->ed_u32[0]);			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_UNLOCK(_esbp);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#if defined(SFXGE_USE_BUS_SPACE_8)
#define	EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp)				\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,		\
		    sizeof (efx_qword_t)),				\
		    ("not power of 2 aligned"));			\
									\
		SFXGE_BAR_LOCK(_esbp);					\
									\
		EFSYS_PROBE3(bar_writeq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],			\
		    uint32_t, (_eqp)->eq_u32[0]);			\
									\
		/*							\
		 * Make sure that previous writes to the qword have	\
		 * completed; a barrier here should be cheaper than	\
		 * one just after the write below.			\
		 */							\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
		    (_offset), sizeof (efx_qword_t),			\
		    BUS_SPACE_BARRIER_WRITE);				\
		bus_space_write_stream_8((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset), (_eqp)->eq_u64[0]);			\
									\
		SFXGE_BAR_UNLOCK(_esbp);				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#else
#define	EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp)				\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,		\
		    sizeof (efx_qword_t)),				\
		    ("not power of 2 aligned"));			\
									\
		SFXGE_BAR_LOCK(_esbp);					\
									\
		EFSYS_PROBE3(bar_writeq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],			\
		    uint32_t, (_eqp)->eq_u32[0]);			\
									\
		/*							\
		 * Make sure that previous writes to the qword have	\
		 * completed; a barrier here should be cheaper than	\
		 * one just after the last write below.			\
		 */							\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
		    (_offset), sizeof (efx_qword_t),			\
		    BUS_SPACE_BARRIER_WRITE);				\
		bus_space_write_stream_4((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset), (_eqp)->eq_u32[0]);			\
		/*							\
		 * The last dword must be written last, so barrier	\
		 * the entire qword to ensure that neither the		\
		 * preceding nor the following writes are reordered.	\
		 */							\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
		    (_offset), sizeof (efx_qword_t),			\
		    BUS_SPACE_BARRIER_WRITE);				\
		bus_space_write_stream_4((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset) + 4, (_eqp)->eq_u32[1]);			\
									\
		SFXGE_BAR_UNLOCK(_esbp);				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#endif

/*
 * Guarantees 64-bit aligned 64-bit writes to a write-combined BAR mapping
 * (required by the PIO hardware).
 */
#define	EFSYS_BAR_WC_WRITEQ(_esbp, _offset, _eqp)			\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,		\
		    sizeof (efx_qword_t)),				\
		    ("not power of 2 aligned"));			\
									\
		(void) (_esbp);						\
									\
		/* FIXME: Perform a 64-bit write */			\
		KASSERT(0, ("not implemented"));			\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#if defined(SFXGE_USE_BUS_SPACE_8)
#define	EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock)			\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,		\
		    sizeof (efx_oword_t)),				\
		    ("not power of 2 aligned"));			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_LOCK(_esbp);				\
									\
		EFSYS_PROBE5(bar_writeo, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],			\
		    uint32_t, (_eop)->eo_u32[2],			\
		    uint32_t, (_eop)->eo_u32[1],			\
		    uint32_t, (_eop)->eo_u32[0]);			\
									\
		/*							\
		 * Make sure that previous writes to the oword have	\
		 * completed; a barrier here should be cheaper than	\
		 * one just after the last write below.			\
		 */							\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
		    (_offset), sizeof (efx_oword_t),			\
		    BUS_SPACE_BARRIER_WRITE);				\
		bus_space_write_stream_8((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset), (_eop)->eo_u64[0]);			\
		/*							\
		 * The last qword must be written last, so barrier	\
		 * the entire oword to ensure that neither the		\
		 * preceding nor the following writes are reordered.	\
		 */							\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
		    (_offset), sizeof (efx_oword_t),			\
		    BUS_SPACE_BARRIER_WRITE);				\
		bus_space_write_stream_8((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset) + 8, (_eop)->eo_u64[1]);			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_UNLOCK(_esbp);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#else
#define	EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock)			\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,		\
		    sizeof (efx_oword_t)),				\
		    ("not power of 2 aligned"));			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_LOCK(_esbp);				\
									\
		EFSYS_PROBE5(bar_writeo, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],			\
		    uint32_t, (_eop)->eo_u32[2],			\
		    uint32_t, (_eop)->eo_u32[1],			\
		    uint32_t, (_eop)->eo_u32[0]);			\
									\
		/*							\
		 * Make sure that previous writes to the oword have	\
		 * completed; a barrier here should be cheaper than	\
		 * one just after the last write below.			\
		 */							\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
		    (_offset), sizeof (efx_oword_t),			\
		    BUS_SPACE_BARRIER_WRITE);				\
		bus_space_write_stream_4((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset), (_eop)->eo_u32[0]);			\
		bus_space_write_stream_4((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset) + 4, (_eop)->eo_u32[1]);			\
		bus_space_write_stream_4((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset) + 8, (_eop)->eo_u32[2]);			\
		/*							\
		 * The last dword must be written last, so barrier	\
		 * the entire oword to ensure that neither the		\
		 * preceding nor the following writes are reordered.	\
		 */							\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
		    (_offset), sizeof (efx_oword_t),			\
		    BUS_SPACE_BARRIER_WRITE);				\
		bus_space_write_stream_4((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset) + 12, (_eop)->eo_u32[3]);			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_UNLOCK(_esbp);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#endif

/* Use the standard octo-word write for doorbell writes */
#define	EFSYS_BAR_DOORBELL_WRITEO(_esbp, _offset, _eop)			\
	do {								\
		EFSYS_BAR_WRITEO((_esbp), (_offset), (_eop), B_FALSE);	\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

/* SPIN */

#define	EFSYS_SPIN(_us)							\
	do {								\
		DELAY(_us);						\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_SLEEP	EFSYS_SPIN

/* BARRIERS */

#define	EFSYS_MEM_READ_BARRIER()	rmb()
#define	EFSYS_PIO_WRITE_BARRIER()

/* DMA SYNC */
#define	EFSYS_DMA_SYNC_FOR_KERNEL(_esmp, _offset, _size)		\
	do {								\
		bus_dmamap_sync((_esmp)->esm_tag,			\
		    (_esmp)->esm_map,					\
		    BUS_DMASYNC_POSTREAD);				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_DMA_SYNC_FOR_DEVICE(_esmp, _offset, _size)		\
	do {								\
		bus_dmamap_sync((_esmp)->esm_tag,			\
		    (_esmp)->esm_map,					\
		    BUS_DMASYNC_PREWRITE);				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

/* TIMESTAMP */

typedef	clock_t	efsys_timestamp_t;

#define	EFSYS_TIMESTAMP(_usp)						\
	do {								\
		clock_t now;						\
									\
		now = ticks;						\
		*(_usp) = now * hz / 1000000;				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

/* KMEM */

#define	EFSYS_KMEM_ALLOC(_esip, _size, _p)				\
	do {								\
		(void) (_esip);						\
		/*							\
		 * The macro is used in non-sleepable contexts, for	\
		 * example, while holding a mutex.			\
		 */							\
		(_p) = malloc((_size), M_SFXGE, M_NOWAIT|M_ZERO);	\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_KMEM_FREE(_esip, _size, _p)				\
	do {								\
		(void) (_esip);						\
		(void) (_size);						\
		free((_p), M_SFXGE);					\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
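
/*
 * Illustrative pairing of the KMEM macros (the allocation uses M_NOWAIT,
 * so the result must be checked); "esip" and "descp" below are
 * hypothetical names:
 *
 *	EFSYS_KMEM_ALLOC(esip, sizeof (*descp), descp);
 *	if (descp == NULL)
 *		return (ENOMEM);
 *	...
 *	EFSYS_KMEM_FREE(esip, sizeof (*descp), descp);
 */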

/* LOCK */

typedef struct efsys_lock_s {
	struct mtx	lock;
	char		lock_name[SFXGE_LOCK_NAME_MAX];
} efsys_lock_t;

#define	SFXGE_EFSYS_LOCK_INIT(_eslp, _ifname, _label)			\
	do {								\
		efsys_lock_t *__eslp = (_eslp);				\
									\
		snprintf((__eslp)->lock_name,				\
			 sizeof((__eslp)->lock_name),			\
			 "%s:%s", (_ifname), (_label));			\
		mtx_init(&(__eslp)->lock, (__eslp)->lock_name,		\
			 NULL, MTX_DEF);				\
	} while (B_FALSE)
#define	SFXGE_EFSYS_LOCK_DESTROY(_eslp)					\
	mtx_destroy(&(_eslp)->lock)
#define	SFXGE_EFSYS_LOCK(_eslp)						\
	mtx_lock(&(_eslp)->lock)
#define	SFXGE_EFSYS_UNLOCK(_eslp)					\
	mtx_unlock(&(_eslp)->lock)
#define	SFXGE_EFSYS_LOCK_ASSERT_OWNED(_eslp)				\
	mtx_assert(&(_eslp)->lock, MA_OWNED)

typedef int efsys_lock_state_t;

#define	EFSYS_LOCK_MAGIC	0x000010c4

#define	EFSYS_LOCK(_lockp, _state)					\
	do {								\
		SFXGE_EFSYS_LOCK(_lockp);				\
		(_state) = EFSYS_LOCK_MAGIC;				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_UNLOCK(_lockp, _state)					\
	do {								\
		if ((_state) != EFSYS_LOCK_MAGIC)			\
			KASSERT(B_FALSE, ("not locked"));		\
		SFXGE_EFSYS_UNLOCK(_lockp);				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
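
/*
 * Illustrative use of the lock macros; "lockp" is a hypothetical
 * efsys_lock_t pointer:
 *
 *	efsys_lock_state_t state;
 *
 *	EFSYS_LOCK(lockp, state);
 *	... critical section ...
 *	EFSYS_UNLOCK(lockp, state);
 *
 * EFSYS_UNLOCK() asserts that the state value written by EFSYS_LOCK()
 * is still present, i.e. that the lock is actually held.
 */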

/* STAT */

typedef uint64_t		efsys_stat_t;

#define	EFSYS_STAT_INCR(_knp, _delta)					\
	do {								\
		*(_knp) += (_delta);					\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_STAT_DECR(_knp, _delta)					\
	do {								\
		*(_knp) -= (_delta);					\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_STAT_SET(_knp, _val)					\
	do {								\
		*(_knp) = (_val);					\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_STAT_SET_QWORD(_knp, _valp)				\
	do {								\
		*(_knp) = le64toh((_valp)->eq_u64[0]);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_STAT_SET_DWORD(_knp, _valp)				\
	do {								\
		*(_knp) = le32toh((_valp)->ed_u32[0]);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_STAT_INCR_QWORD(_knp, _valp)				\
	do {								\
		*(_knp) += le64toh((_valp)->eq_u64[0]);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_STAT_SUBR_QWORD(_knp, _valp)				\
	do {								\
		*(_knp) -= le64toh((_valp)->eq_u64[0]);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

/* ERR */

extern void	sfxge_err(efsys_identifier_t *, unsigned int,
		    uint32_t, uint32_t);

#if EFSYS_OPT_DECODE_INTR_FATAL
#define	EFSYS_ERR(_esip, _code, _dword0, _dword1)			\
	do {								\
		sfxge_err((_esip), (_code), (_dword0), (_dword1));	\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#endif

/* ASSERT */

#define	EFSYS_ASSERT(_exp) do {						\
	if (!(_exp))							\
		panic("%s", #_exp);					\
	} while (0)

#define	EFSYS_ASSERT3(_x, _op, _y, _t) do {				\
	const _t __x = (_t)(_x);					\
	const _t __y = (_t)(_y);					\
	if (!(__x _op __y))						\
		panic("assertion failed at %s:%u", __FILE__, __LINE__);	\
	} while (0)

#define	EFSYS_ASSERT3U(_x, _op, _y)	EFSYS_ASSERT3(_x, _op, _y, uint64_t)
#define	EFSYS_ASSERT3S(_x, _op, _y)	EFSYS_ASSERT3(_x, _op, _y, int64_t)
#define	EFSYS_ASSERT3P(_x, _op, _y)	EFSYS_ASSERT3(_x, _op, _y, uintptr_t)
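
/*
 * Example: EFSYS_ASSERT3U(id, <, table_size) promotes both operands to
 * uint64_t and panics with the file name and line number if the
 * comparison fails; "id" and "table_size" are hypothetical names.
 */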

/* ROTATE */

#define	EFSYS_HAS_ROTL_DWORD 0

#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_EFSYS_H */
