/*-
 * Copyright (C) 2013 Intel Corporation
 * Copyright (C) 2015 EMC Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/ntb/ntb_hw/ntb_hw.c 289648 2015-10-20 19:20:15Z cem $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/bus.h>
#include <machine/pmap.h>
#include <machine/resource.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "ntb_regs.h"
#include "ntb_hw.h"

/*
 * The Non-Transparent Bridge (NTB) is a device on some Intel processors that
 * allows you to connect two systems using a PCI-e link.
 *
 * This module contains the hardware abstraction layer for the NTB. It allows
 * you to send and receive interrupts, map the memory windows, and send and
 * receive messages in the scratch-pad registers.
 *
 * NOTE: Much of the code in this module is shared with Linux. Any patches may
 * be picked up and redistributed in Linux with a dual GPL/BSD license.
 */

#define MAX_MSIX_INTERRUPTS MAX(XEON_DB_COUNT, ATOM_DB_COUNT)

#define NTB_HB_TIMEOUT		1 /* second */
#define ATOM_LINK_RECOVERY_TIME	500 /* ms */

#define DEVICE2SOFTC(dev) ((struct ntb_softc *) device_get_softc(dev))

enum ntb_device_type {
	NTB_XEON,
	NTB_ATOM
};

/* ntb_conn_type are hardware numbers, cannot change. */
enum ntb_conn_type {
	NTB_CONN_TRANSPARENT = 0,
	NTB_CONN_B2B = 1,
	NTB_CONN_RP = 2,
};

enum ntb_b2b_direction {
	NTB_DEV_USD = 0,
	NTB_DEV_DSD = 1,
};

enum ntb_bar {
	NTB_CONFIG_BAR = 0,
	NTB_B2B_BAR_1,
	NTB_B2B_BAR_2,
	NTB_B2B_BAR_3,
	NTB_MAX_BARS
};

/* Device features and workarounds */
#define HAS_FEATURE(feature)	\
	((ntb->features & (feature)) != 0)

struct ntb_hw_info {
	uint32_t		device_id;
	const char		*desc;
	enum ntb_device_type	type;
	uint32_t		features;
};

struct ntb_pci_bar_info {
	bus_space_tag_t		pci_bus_tag;
	bus_space_handle_t	pci_bus_handle;
	int			pci_resource_id;
	struct resource		*pci_resource;
	vm_paddr_t		pbase;
	void			*vbase;
	u_long			size;

	/* Configuration register offsets */
	uint32_t		psz_off;
	uint32_t		ssz_off;
	uint32_t		pbarxlat_off;
};

struct ntb_int_info {
	struct resource	*res;
	int		rid;
	void		*tag;
};

struct ntb_vec {
	struct ntb_softc	*ntb;
	uint32_t		num;
};

struct ntb_reg {
	uint32_t	ntb_ctl;
	uint32_t	lnk_sta;
	uint8_t		db_size;
	unsigned	mw_bar[NTB_MAX_BARS];
};

struct ntb_alt_reg {
	uint32_t	db_bell;
	uint32_t	db_mask;
	uint32_t	spad;
};

struct ntb_xlat_reg {
	uint32_t	bar0_base;
	uint32_t	bar2_base;
	uint32_t	bar4_base;
	uint32_t	bar5_base;

	uint32_t	bar2_xlat;
	uint32_t	bar4_xlat;
	uint32_t	bar5_xlat;

	uint32_t	bar2_limit;
	uint32_t	bar4_limit;
	uint32_t	bar5_limit;
};

struct ntb_b2b_addr {
	uint64_t	bar0_addr;
	uint64_t	bar2_addr64;
	uint64_t	bar4_addr64;
	uint64_t	bar4_addr32;
	uint64_t	bar5_addr32;
};

struct ntb_softc {
	device_t		device;
	enum ntb_device_type	type;
	uint64_t		features;

	struct ntb_pci_bar_info	bar_info[NTB_MAX_BARS];
	struct ntb_int_info	int_info[MAX_MSIX_INTERRUPTS];
	uint32_t		allocated_interrupts;

	struct callout		heartbeat_timer;
	struct callout		lr_timer;

	void			*ntb_ctx;
	const struct ntb_ctx_ops *ctx_ops;
	struct ntb_vec		*msix_vec;
#define CTX_LOCK(sc)		mtx_lock_spin(&(sc)->ctx_lock)
#define CTX_UNLOCK(sc)		mtx_unlock_spin(&(sc)->ctx_lock)
#define CTX_ASSERT(sc,f)	mtx_assert(&(sc)->ctx_lock, (f))
	struct mtx		ctx_lock;

	uint32_t		ppd;
	enum ntb_conn_type	conn_type;
	enum ntb_b2b_direction	dev_type;

	/* Offset of peer bar0 in B2B BAR */
	uint64_t			b2b_off;
	/* Memory window used to access peer bar0 */
#define B2B_MW_DISABLED			UINT8_MAX
	uint8_t				b2b_mw_idx;

	uint8_t				mw_count;
	uint8_t				spad_count;
	uint8_t				db_count;
	uint8_t				db_vec_count;
	uint8_t				db_vec_shift;

	/* Protects local db_mask. */
#define DB_MASK_LOCK(sc)	mtx_lock_spin(&(sc)->db_mask_lock)
#define DB_MASK_UNLOCK(sc)	mtx_unlock_spin(&(sc)->db_mask_lock)
#define DB_MASK_ASSERT(sc,f)	mtx_assert(&(sc)->db_mask_lock, (f))
	struct mtx			db_mask_lock;

	uint32_t			ntb_ctl;
	uint32_t			lnk_sta;

	uint64_t			db_valid_mask;
	uint64_t			db_link_mask;
	uint64_t			db_mask;

	int				last_ts;	/* ticks @ last irq */

	const struct ntb_reg		*reg;
	const struct ntb_alt_reg	*self_reg;
	const struct ntb_alt_reg	*peer_reg;
	const struct ntb_xlat_reg	*xlat_reg;
};

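/*
 * i386 lacks the 8-byte bus_space accessors used below, so emulate them with
 * two 4-byte accesses (low dword first).  Note that the combined access is
 * not atomic.
 */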
#ifdef __i386__
static __inline uint64_t
bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t offset)
{

	return (bus_space_read_4(tag, handle, offset) |
	    ((uint64_t)bus_space_read_4(tag, handle, offset + 4)) << 32);
}

static __inline void
bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t offset, uint64_t val)
{

	bus_space_write_4(tag, handle, offset, val);
	bus_space_write_4(tag, handle, offset + 4, val >> 32);
}
#endif

#define ntb_bar_read(SIZE, bar, offset) \
	    bus_space_read_ ## SIZE (ntb->bar_info[(bar)].pci_bus_tag, \
	    ntb->bar_info[(bar)].pci_bus_handle, (offset))
#define ntb_bar_write(SIZE, bar, offset, val) \
	    bus_space_write_ ## SIZE (ntb->bar_info[(bar)].pci_bus_tag, \
	    ntb->bar_info[(bar)].pci_bus_handle, (offset), (val))
#define ntb_reg_read(SIZE, offset) ntb_bar_read(SIZE, NTB_CONFIG_BAR, offset)
#define ntb_reg_write(SIZE, offset, val) \
	    ntb_bar_write(SIZE, NTB_CONFIG_BAR, offset, val)
#define ntb_mw_read(SIZE, offset) \
	    ntb_bar_read(SIZE, ntb_mw_to_bar(ntb, ntb->b2b_mw_idx), offset)
#define ntb_mw_write(SIZE, offset, val) \
	    ntb_bar_write(SIZE, ntb_mw_to_bar(ntb, ntb->b2b_mw_idx), \
		offset, val)

static int ntb_probe(device_t device);
static int ntb_attach(device_t device);
static int ntb_detach(device_t device);
static inline enum ntb_bar ntb_mw_to_bar(struct ntb_softc *, unsigned mw);
static inline bool bar_is_64bit(struct ntb_softc *, enum ntb_bar);
static inline void bar_get_xlat_params(struct ntb_softc *, enum ntb_bar,
    uint32_t *base, uint32_t *xlat, uint32_t *lmt);
static int ntb_map_pci_bars(struct ntb_softc *ntb);
static void print_map_success(struct ntb_softc *, struct ntb_pci_bar_info *,
    const char *);
static int map_mmr_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar);
static int map_memory_window_bar(struct ntb_softc *ntb,
    struct ntb_pci_bar_info *bar);
static void ntb_unmap_pci_bar(struct ntb_softc *ntb);
static int ntb_remap_msix(device_t, uint32_t desired, uint32_t avail);
static int ntb_init_isr(struct ntb_softc *ntb);
static int ntb_setup_legacy_interrupt(struct ntb_softc *ntb);
static int ntb_setup_msix(struct ntb_softc *ntb, uint32_t num_vectors);
static void ntb_teardown_interrupts(struct ntb_softc *ntb);
static inline uint64_t ntb_vec_mask(struct ntb_softc *, uint64_t db_vector);
static void ntb_interrupt(struct ntb_softc *, uint32_t vec);
static void ndev_vec_isr(void *arg);
static void ndev_irq_isr(void *arg);
static inline uint64_t db_ioread(struct ntb_softc *, uint64_t regoff);
static inline void db_iowrite(struct ntb_softc *, uint64_t regoff, uint64_t val);
static int ntb_create_msix_vec(struct ntb_softc *ntb, uint32_t num_vectors);
static void ntb_free_msix_vec(struct ntb_softc *ntb);
static struct ntb_hw_info *ntb_get_device_info(uint32_t device_id);
static void ntb_detect_max_mw(struct ntb_softc *ntb);
static int ntb_detect_xeon(struct ntb_softc *ntb);
static int ntb_detect_atom(struct ntb_softc *ntb);
static int ntb_xeon_init_dev(struct ntb_softc *ntb);
static int ntb_atom_init_dev(struct ntb_softc *ntb);
static void ntb_teardown_xeon(struct ntb_softc *ntb);
static void configure_atom_secondary_side_bars(struct ntb_softc *ntb);
static void xeon_reset_sbar_size(struct ntb_softc *, enum ntb_bar idx,
    enum ntb_bar regbar);
static void xeon_set_sbar_base_and_limit(struct ntb_softc *,
    uint64_t base_addr, enum ntb_bar idx, enum ntb_bar regbar);
static void xeon_set_pbar_xlat(struct ntb_softc *, uint64_t base_addr,
    enum ntb_bar idx);
static int xeon_setup_b2b_mw(struct ntb_softc *,
    const struct ntb_b2b_addr *addr, const struct ntb_b2b_addr *peer_addr);
static inline bool link_is_up(struct ntb_softc *ntb);
static inline bool atom_link_is_err(struct ntb_softc *ntb);
static inline enum ntb_speed ntb_link_sta_speed(struct ntb_softc *);
static inline enum ntb_width ntb_link_sta_width(struct ntb_softc *);
static void atom_link_hb(void *arg);
static void ntb_db_event(struct ntb_softc *ntb, uint32_t vec);
static void recover_atom_link(void *arg);
static bool ntb_poll_link(struct ntb_softc *ntb);
static void save_bar_parameters(struct ntb_pci_bar_info *bar);

static struct ntb_hw_info pci_ids[] = {
	/* XXX: PS/SS IDs left out until they are supported. */
	{ 0x0C4E8086, "BWD Atom Processor S1200 Non-Transparent Bridge B2B",
		NTB_ATOM, 0 },

	{ 0x37258086, "JSF Xeon C35xx/C55xx Non-Transparent Bridge B2B",
		NTB_XEON, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 },
	{ 0x3C0D8086, "SNB Xeon E5/Core i7 Non-Transparent Bridge B2B",
		NTB_XEON, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 },
	{ 0x0E0D8086, "IVT Xeon E5 V2 Non-Transparent Bridge B2B", NTB_XEON,
		NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 |
		    NTB_SB01BASE_LOCKUP | NTB_BAR_SIZE_4K },
	{ 0x2F0D8086, "HSX Xeon E5 V3 Non-Transparent Bridge B2B", NTB_XEON,
		NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 |
		    NTB_SB01BASE_LOCKUP },
	{ 0x6F0D8086, "BDX Xeon E5 V4 Non-Transparent Bridge B2B", NTB_XEON,
		NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 |
		    NTB_SB01BASE_LOCKUP },

	{ 0x00000000, NULL, NTB_ATOM, 0 }
};

static const struct ntb_reg atom_reg = {
	.ntb_ctl = ATOM_NTBCNTL_OFFSET,
	.lnk_sta = ATOM_LINK_STATUS_OFFSET,
	.db_size = sizeof(uint64_t),
	.mw_bar = { NTB_B2B_BAR_1, NTB_B2B_BAR_2 },
};

static const struct ntb_alt_reg atom_pri_reg = {
	.db_bell = ATOM_PDOORBELL_OFFSET,
	.db_mask = ATOM_PDBMSK_OFFSET,
	.spad = ATOM_SPAD_OFFSET,
};

static const struct ntb_alt_reg atom_b2b_reg = {
	.db_bell = ATOM_B2B_DOORBELL_OFFSET,
	.spad = ATOM_B2B_SPAD_OFFSET,
};

static const struct ntb_xlat_reg atom_sec_xlat = {
#if 0
	/* "FIXME" says the Linux driver. */
	.bar0_base = ATOM_SBAR0BASE_OFFSET,
	.bar2_base = ATOM_SBAR2BASE_OFFSET,
	.bar4_base = ATOM_SBAR4BASE_OFFSET,

	.bar2_limit = ATOM_SBAR2LMT_OFFSET,
	.bar4_limit = ATOM_SBAR4LMT_OFFSET,
#endif

	.bar2_xlat = ATOM_SBAR2XLAT_OFFSET,
	.bar4_xlat = ATOM_SBAR4XLAT_OFFSET,
};

static const struct ntb_reg xeon_reg = {
	.ntb_ctl = XEON_NTBCNTL_OFFSET,
	.lnk_sta = XEON_LINK_STATUS_OFFSET,
	.db_size = sizeof(uint16_t),
	.mw_bar = { NTB_B2B_BAR_1, NTB_B2B_BAR_2, NTB_B2B_BAR_3 },
};

static const struct ntb_alt_reg xeon_pri_reg = {
	.db_bell = XEON_PDOORBELL_OFFSET,
	.db_mask = XEON_PDBMSK_OFFSET,
	.spad = XEON_SPAD_OFFSET,
};

static const struct ntb_alt_reg xeon_b2b_reg = {
	.db_bell = XEON_B2B_DOORBELL_OFFSET,
	.spad = XEON_B2B_SPAD_OFFSET,
};

static const struct ntb_xlat_reg xeon_sec_xlat = {
	.bar0_base = XEON_SBAR0BASE_OFFSET,
	.bar2_base = XEON_SBAR2BASE_OFFSET,
	.bar4_base = XEON_SBAR4BASE_OFFSET,
	.bar5_base = XEON_SBAR5BASE_OFFSET,

	.bar2_limit = XEON_SBAR2LMT_OFFSET,
	.bar4_limit = XEON_SBAR4LMT_OFFSET,
	.bar5_limit = XEON_SBAR5LMT_OFFSET,

	.bar2_xlat = XEON_SBAR2XLAT_OFFSET,
	.bar4_xlat = XEON_SBAR4XLAT_OFFSET,
	.bar5_xlat = XEON_SBAR5XLAT_OFFSET,
};

static struct ntb_b2b_addr xeon_b2b_usd_addr = {
	.bar0_addr = XEON_B2B_BAR0_USD_ADDR,
	.bar2_addr64 = XEON_B2B_BAR2_USD_ADDR64,
	.bar4_addr64 = XEON_B2B_BAR4_USD_ADDR64,
	.bar4_addr32 = XEON_B2B_BAR4_USD_ADDR32,
	.bar5_addr32 = XEON_B2B_BAR5_USD_ADDR32,
};

static struct ntb_b2b_addr xeon_b2b_dsd_addr = {
	.bar0_addr = XEON_B2B_BAR0_DSD_ADDR,
	.bar2_addr64 = XEON_B2B_BAR2_DSD_ADDR64,
	.bar4_addr64 = XEON_B2B_BAR4_DSD_ADDR64,
	.bar4_addr32 = XEON_B2B_BAR4_DSD_ADDR32,
	.bar5_addr32 = XEON_B2B_BAR5_DSD_ADDR32,
};

SYSCTL_NODE(_hw_ntb, OID_AUTO, xeon_b2b, CTLFLAG_RW, 0,
    "B2B MW segment overrides -- MUST be the same on both sides");

SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar2_addr64, CTLFLAG_RDTUN,
    &xeon_b2b_usd_addr.bar2_addr64, 0, "If using B2B topology on Xeon "
    "hardware, use this 64-bit address on the bus between the NTB devices for "
    "the window at BAR2, on the upstream side of the link.  MUST be the same "
    "address on both sides.");
SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar4_addr64, CTLFLAG_RDTUN,
    &xeon_b2b_usd_addr.bar4_addr64, 0, "See usd_bar2_addr64, but BAR4.");
SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar4_addr32, CTLFLAG_RDTUN,
    &xeon_b2b_usd_addr.bar4_addr32, 0, "See usd_bar2_addr64, but BAR4 "
    "(split-BAR mode).");
SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar5_addr32, CTLFLAG_RDTUN,
    &xeon_b2b_usd_addr.bar5_addr32, 0, "See usd_bar2_addr64, but BAR5 "
    "(split-BAR mode).");

SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar2_addr64, CTLFLAG_RDTUN,
    &xeon_b2b_dsd_addr.bar2_addr64, 0, "If using B2B topology on Xeon "
    "hardware, use this 64-bit address on the bus between the NTB devices for "
    "the window at BAR2, on the downstream side of the link.  MUST be the same"
    " address on both sides.");
SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar4_addr64, CTLFLAG_RDTUN,
    &xeon_b2b_dsd_addr.bar4_addr64, 0, "See dsd_bar2_addr64, but BAR4.");
SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar4_addr32, CTLFLAG_RDTUN,
    &xeon_b2b_dsd_addr.bar4_addr32, 0, "See dsd_bar2_addr64, but BAR4 "
    "(split-BAR mode).");
SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar5_addr32, CTLFLAG_RDTUN,
    &xeon_b2b_dsd_addr.bar5_addr32, 0, "See dsd_bar2_addr64, but BAR5 "
    "(split-BAR mode).");
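
/*
 * As a sketch only (the addresses are illustrative, not recommendations),
 * these CTLFLAG_RDTUN knobs would be set identically on both hosts from
 * loader.conf, e.g.:
 *
 *	hw.ntb.xeon_b2b.usd_bar2_addr64="0x2000000000"
 *	hw.ntb.xeon_b2b.dsd_bar2_addr64="0x4000000000"
 */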

/*
 * OS <-> Driver interface structures
 */
MALLOC_DEFINE(M_NTB, "ntb_hw", "ntb_hw driver memory allocations");

static device_method_t ntb_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,     ntb_probe),
	DEVMETHOD(device_attach,    ntb_attach),
	DEVMETHOD(device_detach,    ntb_detach),
	DEVMETHOD_END
};

static driver_t ntb_pci_driver = {
	"ntb_hw",
	ntb_pci_methods,
	sizeof(struct ntb_softc),
};

static devclass_t ntb_devclass;
DRIVER_MODULE(ntb_hw, pci, ntb_pci_driver, ntb_devclass, NULL, NULL);
MODULE_VERSION(ntb_hw, 1);

SYSCTL_NODE(_hw, OID_AUTO, ntb, CTLFLAG_RW, 0, "NTB sysctls");

/*
 * OS <-> Driver linkage functions
 */
static int
ntb_probe(device_t device)
{
	struct ntb_hw_info *p;

	p = ntb_get_device_info(pci_get_devid(device));
	if (p == NULL)
		return (ENXIO);

	device_set_desc(device, p->desc);
	return (0);
}

static int
ntb_attach(device_t device)
{
	struct ntb_softc *ntb;
	struct ntb_hw_info *p;
	int error;

	ntb = DEVICE2SOFTC(device);
	p = ntb_get_device_info(pci_get_devid(device));

	ntb->device = device;
	ntb->type = p->type;
	ntb->features = p->features;
	ntb->b2b_mw_idx = B2B_MW_DISABLED;

	/* Heartbeat timer for NTB_ATOM since there is no link interrupt */
	callout_init(&ntb->heartbeat_timer, 1);
	callout_init(&ntb->lr_timer, 1);
	mtx_init(&ntb->db_mask_lock, "ntb hw bits", NULL, MTX_SPIN);
	mtx_init(&ntb->ctx_lock, "ntb ctx", NULL, MTX_SPIN);

	if (ntb->type == NTB_ATOM)
		error = ntb_detect_atom(ntb);
	else
		error = ntb_detect_xeon(ntb);
	if (error)
		goto out;

	ntb_detect_max_mw(ntb);

	error = ntb_map_pci_bars(ntb);
	if (error)
		goto out;
	if (ntb->type == NTB_ATOM)
		error = ntb_atom_init_dev(ntb);
	else
		error = ntb_xeon_init_dev(ntb);
	if (error)
		goto out;
	error = ntb_init_isr(ntb);
	if (error)
		goto out;

	pci_enable_busmaster(ntb->device);

	device_printf(ntb->device, "NTB device registered\n");

out:
	if (error != 0)
		ntb_detach(device);
	return (error);
}

static int
ntb_detach(device_t device)
{
	struct ntb_softc *ntb;

	ntb = DEVICE2SOFTC(device);

	if (ntb->self_reg != NULL)
		ntb_db_set_mask(ntb, ntb->db_valid_mask);
	callout_drain(&ntb->heartbeat_timer);
	callout_drain(&ntb->lr_timer);
	if (ntb->type == NTB_XEON)
		ntb_teardown_xeon(ntb);
	ntb_teardown_interrupts(ntb);

	mtx_destroy(&ntb->db_mask_lock);
	mtx_destroy(&ntb->ctx_lock);

	/*
	 * Redetect total MWs so we unmap properly -- in case we lowered the
	 * maximum to work around Xeon errata.
	 */
	ntb_detect_max_mw(ntb);
	ntb_unmap_pci_bar(ntb);

	device_printf(ntb->device, "NTB device unregistered\n");

	return (0);
}

/*
 * Driver internal routines
 */
static inline enum ntb_bar
ntb_mw_to_bar(struct ntb_softc *ntb, unsigned mw)
{

	KASSERT(mw < ntb->mw_count ||
	    (mw != B2B_MW_DISABLED && mw == ntb->b2b_mw_idx),
	    ("%s: mw:%u > count:%u", __func__, mw, (unsigned)ntb->mw_count));
	KASSERT(ntb->reg->mw_bar[mw] != 0, ("invalid mw"));

	return (ntb->reg->mw_bar[mw]);
}

static inline bool
bar_is_64bit(struct ntb_softc *ntb, enum ntb_bar bar)
{
	/* XXX This assertion could be stronger. */
	KASSERT(bar < NTB_MAX_BARS, ("bogus bar"));
	return (bar < NTB_B2B_BAR_2 || !HAS_FEATURE(NTB_SPLIT_BAR));
}

static inline void
bar_get_xlat_params(struct ntb_softc *ntb, enum ntb_bar bar, uint32_t *base,
    uint32_t *xlat, uint32_t *lmt)
{
	uint32_t basev, lmtv, xlatv;

	switch (bar) {
	case NTB_B2B_BAR_1:
		basev = ntb->xlat_reg->bar2_base;
		lmtv = ntb->xlat_reg->bar2_limit;
		xlatv = ntb->xlat_reg->bar2_xlat;
		break;
	case NTB_B2B_BAR_2:
		basev = ntb->xlat_reg->bar4_base;
		lmtv = ntb->xlat_reg->bar4_limit;
		xlatv = ntb->xlat_reg->bar4_xlat;
		break;
	case NTB_B2B_BAR_3:
		basev = ntb->xlat_reg->bar5_base;
		lmtv = ntb->xlat_reg->bar5_limit;
		xlatv = ntb->xlat_reg->bar5_xlat;
		break;
	default:
		KASSERT(bar >= NTB_B2B_BAR_1 && bar < NTB_MAX_BARS,
		    ("bad bar"));
		basev = lmtv = xlatv = 0;
		break;
	}

	if (base != NULL)
		*base = basev;
	if (xlat != NULL)
		*xlat = xlatv;
	if (lmt != NULL)
		*lmt = lmtv;
}

static int
ntb_map_pci_bars(struct ntb_softc *ntb)
{
	int rc;

	ntb->bar_info[NTB_CONFIG_BAR].pci_resource_id = PCIR_BAR(0);
	rc = map_mmr_bar(ntb, &ntb->bar_info[NTB_CONFIG_BAR]);
	if (rc != 0)
		goto out;

	ntb->bar_info[NTB_B2B_BAR_1].pci_resource_id = PCIR_BAR(2);
	rc = map_memory_window_bar(ntb, &ntb->bar_info[NTB_B2B_BAR_1]);
	if (rc != 0)
		goto out;
	ntb->bar_info[NTB_B2B_BAR_1].psz_off = XEON_PBAR23SZ_OFFSET;
	ntb->bar_info[NTB_B2B_BAR_1].ssz_off = XEON_SBAR23SZ_OFFSET;
	ntb->bar_info[NTB_B2B_BAR_1].pbarxlat_off = XEON_PBAR2XLAT_OFFSET;

	ntb->bar_info[NTB_B2B_BAR_2].pci_resource_id = PCIR_BAR(4);
	/* XXX Are shared MW B2Bs write-combining? */
	if (HAS_FEATURE(NTB_SDOORBELL_LOCKUP) && !HAS_FEATURE(NTB_SPLIT_BAR))
		rc = map_mmr_bar(ntb, &ntb->bar_info[NTB_B2B_BAR_2]);
	else
		rc = map_memory_window_bar(ntb, &ntb->bar_info[NTB_B2B_BAR_2]);
	ntb->bar_info[NTB_B2B_BAR_2].psz_off = XEON_PBAR4SZ_OFFSET;
	ntb->bar_info[NTB_B2B_BAR_2].ssz_off = XEON_SBAR4SZ_OFFSET;
	ntb->bar_info[NTB_B2B_BAR_2].pbarxlat_off = XEON_PBAR4XLAT_OFFSET;

	if (!HAS_FEATURE(NTB_SPLIT_BAR))
		goto out;

	ntb->bar_info[NTB_B2B_BAR_3].pci_resource_id = PCIR_BAR(5);
	if (HAS_FEATURE(NTB_SDOORBELL_LOCKUP))
		rc = map_mmr_bar(ntb, &ntb->bar_info[NTB_B2B_BAR_3]);
	else
		rc = map_memory_window_bar(ntb, &ntb->bar_info[NTB_B2B_BAR_3]);
	ntb->bar_info[NTB_B2B_BAR_3].psz_off = XEON_PBAR5SZ_OFFSET;
	ntb->bar_info[NTB_B2B_BAR_3].ssz_off = XEON_SBAR5SZ_OFFSET;
	ntb->bar_info[NTB_B2B_BAR_3].pbarxlat_off = XEON_PBAR5XLAT_OFFSET;

out:
	if (rc != 0)
		device_printf(ntb->device,
		    "unable to allocate pci resource\n");
	return (rc);
}

static void
print_map_success(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar,
    const char *kind)
{

	device_printf(ntb->device,
	    "Mapped BAR%d v:[%p-%p] p:[%p-%p] (0x%jx bytes) (%s)\n",
	    PCI_RID2BAR(bar->pci_resource_id), bar->vbase,
	    (char *)bar->vbase + bar->size - 1,
	    (void *)bar->pbase, (void *)(bar->pbase + bar->size - 1),
	    (uintmax_t)bar->size, kind);
}

static int
map_mmr_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar)
{

	bar->pci_resource = bus_alloc_resource_any(ntb->device, SYS_RES_MEMORY,
	    &bar->pci_resource_id, RF_ACTIVE);
	if (bar->pci_resource == NULL)
		return (ENXIO);

	save_bar_parameters(bar);
	print_map_success(ntb, bar, "mmr");
	return (0);
}

static int
map_memory_window_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar)
{
	int rc;
	uint8_t bar_size_bits = 0;

	bar->pci_resource = bus_alloc_resource_any(ntb->device, SYS_RES_MEMORY,
	    &bar->pci_resource_id, RF_ACTIVE);

	if (bar->pci_resource == NULL)
		return (ENXIO);

	save_bar_parameters(bar);
	/*
	 * Ivytown NTB BAR sizes are misreported by the hardware due to a
	 * hardware issue. To work around this, query the size it should be
	 * configured to by the device and modify the resource to correspond to
	 * this new size. The BIOS on systems with this problem is required to
	 * provide enough address space to allow the driver to make this change
	 * safely.
	 *
	 * Ideally I could have just specified the size when I allocated the
	 * resource like:
	 *  bus_alloc_resource(ntb->device,
	 *	SYS_RES_MEMORY, &bar->pci_resource_id, 0ul, ~0ul,
	 *	1ul << bar_size_bits, RF_ACTIVE);
	 * but the PCI driver does not honor the size in this call, so we have
	 * to modify it after the fact.
	 */
	if (HAS_FEATURE(NTB_BAR_SIZE_4K)) {
		if (bar->pci_resource_id == PCIR_BAR(2))
			bar_size_bits = pci_read_config(ntb->device,
			    XEON_PBAR23SZ_OFFSET, 1);
		else
			bar_size_bits = pci_read_config(ntb->device,
			    XEON_PBAR45SZ_OFFSET, 1);

		rc = bus_adjust_resource(ntb->device, SYS_RES_MEMORY,
		    bar->pci_resource, bar->pbase,
		    bar->pbase + (1ul << bar_size_bits) - 1);
		if (rc != 0) {
			device_printf(ntb->device,
			    "unable to resize bar\n");
			return (rc);
		}

		save_bar_parameters(bar);
	}

	/* Mark bar region as write combining to improve performance. */
	rc = pmap_change_attr((vm_offset_t)bar->vbase, bar->size,
	    VM_MEMATTR_WRITE_COMBINING);
	print_map_success(ntb, bar, "mw");
	if (rc == 0)
		device_printf(ntb->device,
		    "Marked BAR%d v:[%p-%p] p:[%p-%p] as "
		    "WRITE_COMBINING.\n",
		    PCI_RID2BAR(bar->pci_resource_id), bar->vbase,
		    (char *)bar->vbase + bar->size - 1,
		    (void *)bar->pbase, (void *)(bar->pbase + bar->size - 1));
	else
		device_printf(ntb->device,
		    "Unable to mark BAR%d v:[%p-%p] p:[%p-%p] as "
		    "WRITE_COMBINING: %d\n",
		    PCI_RID2BAR(bar->pci_resource_id), bar->vbase,
		    (char *)bar->vbase + bar->size - 1,
		    (void *)bar->pbase, (void *)(bar->pbase + bar->size - 1),
		    rc);
		/* Proceed anyway */
	return (0);
}

static void
ntb_unmap_pci_bar(struct ntb_softc *ntb)
{
	struct ntb_pci_bar_info *current_bar;
	int i;

	for (i = 0; i < NTB_MAX_BARS; i++) {
		current_bar = &ntb->bar_info[i];
		if (current_bar->pci_resource != NULL)
			bus_release_resource(ntb->device, SYS_RES_MEMORY,
			    current_bar->pci_resource_id,
			    current_bar->pci_resource);
	}
}

static int
ntb_setup_msix(struct ntb_softc *ntb, uint32_t num_vectors)
{
	uint32_t i;
	int rc;

	for (i = 0; i < num_vectors; i++) {
		ntb->int_info[i].rid = i + 1;
		ntb->int_info[i].res = bus_alloc_resource_any(ntb->device,
		    SYS_RES_IRQ, &ntb->int_info[i].rid, RF_ACTIVE);
		if (ntb->int_info[i].res == NULL) {
			device_printf(ntb->device,
			    "bus_alloc_resource failed\n");
			return (ENOMEM);
		}
		ntb->int_info[i].tag = NULL;
		ntb->allocated_interrupts++;
		rc = bus_setup_intr(ntb->device, ntb->int_info[i].res,
		    INTR_MPSAFE | INTR_TYPE_MISC, NULL, ndev_vec_isr,
		    &ntb->msix_vec[i], &ntb->int_info[i].tag);
		if (rc != 0) {
			device_printf(ntb->device, "bus_setup_intr failed\n");
			return (ENXIO);
		}
	}
	return (0);
}

/*
 * The Linux NTB driver drops from MSI-X to legacy INTx if a unique vector
 * cannot be allocated for each MSI-X message.  JHB seems to think remapping
 * should be okay.  This tunable should enable us to test that hypothesis
 * when someone gets their hands on some Xeon hardware.
 */
static int ntb_force_remap_mode;
SYSCTL_INT(_hw_ntb, OID_AUTO, force_remap_mode, CTLFLAG_RDTUN,
    &ntb_force_remap_mode, 0, "If enabled, force MSI-X messages to be remapped"
    " to a smaller number of ithreads, even if the desired number are "
    "available");

/*
 * In case it is NOT ok, give consumers an abort button.
 */
static int ntb_prefer_intx;
SYSCTL_INT(_hw_ntb, OID_AUTO, prefer_intx_to_remap, CTLFLAG_RDTUN,
    &ntb_prefer_intx, 0, "If enabled, prefer to use legacy INTx mode rather "
    "than remapping MSI-X messages over available slots (match Linux driver "
    "behavior)");

/*
 * Remap the desired number of MSI-X messages to available ithreads in a simple
 * round-robin fashion.
 */
static int
ntb_remap_msix(device_t dev, uint32_t desired, uint32_t avail)
{
	u_int *vectors;
	uint32_t i;
	int rc;

	if (ntb_prefer_intx != 0)
		return (ENXIO);

	vectors = malloc(desired * sizeof(*vectors), M_NTB, M_ZERO | M_WAITOK);

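	/*
	 * E.g., desired = 4, avail = 2 yields vectors = {1, 2, 1, 2}:
	 * message i is serviced by ithread (i % avail) + 1.
	 */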
	for (i = 0; i < desired; i++)
		vectors[i] = (i % avail) + 1;

	rc = pci_remap_msix(dev, desired, vectors);
	free(vectors, M_NTB);
	return (rc);
}

static int
ntb_init_isr(struct ntb_softc *ntb)
{
	uint32_t desired_vectors, num_vectors;
	int rc;

	ntb->allocated_interrupts = 0;
	ntb->last_ts = ticks;

	/*
	 * Mask all doorbell interrupts.
	 */
	ntb_db_set_mask(ntb, ntb->db_valid_mask);

	num_vectors = desired_vectors = MIN(pci_msix_count(ntb->device),
	    ntb->db_count);
	if (desired_vectors >= 1) {
		rc = pci_alloc_msix(ntb->device, &num_vectors);

		if (ntb_force_remap_mode != 0 && rc == 0 &&
		    num_vectors == desired_vectors)
			num_vectors--;

		if (rc == 0 && num_vectors < desired_vectors) {
			rc = ntb_remap_msix(ntb->device, desired_vectors,
			    num_vectors);
			if (rc == 0)
				num_vectors = desired_vectors;
			else
				pci_release_msi(ntb->device);
		}
		if (rc != 0)
			num_vectors = 1;
	} else
		num_vectors = 1;

	if (ntb->type == NTB_XEON && num_vectors < ntb->db_vec_count) {
		ntb->db_vec_count = 1;
		ntb->db_vec_shift = ntb->db_count;
		rc = ntb_setup_legacy_interrupt(ntb);
	} else {
		ntb_create_msix_vec(ntb, num_vectors);
		rc = ntb_setup_msix(ntb, num_vectors);
	}
	if (rc != 0) {
		device_printf(ntb->device,
		    "Error allocating interrupts: %d\n", rc);
		ntb_free_msix_vec(ntb);
	}

	return (rc);
}

static int
ntb_setup_legacy_interrupt(struct ntb_softc *ntb)
{
	int rc;

	ntb->int_info[0].rid = 0;
	ntb->int_info[0].res = bus_alloc_resource_any(ntb->device, SYS_RES_IRQ,
	    &ntb->int_info[0].rid, RF_SHAREABLE|RF_ACTIVE);
	if (ntb->int_info[0].res == NULL) {
		device_printf(ntb->device, "bus_alloc_resource failed\n");
		return (ENOMEM);
	}

	ntb->int_info[0].tag = NULL;
	ntb->allocated_interrupts = 1;

	rc = bus_setup_intr(ntb->device, ntb->int_info[0].res,
	    INTR_MPSAFE | INTR_TYPE_MISC, NULL, ndev_irq_isr,
	    ntb, &ntb->int_info[0].tag);
	if (rc != 0) {
		device_printf(ntb->device, "bus_setup_intr failed\n");
		return (ENXIO);
	}

	return (0);
}

static void
ntb_teardown_interrupts(struct ntb_softc *ntb)
{
	struct ntb_int_info *current_int;
	int i;

	for (i = 0; i < ntb->allocated_interrupts; i++) {
		current_int = &ntb->int_info[i];
		if (current_int->tag != NULL)
			bus_teardown_intr(ntb->device, current_int->res,
			    current_int->tag);

		if (current_int->res != NULL)
			bus_release_resource(ntb->device, SYS_RES_IRQ,
			    rman_get_rid(current_int->res), current_int->res);
	}

	ntb_free_msix_vec(ntb);
	pci_release_msi(ntb->device);
}

/*
 * Doorbell register and mask are 64-bit on Atom, 16-bit on Xeon.  Abstract it
 * out to make code clearer.
 */
static inline uint64_t
db_ioread(struct ntb_softc *ntb, uint64_t regoff)
{

	if (ntb->type == NTB_ATOM)
		return (ntb_reg_read(8, regoff));

	KASSERT(ntb->type == NTB_XEON, ("bad ntb type"));

	return (ntb_reg_read(2, regoff));
}

static inline void
db_iowrite(struct ntb_softc *ntb, uint64_t regoff, uint64_t val)
{

	KASSERT((val & ~ntb->db_valid_mask) == 0,
	    ("%s: Invalid bits 0x%jx (valid: 0x%jx)", __func__,
	     (uintmax_t)(val & ~ntb->db_valid_mask),
	     (uintmax_t)ntb->db_valid_mask));

	if (regoff == ntb->self_reg->db_mask)
		DB_MASK_ASSERT(ntb, MA_OWNED);

	if (ntb->type == NTB_ATOM) {
		ntb_reg_write(8, regoff, val);
		return;
	}

	KASSERT(ntb->type == NTB_XEON, ("bad ntb type"));
	ntb_reg_write(2, regoff, (uint16_t)val);
}

void
ntb_db_set_mask(struct ntb_softc *ntb, uint64_t bits)
{

	DB_MASK_LOCK(ntb);
	ntb->db_mask |= bits;
	db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask);
	DB_MASK_UNLOCK(ntb);
}

void
ntb_db_clear_mask(struct ntb_softc *ntb, uint64_t bits)
{

	KASSERT((bits & ~ntb->db_valid_mask) == 0,
	    ("%s: Invalid bits 0x%jx (valid: 0x%jx)", __func__,
	     (uintmax_t)(bits & ~ntb->db_valid_mask),
	     (uintmax_t)ntb->db_valid_mask));

	DB_MASK_LOCK(ntb);
	ntb->db_mask &= ~bits;
	db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask);
	DB_MASK_UNLOCK(ntb);
}

uint64_t
ntb_db_read(struct ntb_softc *ntb)
{

	return (db_ioread(ntb, ntb->self_reg->db_bell));
}

void
ntb_db_clear(struct ntb_softc *ntb, uint64_t bits)
{

	KASSERT((bits & ~ntb->db_valid_mask) == 0,
	    ("%s: Invalid bits 0x%jx (valid: 0x%jx)", __func__,
	     (uintmax_t)(bits & ~ntb->db_valid_mask),
	     (uintmax_t)ntb->db_valid_mask));

	db_iowrite(ntb, ntb->self_reg->db_bell, bits);
}

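/*
 * Each MSI-X vector owns a contiguous field of db_vec_shift doorbell bits.
 * E.g., with a shift of 5 (the Xeon layout in ntb_regs.h), vector 1 covers
 * doorbell bits 5-9; with a shift of 1 (Atom), each vector maps to a single
 * doorbell bit.
 */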
static inline uint64_t
ntb_vec_mask(struct ntb_softc *ntb, uint64_t db_vector)
{
	uint64_t shift, mask;

	shift = ntb->db_vec_shift;
	mask = (1ull << shift) - 1;
	return (mask << (shift * db_vector));
}

static void
ntb_interrupt(struct ntb_softc *ntb, uint32_t vec)
{
	uint64_t vec_mask;

	ntb->last_ts = ticks;
	vec_mask = ntb_vec_mask(ntb, vec);

	if ((vec_mask & ntb->db_link_mask) != 0) {
		if (ntb_poll_link(ntb))
			ntb_link_event(ntb);
	}

	if ((vec_mask & ntb->db_valid_mask) != 0)
		ntb_db_event(ntb, vec);
}

static void
ndev_vec_isr(void *arg)
{
	struct ntb_vec *nvec = arg;

	ntb_interrupt(nvec->ntb, nvec->num);
}

static void
ndev_irq_isr(void *arg)
{
	/* If we couldn't set up MSI-X, we only have the one vector. */
	ntb_interrupt(arg, 0);
}

static int
ntb_create_msix_vec(struct ntb_softc *ntb, uint32_t num_vectors)
{
	uint32_t i;

	ntb->msix_vec = malloc(num_vectors * sizeof(*ntb->msix_vec), M_NTB,
	    M_ZERO | M_WAITOK);
	for (i = 0; i < num_vectors; i++) {
		ntb->msix_vec[i].num = i;
		ntb->msix_vec[i].ntb = ntb;
	}

	return (0);
}

static void
ntb_free_msix_vec(struct ntb_softc *ntb)
{

	if (ntb->msix_vec == NULL)
		return;

	free(ntb->msix_vec, M_NTB);
	ntb->msix_vec = NULL;
}

static struct ntb_hw_info *
ntb_get_device_info(uint32_t device_id)
{
	struct ntb_hw_info *ep = pci_ids;

	while (ep->device_id) {
		if (ep->device_id == device_id)
			return (ep);
		++ep;
	}
	return (NULL);
}

static void
ntb_teardown_xeon(struct ntb_softc *ntb)
{

	if (ntb->reg != NULL)
		ntb_link_disable(ntb);
}

static void
ntb_detect_max_mw(struct ntb_softc *ntb)
{

	if (ntb->type == NTB_ATOM) {
		ntb->mw_count = ATOM_MW_COUNT;
		return;
	}

	if (HAS_FEATURE(NTB_SPLIT_BAR))
		ntb->mw_count = XEON_HSX_SPLIT_MW_COUNT;
	else
		ntb->mw_count = XEON_SNB_MW_COUNT;
}

static int
ntb_detect_xeon(struct ntb_softc *ntb)
{
	uint8_t ppd, conn_type;

	ppd = pci_read_config(ntb->device, NTB_PPD_OFFSET, 1);
	ntb->ppd = ppd;

	if ((ppd & XEON_PPD_DEV_TYPE) != 0)
		ntb->dev_type = NTB_DEV_USD;
	else
		ntb->dev_type = NTB_DEV_DSD;

	if ((ppd & XEON_PPD_SPLIT_BAR) != 0)
		ntb->features |= NTB_SPLIT_BAR;

	/* SB01BASE_LOCKUP errata is a superset of SDOORBELL errata */
	if (HAS_FEATURE(NTB_SB01BASE_LOCKUP))
		ntb->features |= NTB_SDOORBELL_LOCKUP;

	conn_type = ppd & XEON_PPD_CONN_TYPE;
	switch (conn_type) {
	case NTB_CONN_B2B:
		ntb->conn_type = conn_type;
		break;
	case NTB_CONN_RP:
	case NTB_CONN_TRANSPARENT:
	default:
		device_printf(ntb->device, "Unsupported connection type: %u\n",
		    (unsigned)conn_type);
		return (ENXIO);
	}
	return (0);
}

static int
ntb_detect_atom(struct ntb_softc *ntb)
{
	uint32_t ppd, conn_type;

	ppd = pci_read_config(ntb->device, NTB_PPD_OFFSET, 4);
	ntb->ppd = ppd;

	if ((ppd & ATOM_PPD_DEV_TYPE) != 0)
		ntb->dev_type = NTB_DEV_DSD;
	else
		ntb->dev_type = NTB_DEV_USD;

	conn_type = (ppd & ATOM_PPD_CONN_TYPE) >> 8;
	switch (conn_type) {
	case NTB_CONN_B2B:
		ntb->conn_type = conn_type;
		break;
	default:
		device_printf(ntb->device, "Unsupported NTB configuration\n");
		return (ENXIO);
	}
	return (0);
}

static int
ntb_xeon_init_dev(struct ntb_softc *ntb)
{
	int rc;

	ntb->spad_count		= XEON_SPAD_COUNT;
	ntb->db_count		= XEON_DB_COUNT;
	ntb->db_link_mask	= XEON_DB_LINK_BIT;
	ntb->db_vec_count	= XEON_DB_MSIX_VECTOR_COUNT;
	ntb->db_vec_shift	= XEON_DB_MSIX_VECTOR_SHIFT;

	if (ntb->conn_type != NTB_CONN_B2B) {
		device_printf(ntb->device, "Connection type %d not supported\n",
		    ntb->conn_type);
		return (ENXIO);
	}

	ntb->reg = &xeon_reg;
	ntb->self_reg = &xeon_pri_reg;
	ntb->peer_reg = &xeon_b2b_reg;
	ntb->xlat_reg = &xeon_sec_xlat;

	/*
	 * There is a Xeon hardware errata related to writes to SDOORBELL or
	 * B2BDOORBELL in conjunction with inbound access to NTB MMIO space,
	 * which may hang the system.  To work around this, use a memory
	 * window to access the interrupt and scratch pad registers on the
	 * remote system.
	 */
	if (HAS_FEATURE(NTB_SDOORBELL_LOCKUP))
		/* Use the last MW for mapping remote spad */
		ntb->b2b_mw_idx = ntb->mw_count - 1;
	else if (HAS_FEATURE(NTB_B2BDOORBELL_BIT14))
		/*
		 * HW errata on bit 14 of the b2bdoorbell register: writes
		 * will not be mirrored to the remote system.  Shrink the
		 * number of bits by one, since bit 14 is the last bit.
		 *
		 * In REGS_THRU_MW errata mode we don't use the b2bdoorbell
		 * register anyway, nor for non-B2B connection types.
		 */
		ntb->db_count = XEON_DB_COUNT - 1;

	ntb->db_valid_mask = (1ull << ntb->db_count) - 1;

	if (ntb->dev_type == NTB_DEV_USD)
		rc = xeon_setup_b2b_mw(ntb, &xeon_b2b_dsd_addr,
		    &xeon_b2b_usd_addr);
	else
		rc = xeon_setup_b2b_mw(ntb, &xeon_b2b_usd_addr,
		    &xeon_b2b_dsd_addr);
	if (rc != 0)
		return (rc);

	/* Enable Bus Master and Memory Space on the secondary side */
	ntb_reg_write(2, XEON_PCICMD_OFFSET,
	    PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN);

	/* Enable link training */
	ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);

	return (0);
}

static int
ntb_atom_init_dev(struct ntb_softc *ntb)
{

	KASSERT(ntb->conn_type == NTB_CONN_B2B,
	    ("Unsupported NTB configuration (%d)\n", ntb->conn_type));

	ntb->spad_count		 = ATOM_SPAD_COUNT;
	ntb->db_count		 = ATOM_DB_COUNT;
	ntb->db_vec_count	 = ATOM_DB_MSIX_VECTOR_COUNT;
	ntb->db_vec_shift	 = ATOM_DB_MSIX_VECTOR_SHIFT;
	ntb->db_valid_mask	 = (1ull << ntb->db_count) - 1;

	ntb->reg = &atom_reg;
	ntb->self_reg = &atom_pri_reg;
	ntb->peer_reg = &atom_b2b_reg;
	ntb->xlat_reg = &atom_sec_xlat;

	/*
	 * FIXME - MSI-X bug on early Atom HW, remove once internal issue is
	 * resolved.  Mask transaction layer internal parity errors.
	 */
	pci_write_config(ntb->device, 0xFC, 0x4, 4);

	configure_atom_secondary_side_bars(ntb);

	/* Enable Bus Master and Memory Space on the secondary side */
	ntb_reg_write(2, ATOM_PCICMD_OFFSET,
	    PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN);

	/* Initiate PCI-E link training */
	ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);

	callout_reset(&ntb->heartbeat_timer, 0, atom_link_hb, ntb);

	return (0);
}

/* XXX: Linux driver doesn't seem to do any of this for Atom. */
static void
configure_atom_secondary_side_bars(struct ntb_softc *ntb)
{

	if (ntb->dev_type == NTB_DEV_USD) {
		ntb_reg_write(8, ATOM_PBAR2XLAT_OFFSET,
		    XEON_B2B_BAR2_DSD_ADDR64);
		ntb_reg_write(8, ATOM_PBAR4XLAT_OFFSET,
		    XEON_B2B_BAR4_DSD_ADDR64);
		ntb_reg_write(8, ATOM_MBAR23_OFFSET, XEON_B2B_BAR2_USD_ADDR64);
		ntb_reg_write(8, ATOM_MBAR45_OFFSET, XEON_B2B_BAR4_USD_ADDR64);
	} else {
		ntb_reg_write(8, ATOM_PBAR2XLAT_OFFSET,
		    XEON_B2B_BAR2_USD_ADDR64);
		ntb_reg_write(8, ATOM_PBAR4XLAT_OFFSET,
		    XEON_B2B_BAR4_USD_ADDR64);
		ntb_reg_write(8, ATOM_MBAR23_OFFSET, XEON_B2B_BAR2_DSD_ADDR64);
		ntb_reg_write(8, ATOM_MBAR45_OFFSET, XEON_B2B_BAR4_DSD_ADDR64);
	}
}


/*
 * When working around the Xeon SDOORBELL errata by remapping the remote
 * registers in a MW, limit the B2B MW to half the MW's size.  By sharing the
 * MW this way, the other half remains available to a higher layer.
 *
 * Will only be used if working around the SDOORBELL errata and the
 * BIOS-configured MW size is sufficiently large.
 */
static unsigned int ntb_b2b_mw_share;
SYSCTL_UINT(_hw_ntb, OID_AUTO, b2b_mw_share, CTLFLAG_RDTUN, &ntb_b2b_mw_share,
    0, "If enabled (non-zero), prefer to share half of the B2B peer register "
    "MW with higher level consumers.  Both sides of the NTB MUST set the same "
    "value here.");
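
/*
 * A worked example, assuming XEON_B2B_MIN_SIZE is 128KB (see ntb_regs.h for
 * the actual constant): with a 256KB B2B BAR and b2b_mw_share set, b2b_off
 * becomes 128KB, so the lower half of the BAR remains a usable memory window
 * while the upper half reaches the peer's registers.  With a smaller BAR,
 * the whole MW is consumed for register access instead.
 */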

static void
xeon_reset_sbar_size(struct ntb_softc *ntb, enum ntb_bar idx,
    enum ntb_bar regbar)
{
	struct ntb_pci_bar_info *bar;
	uint8_t bar_sz;

	if (!HAS_FEATURE(NTB_SPLIT_BAR) && idx >= NTB_B2B_BAR_3)
		return;

	bar = &ntb->bar_info[idx];
	bar_sz = pci_read_config(ntb->device, bar->psz_off, 1);
	if (idx == regbar) {
		if (ntb->b2b_off != 0)
			bar_sz--;
		else
			bar_sz = 0;
	}
	pci_write_config(ntb->device, bar->ssz_off, bar_sz, 1);
	bar_sz = pci_read_config(ntb->device, bar->ssz_off, 1);
	(void)bar_sz;
}

static void
xeon_set_sbar_base_and_limit(struct ntb_softc *ntb, uint64_t bar_addr,
    enum ntb_bar idx, enum ntb_bar regbar)
{
	uint64_t reg_val;
	uint32_t base_reg, lmt_reg;

	bar_get_xlat_params(ntb, idx, &base_reg, NULL, &lmt_reg);
	if (idx == regbar)
		bar_addr += ntb->b2b_off;

	if (!bar_is_64bit(ntb, idx)) {
		ntb_reg_write(4, base_reg, bar_addr);
		reg_val = ntb_reg_read(4, base_reg);
		(void)reg_val;

		ntb_reg_write(4, lmt_reg, bar_addr);
		reg_val = ntb_reg_read(4, lmt_reg);
		(void)reg_val;
	} else {
		ntb_reg_write(8, base_reg, bar_addr);
		reg_val = ntb_reg_read(8, base_reg);
		(void)reg_val;

		ntb_reg_write(8, lmt_reg, bar_addr);
		reg_val = ntb_reg_read(8, lmt_reg);
		(void)reg_val;
	}
}

static void
xeon_set_pbar_xlat(struct ntb_softc *ntb, uint64_t base_addr, enum ntb_bar idx)
{
	struct ntb_pci_bar_info *bar;

	bar = &ntb->bar_info[idx];
	if (HAS_FEATURE(NTB_SPLIT_BAR) && idx >= NTB_B2B_BAR_2) {
		ntb_reg_write(4, bar->pbarxlat_off, base_addr);
		base_addr = ntb_reg_read(4, bar->pbarxlat_off);
	} else {
		ntb_reg_write(8, bar->pbarxlat_off, base_addr);
		base_addr = ntb_reg_read(8, bar->pbarxlat_off);
	}
	(void)base_addr;
}

static int
xeon_setup_b2b_mw(struct ntb_softc *ntb, const struct ntb_b2b_addr *addr,
    const struct ntb_b2b_addr *peer_addr)
{
	struct ntb_pci_bar_info *b2b_bar;
	vm_size_t bar_size;
	uint64_t bar_addr;
	enum ntb_bar b2b_bar_num, i;

	if (ntb->b2b_mw_idx == B2B_MW_DISABLED) {
		b2b_bar = NULL;
		b2b_bar_num = NTB_CONFIG_BAR;
		ntb->b2b_off = 0;
	} else {
		b2b_bar_num = ntb_mw_to_bar(ntb, ntb->b2b_mw_idx);
		KASSERT(b2b_bar_num > 0 && b2b_bar_num < NTB_MAX_BARS,
		    ("invalid b2b mw bar"));

		b2b_bar = &ntb->bar_info[b2b_bar_num];
		bar_size = b2b_bar->size;

		if (ntb_b2b_mw_share != 0 &&
		    (bar_size >> 1) >= XEON_B2B_MIN_SIZE)
			ntb->b2b_off = bar_size >> 1;
		else if (bar_size >= XEON_B2B_MIN_SIZE) {
			ntb->b2b_off = 0;
			ntb->mw_count--;
		} else {
			device_printf(ntb->device,
			    "B2B bar size is too small!\n");
			return (EIO);
		}
	}

	/*
	 * Reset the secondary bar sizes to match the primary bar sizes.
	 * (Except, disable or halve the size of the B2B secondary bar.)
	 */
	for (i = NTB_B2B_BAR_1; i < NTB_MAX_BARS; i++)
		xeon_reset_sbar_size(ntb, i, b2b_bar_num);

	bar_addr = 0;
	if (b2b_bar_num == NTB_CONFIG_BAR)
		bar_addr = addr->bar0_addr;
	else if (b2b_bar_num == NTB_B2B_BAR_1)
		bar_addr = addr->bar2_addr64;
	else if (b2b_bar_num == NTB_B2B_BAR_2 && !HAS_FEATURE(NTB_SPLIT_BAR))
		bar_addr = addr->bar4_addr64;
	else if (b2b_bar_num == NTB_B2B_BAR_2)
		bar_addr = addr->bar4_addr32;
	else if (b2b_bar_num == NTB_B2B_BAR_3)
		bar_addr = addr->bar5_addr32;
	else
		KASSERT(false, ("invalid bar"));

	ntb_reg_write(8, XEON_SBAR0BASE_OFFSET, bar_addr);

	/*
	 * Other SBARs are normally hit by the PBAR xlat, except for the b2b
	 * register BAR.  The B2B BAR is either disabled above or configured
	 * half-size.  It starts at PBAR xlat + offset.
	 *
	 * Also set up incoming BAR limits == base (zero length window).
	 */
	xeon_set_sbar_base_and_limit(ntb, addr->bar2_addr64, NTB_B2B_BAR_1,
	    b2b_bar_num);
	if (HAS_FEATURE(NTB_SPLIT_BAR)) {
		xeon_set_sbar_base_and_limit(ntb, addr->bar4_addr32,
		    NTB_B2B_BAR_2, b2b_bar_num);
		xeon_set_sbar_base_and_limit(ntb, addr->bar5_addr32,
		    NTB_B2B_BAR_3, b2b_bar_num);
	} else
		xeon_set_sbar_base_and_limit(ntb, addr->bar4_addr64,
		    NTB_B2B_BAR_2, b2b_bar_num);

	/* Zero incoming translation addrs */
	ntb_reg_write(8, XEON_SBAR2XLAT_OFFSET, 0);
	ntb_reg_write(8, XEON_SBAR4XLAT_OFFSET, 0);

	/* Zero outgoing translation limits (whole bar size windows) */
	ntb_reg_write(8, XEON_PBAR2LMT_OFFSET, 0);
	ntb_reg_write(8, XEON_PBAR4LMT_OFFSET, 0);

	/* Set outgoing translation offsets */
	xeon_set_pbar_xlat(ntb, peer_addr->bar2_addr64, NTB_B2B_BAR_1);
	if (HAS_FEATURE(NTB_SPLIT_BAR)) {
		xeon_set_pbar_xlat(ntb, peer_addr->bar4_addr32, NTB_B2B_BAR_2);
		xeon_set_pbar_xlat(ntb, peer_addr->bar5_addr32, NTB_B2B_BAR_3);
	} else
		xeon_set_pbar_xlat(ntb, peer_addr->bar4_addr64, NTB_B2B_BAR_2);

	/* Set the translation offset for B2B registers */
	bar_addr = 0;
	if (b2b_bar_num == NTB_CONFIG_BAR)
		bar_addr = peer_addr->bar0_addr;
	else if (b2b_bar_num == NTB_B2B_BAR_1)
		bar_addr = peer_addr->bar2_addr64;
	else if (b2b_bar_num == NTB_B2B_BAR_2 && !HAS_FEATURE(NTB_SPLIT_BAR))
		bar_addr = peer_addr->bar4_addr64;
	else if (b2b_bar_num == NTB_B2B_BAR_2)
		bar_addr = peer_addr->bar4_addr32;
	else if (b2b_bar_num == NTB_B2B_BAR_3)
		bar_addr = peer_addr->bar5_addr32;
	else
		KASSERT(false, ("invalid bar"));

	/*
	 * B2B_XLAT_OFFSET is a 64-bit register but can only be written 32 bits
	 * at a time.
	 */
	ntb_reg_write(4, XEON_B2B_XLAT_OFFSETL, bar_addr & 0xffffffff);
	ntb_reg_write(4, XEON_B2B_XLAT_OFFSETU, bar_addr >> 32);
	return (0);
}

static inline bool
link_is_up(struct ntb_softc *ntb)
{

	if (ntb->type == NTB_XEON) {
		if (ntb->conn_type == NTB_CONN_TRANSPARENT)
			return (true);
		return ((ntb->lnk_sta & NTB_LINK_STATUS_ACTIVE) != 0);
	}

	KASSERT(ntb->type == NTB_ATOM, ("ntb type"));
	return ((ntb->ntb_ctl & ATOM_CNTL_LINK_DOWN) == 0);
}

static inline bool
atom_link_is_err(struct ntb_softc *ntb)
{
	uint32_t status;

	KASSERT(ntb->type == NTB_ATOM, ("ntb type"));

	status = ntb_reg_read(4, ATOM_LTSSMSTATEJMP_OFFSET);
	if ((status & ATOM_LTSSMSTATEJMP_FORCEDETECT) != 0)
		return (true);

	status = ntb_reg_read(4, ATOM_IBSTERRRCRVSTS0_OFFSET);
	return ((status & ATOM_IBIST_ERR_OFLOW) != 0);
}

/* Atom does not have a link status interrupt; poll it on that platform. */
static void
atom_link_hb(void *arg)
{
	struct ntb_softc *ntb = arg;
	sbintime_t timo, poll_ts;

	timo = NTB_HB_TIMEOUT * hz;
	poll_ts = ntb->last_ts + timo;

	/*
	 * Delay polling the link status if an interrupt was received, unless
	 * the cached link status says the link is down.
	 */
	if ((sbintime_t)ticks - poll_ts < 0 && link_is_up(ntb)) {
		timo = poll_ts - ticks;
		goto out;
	}

	if (ntb_poll_link(ntb))
		ntb_link_event(ntb);

	if (!link_is_up(ntb) && atom_link_is_err(ntb)) {
		/* Link is down with error, proceed with recovery */
		callout_reset(&ntb->lr_timer, 0, recover_atom_link, ntb);
		return;
	}

out:
	callout_reset(&ntb->heartbeat_timer, timo, atom_link_hb, ntb);
}

static void
atom_perform_link_restart(struct ntb_softc *ntb)
{
	uint32_t status;

	/* Driver resets the NTB ModPhy lanes - magic! */
	ntb_reg_write(1, ATOM_MODPHY_PCSREG6, 0xe0);
	ntb_reg_write(1, ATOM_MODPHY_PCSREG4, 0x40);
	ntb_reg_write(1, ATOM_MODPHY_PCSREG4, 0x60);
	ntb_reg_write(1, ATOM_MODPHY_PCSREG6, 0x60);

	/* Driver waits 100ms to allow the NTB ModPhy to settle */
	pause("ModPhy", hz / 10);

	/* Clear AER Errors, write to clear */
	status = ntb_reg_read(4, ATOM_ERRCORSTS_OFFSET);
	status &= PCIM_AER_COR_REPLAY_ROLLOVER;
	ntb_reg_write(4, ATOM_ERRCORSTS_OFFSET, status);

	/* Clear unexpected electrical idle event in LTSSM, write to clear */
	status = ntb_reg_read(4, ATOM_LTSSMERRSTS0_OFFSET);
	status |= ATOM_LTSSMERRSTS0_UNEXPECTEDEI;
	ntb_reg_write(4, ATOM_LTSSMERRSTS0_OFFSET, status);

	/* Clear DeSkew Buffer error, write to clear */
	status = ntb_reg_read(4, ATOM_DESKEWSTS_OFFSET);
	status |= ATOM_DESKEWSTS_DBERR;
	ntb_reg_write(4, ATOM_DESKEWSTS_OFFSET, status);

	status = ntb_reg_read(4, ATOM_IBSTERRRCRVSTS0_OFFSET);
	status &= ATOM_IBIST_ERR_OFLOW;
	ntb_reg_write(4, ATOM_IBSTERRRCRVSTS0_OFFSET, status);

	/* Releases the NTB state machine to allow the link to retrain */
	status = ntb_reg_read(4, ATOM_LTSSMSTATEJMP_OFFSET);
	status &= ~ATOM_LTSSMSTATEJMP_FORCEDETECT;
	ntb_reg_write(4, ATOM_LTSSMSTATEJMP_OFFSET, status);
}

/*
 * ntb_set_ctx() - associate a driver context with an ntb device
 * @ntb:        NTB device context
 * @ctx:        Driver context
 * @ctx_ops:    Driver context operations
 *
 * Associate a driver context and operations with an ntb device.  The context
 * is provided by the client driver, and the driver may associate a different
 * context with each ntb device.
 *
 * Return: Zero if the context is associated, otherwise an error number.
 */
int
ntb_set_ctx(struct ntb_softc *ntb, void *ctx, const struct ntb_ctx_ops *ops)
{

	if (ctx == NULL || ops == NULL)
		return (EINVAL);
	if (ntb->ctx_ops != NULL)
		return (EINVAL);

	CTX_LOCK(ntb);
	if (ntb->ctx_ops != NULL) {
		CTX_UNLOCK(ntb);
		return (EINVAL);
	}
	ntb->ntb_ctx = ctx;
	ntb->ctx_ops = ops;
	CTX_UNLOCK(ntb);

	return (0);
}
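
/*
 * A minimal consumer sketch (the "mydrv" names are hypothetical; see
 * ntb_hw.h for the exact callback typedefs):
 *
 *	static const struct ntb_ctx_ops mydrv_ctx_ops = {
 *		.link_event = mydrv_link_event,
 *		.db_event = mydrv_db_event,
 *	};
 *
 *	error = ntb_set_ctx(ntb, mydrv_softc, &mydrv_ctx_ops);
 */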

/*
 * It is expected that this will only be used from contexts where the ctx_lock
 * is not needed to protect ntb_ctx lifetime.
 */
void *
ntb_get_ctx(struct ntb_softc *ntb, const struct ntb_ctx_ops **ops)
{

	KASSERT(ntb->ntb_ctx != NULL && ntb->ctx_ops != NULL, ("bogus"));
	if (ops != NULL)
		*ops = ntb->ctx_ops;
	return (ntb->ntb_ctx);
}

/*
 * ntb_clear_ctx() - disassociate any driver context from an ntb device
 * @ntb:        NTB device context
 *
 * Clear any association that may exist between a driver context and the ntb
 * device.
 */
void
ntb_clear_ctx(struct ntb_softc *ntb)
{

	CTX_LOCK(ntb);
	ntb->ntb_ctx = NULL;
	ntb->ctx_ops = NULL;
	CTX_UNLOCK(ntb);
}

/*
 * ntb_link_event() - notify driver context of a change in link status
 * @ntb:        NTB device context
 *
 * Notify the driver context that the link status may have changed.  The driver
 * should call ntb_link_is_up() to get the current status.
 */
void
ntb_link_event(struct ntb_softc *ntb)
{

	CTX_LOCK(ntb);
	if (ntb->ctx_ops != NULL && ntb->ctx_ops->link_event != NULL)
		ntb->ctx_ops->link_event(ntb->ntb_ctx);
	CTX_UNLOCK(ntb);
}

/*
 * ntb_db_event() - notify driver context of a doorbell event
 * @ntb:        NTB device context
 * @vector:     Interrupt vector number
 *
 * Notify the driver context of a doorbell event.  If hardware supports
 * multiple interrupt vectors for doorbells, the vector number indicates which
 * vector received the interrupt.  The vector number is relative to the first
 * vector used for doorbells, starting at zero, and must be less than
 * ntb_db_vector_count().  The driver may call ntb_db_read() to check which
 * doorbell bits need service, and ntb_db_vector_mask() to determine which of
 * those bits are associated with the vector number.
 */
static void
ntb_db_event(struct ntb_softc *ntb, uint32_t vec)
{

	CTX_LOCK(ntb);
	if (ntb->ctx_ops != NULL && ntb->ctx_ops->db_event != NULL)
		ntb->ctx_ops->db_event(ntb->ntb_ctx, vec);
	CTX_UNLOCK(ntb);
}
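
/*
 * A hypothetical db_event callback illustrating the service loop described
 * above (ntb_db_vector_mask() is part of this API; the "mydrv" names are
 * made up):
 *
 *	static void
 *	mydrv_db_event(void *ctx, uint32_t vector)
 *	{
 *		struct mydrv_softc *sc = ctx;
 *		uint64_t db;
 *
 *		db = ntb_db_read(sc->ntb) &
 *		    ntb_db_vector_mask(sc->ntb, vector);
 *		ntb_db_clear(sc->ntb, db);
 *		... service the doorbell bits set in db ...
 *	}
 */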

/*
 * ntb_link_enable() - enable the link on the secondary side of the ntb
 * @ntb:        NTB device context
 * @max_speed:  The maximum link speed expressed as PCIe generation number[0]
 * @max_width:  The maximum link width expressed as the number of PCIe lanes[0]
 *
 * Enable the link on the secondary side of the ntb.  This can only be done
 * from the primary side of the ntb in primary or b2b topology.  The ntb device
 * should train the link to its maximum speed and width, or the requested speed
 * and width, whichever is smaller, if supported.
 *
 * Return: Zero on success, otherwise an error number.
 *
 * [0]: Only NTB_SPEED_AUTO and NTB_WIDTH_AUTO are valid inputs; other speed
 *      and width input will be ignored.
 */
int
ntb_link_enable(struct ntb_softc *ntb, enum ntb_speed s __unused,
    enum ntb_width w __unused)
{
	uint32_t cntl;

	if (ntb->type == NTB_ATOM) {
		pci_write_config(ntb->device, NTB_PPD_OFFSET,
		    ntb->ppd | ATOM_PPD_INIT_LINK, 4);
		return (0);
	}

	if (ntb->conn_type == NTB_CONN_TRANSPARENT) {
		ntb_link_event(ntb);
		return (0);
	}

	cntl = ntb_reg_read(4, ntb->reg->ntb_ctl);
	cntl &= ~(NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK);
	cntl |= NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP;
	cntl |= NTB_CNTL_P2S_BAR4_SNOOP | NTB_CNTL_S2P_BAR4_SNOOP;
	if (HAS_FEATURE(NTB_SPLIT_BAR))
		cntl |= NTB_CNTL_P2S_BAR5_SNOOP | NTB_CNTL_S2P_BAR5_SNOOP;
	ntb_reg_write(4, ntb->reg->ntb_ctl, cntl);
	return (0);
}

/*
 * ntb_link_disable() - disable the link on the secondary side of the ntb
 * @ntb:        NTB device context
 *
 * Disable the link on the secondary side of the ntb.  This can only be done
 * from the primary side of the ntb in primary or b2b topology.  The ntb device
 * should disable the link.  Returning from this call must indicate that a
 * barrier has passed, after which no more writes may pass in either direction
 * across the link, unless this call returns an error number.
 *
 * Return: Zero on success, otherwise an error number.
 */
1794int
1795ntb_link_disable(struct ntb_softc *ntb)
1796{
1797	uint32_t cntl;
1798
1799	if (ntb->conn_type == NTB_CONN_TRANSPARENT) {
1800		ntb_link_event(ntb);
1801		return (0);
1802	}
1803
1804	cntl = ntb_reg_read(4, ntb->reg->ntb_ctl);
1805	cntl &= ~(NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP);
1806	cntl &= ~(NTB_CNTL_P2S_BAR4_SNOOP | NTB_CNTL_S2P_BAR4_SNOOP);
1807	if (HAS_FEATURE(NTB_SPLIT_BAR))
1808		cntl &= ~(NTB_CNTL_P2S_BAR5_SNOOP | NTB_CNTL_S2P_BAR5_SNOOP);
1809	cntl |= NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK;
1810	ntb_reg_write(4, ntb->reg->ntb_ctl, cntl);
1811	return (0);
1812}
1813
1814static void
1815recover_atom_link(void *arg)
1816{
1817	struct ntb_softc *ntb = arg;
1818	unsigned speed, width, oldspeed, oldwidth;
1819	uint32_t status32;
1820
1821	atom_perform_link_restart(ntb);
1822
1823	/*
1824	 * There is a potential race if the two NTB devices attempt recovery at
1825	 * the same time.  If the recovery times are identical, the link will
1826	 * never come back up and the driver will spin in this retry loop
1827	 * forever.  Add a random interval to the recovery time to avoid this.
1828	 */
1829	status32 = arc4random() % ATOM_LINK_RECOVERY_TIME;
1830	pause("Link", (ATOM_LINK_RECOVERY_TIME + status32) * hz / 1000);
1831
1832	if (atom_link_is_err(ntb))
1833		goto retry;
1834
1835	status32 = ntb_reg_read(4, ntb->reg->ntb_ctl);
1836	if ((status32 & ATOM_CNTL_LINK_DOWN) != 0)
1837		goto out;
1838
1839	status32 = ntb_reg_read(4, ntb->reg->lnk_sta);
1840	width = NTB_LNK_STA_WIDTH(status32);
1841	speed = status32 & NTB_LINK_SPEED_MASK;
1842
1843	oldwidth = NTB_LNK_STA_WIDTH(ntb->lnk_sta);
1844	oldspeed = ntb->lnk_sta & NTB_LINK_SPEED_MASK;
1845	if (oldwidth != width || oldspeed != speed)
1846		goto retry;
1847
1848out:
1849	callout_reset(&ntb->heartbeat_timer, NTB_HB_TIMEOUT * hz, atom_link_hb,
1850	    ntb);
1851	return;
1852
1853retry:
1854	callout_reset(&ntb->lr_timer, NTB_HB_TIMEOUT * hz, recover_atom_link,
1855	    ntb);
1856}
1857
1858/*
1859 * Polls the HW link status register(s); returns true if something has changed.
1860 */
1861static bool
1862ntb_poll_link(struct ntb_softc *ntb)
1863{
1864	uint32_t ntb_cntl;
1865	uint16_t reg_val;
1866
1867	if (ntb->type == NTB_ATOM) {
1868		ntb_cntl = ntb_reg_read(4, ntb->reg->ntb_ctl);
1869		if (ntb_cntl == ntb->ntb_ctl)
1870			return (false);
1871
1872		ntb->ntb_ctl = ntb_cntl;
1873		ntb->lnk_sta = ntb_reg_read(4, ntb->reg->lnk_sta);
1874	} else {
1875		db_iowrite(ntb, ntb->self_reg->db_bell, ntb->db_link_mask);
1876
1877		reg_val = pci_read_config(ntb->device, ntb->reg->lnk_sta, 2);
1878		if (reg_val == ntb->lnk_sta)
1879			return (false);
1880
1881		ntb->lnk_sta = reg_val;
1882	}
1883	return (true);
1884}
1885
1886static inline enum ntb_speed
1887ntb_link_sta_speed(struct ntb_softc *ntb)
1888{
1889
1890	if (!link_is_up(ntb))
1891		return (NTB_SPEED_NONE);
1892	return (ntb->lnk_sta & NTB_LINK_SPEED_MASK);
1893}
1894
1895static inline enum ntb_width
1896ntb_link_sta_width(struct ntb_softc *ntb)
1897{
1898
1899	if (!link_is_up(ntb))
1900		return (NTB_WIDTH_NONE);
1901	return (NTB_LNK_STA_WIDTH(ntb->lnk_sta));
1902}
1903
1904/*
1905 * Public API to the rest of the OS
1906 */
1907
1908/**
1909 * ntb_get_max_spads() - get the total scratch regs usable
1910 * @ntb: pointer to ntb_softc instance
1911 *
1912 * This function returns the number of 32-bit scratchpad registers usable
1913 * by the upper layer.
1914 *
1915 * RETURNS: total number of scratch pad registers available
1916 */
1917uint8_t
1918ntb_get_max_spads(struct ntb_softc *ntb)
1919{
1920
1921	return (ntb->spad_count);
1922}
1923
1924uint8_t
1925ntb_mw_count(struct ntb_softc *ntb)
1926{
1927
1928	return (ntb->mw_count);
1929}
1930
1931/**
1932 * ntb_spad_write() - write to the local scratchpad register
1933 * @ntb: pointer to ntb_softc instance
1934 * @idx: index to the scratchpad register, 0 based
1935 * @val: the data value to put into the register
1936 *
1937 * This function allows writing of a 32-bit value to the indexed scratchpad
1938 * register. The register resides on the local (primary) side.
1939 *
1940 * RETURNS: An appropriate ERRNO error value on error, or zero for success.
1941 */
1942int
1943ntb_spad_write(struct ntb_softc *ntb, unsigned int idx, uint32_t val)
1944{
1945
1946	if (idx >= ntb->spad_count)
1947		return (EINVAL);
1948
1949	ntb_reg_write(4, ntb->self_reg->spad + idx * 4, val);
1950
1951	return (0);
1952}
1953
1954/**
1955 * ntb_spad_read() - read from the primary scratchpad register
1956 * @ntb: pointer to ntb_softc instance
1957 * @idx: index to scratchpad register, 0 based
1958 * @val: pointer to 32-bit integer for storing the register value
1959 *
1960 * This function allows reading of the 32-bit scratchpad register on
1961 * the primary (internal) side.
1962 *
1963 * RETURNS: An appropriate ERRNO error value on error, or zero for success.
1964 */
1965int
1966ntb_spad_read(struct ntb_softc *ntb, unsigned int idx, uint32_t *val)
1967{
1968
1969	if (idx >= ntb->spad_count)
1970		return (EINVAL);
1971
1972	*val = ntb_reg_read(4, ntb->self_reg->spad + idx * 4);
1973
1974	return (0);
1975}
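
/*
 * Editor's note: both ntb_spad_write() and ntb_spad_read() address the local
 * (self) scratchpad registers, so a locally written value reads straight
 * back; only the ntb_peer_spad_*() variants below cross the link.  A quick
 * readback sketch (hypothetical consumer code):
 *
 *	uint32_t val;
 *
 *	if (ntb_spad_write(sc->ntb, 0, 0xdeadbeef) == 0 &&
 *	    ntb_spad_read(sc->ntb, 0, &val) == 0)
 *		KASSERT(val == 0xdeadbeef, ("scratchpad readback mismatch"));
 */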
1976
1977/**
1978 * ntb_peer_spad_write() - write to the secondary scratchpad register
1979 * @ntb: pointer to ntb_softc instance
1980 * @idx: index to the scratchpad register, 0 based
1981 * @val: the data value to put into the register
1982 *
1983 * This function allows writing of a 32-bit value to the indexed scratchpad
1984 * register. The register resides on the secondary (external) side.
1985 *
1986 * RETURNS: An appropriate ERRNO error value on error, or zero for success.
1987 */
1988int
1989ntb_peer_spad_write(struct ntb_softc *ntb, unsigned int idx, uint32_t val)
1990{
1991
1992	if (idx >= ntb->spad_count)
1993		return (EINVAL);
1994
1995	if (HAS_FEATURE(NTB_SDOORBELL_LOCKUP))
1996		ntb_mw_write(4, XEON_SHADOW_SPAD_OFFSET + idx * 4, val);
1997	else
1998		ntb_reg_write(4, ntb->peer_reg->spad + idx * 4, val);
1999
2000	return (0);
2001}
2002
2003/**
2004 * ntb_peer_spad_read() - read from the secondary scratchpad register
2005 * @ntb: pointer to ntb_softc instance
2006 * @idx: index to scratchpad register, 0 based
2007 * @val: pointer to 32-bit integer for storing the register value
2008 *
2009 * This function allows reading of the 32-bit scratchpad register on
2010 * the secondary (external) side.
2011 *
2012 * RETURNS: An appropriate ERRNO error value on error, or zero for success.
2013 */
2014int
2015ntb_peer_spad_read(struct ntb_softc *ntb, unsigned int idx, uint32_t *val)
2016{
2017
2018	if (idx >= ntb->spad_count)
2019		return (EINVAL);
2020
2021	if (HAS_FEATURE(NTB_SDOORBELL_LOCKUP))
2022		*val = ntb_mw_read(4, XEON_SHADOW_SPAD_OFFSET + idx * 4);
2023	else
2024		*val = ntb_reg_read(4, ntb->peer_reg->spad + idx * 4);
2025
2026	return (0);
2027}
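
/*
 * Editor's note: a common handshake is for each host to publish a value into
 * the peer's scratchpads and poll its own local scratchpads for the peer's
 * matching write.  Hedged sketch; MY_SPAD_VERSION, MY_PROTO_VERSION, and
 * my_handshake_done() are hypothetical:
 *
 *	uint32_t peer_ver;
 *
 *	ntb_peer_spad_write(sc->ntb, MY_SPAD_VERSION, MY_PROTO_VERSION);
 *	if (ntb_spad_read(sc->ntb, MY_SPAD_VERSION, &peer_ver) == 0 &&
 *	    peer_ver == MY_PROTO_VERSION)
 *		my_handshake_done(sc);
 */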
2028
2029/*
2030 * ntb_mw_get_range() - get the range of a memory window
2031 * @ntb:        NTB device context
2032 * @mw_idx:     Memory window number
2033 * @base:       OUT - the physical base address for mapping the memory window
 * @vbase:      OUT - the virtual base address for mapping the memory window
2034 * @size:       OUT - the size for mapping the memory window
2035 * @align:      OUT - the base alignment for translating the memory window
2036 * @align_size: OUT - the size alignment for translating the memory window
2037 *
2038 * Get the range of a memory window.  NULL may be given for any output
2039 * parameter if the value is not needed.  The base and size may be used for
2040 * mapping the memory window, to access the peer memory.  The alignment and
2041 * size may be used for translating the memory window, for the peer to access
2042 * memory on the local system.
2043 *
2044 * Return: Zero on success, otherwise an error number.
2045 */
2046int
2047ntb_mw_get_range(struct ntb_softc *ntb, unsigned mw_idx, vm_paddr_t *base,
2048    void **vbase, size_t *size, size_t *align, size_t *align_size)
2049{
2050	struct ntb_pci_bar_info *bar;
2051	size_t bar_b2b_off;
2052
2053	if (mw_idx >= ntb_mw_count(ntb))
2054		return (EINVAL);
2055
2056	bar = &ntb->bar_info[ntb_mw_to_bar(ntb, mw_idx)];
2057	bar_b2b_off = 0;
2058	if (mw_idx == ntb->b2b_mw_idx) {
2059		KASSERT(ntb->b2b_off != 0,
2060		    ("user shouldn't get non-shared b2b mw"));
2061		bar_b2b_off = ntb->b2b_off;
2062	}
2063
2064	if (base != NULL)
2065		*base = bar->pbase + bar_b2b_off;
2066	if (vbase != NULL)
2067		*vbase = (char *)bar->vbase + bar_b2b_off;
2068	if (size != NULL)
2069		*size = bar->size - bar_b2b_off;
2070	if (align != NULL)
2071		*align = bar->size;
2072	if (align_size != NULL)
2073		*align_size = 1;
2074	return (0);
2075}
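
/*
 * Editor's note: sketch of querying a window before use; any unneeded output
 * pointer may be NULL, as documented above ("sc" is hypothetical consumer
 * state):
 *
 *	vm_paddr_t base;
 *	void *vbase;
 *	size_t size, align, align_size;
 *
 *	if (ntb_mw_get_range(sc->ntb, 0, &base, &vbase, &size, &align,
 *	    &align_size) == 0)
 *		device_printf(sc->dev, "MW0: %zu bytes, align %zu\n",
 *		    size, align);
 */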
2076
2077/*
2078 * ntb_mw_set_trans() - set the translation of a memory window
2079 * @ntb:        NTB device context
2080 * @idx:        Memory window number
2081 * @addr:       The DMA address of the local memory to expose to the peer
2082 * @size:       The size of the local memory to expose to the peer
2083 *
2084 * Set the translation of a memory window.  The peer may access local memory
2085 * through the window starting at the address, up to the size.  The address
2086 * must be aligned to the alignment specified by ntb_mw_get_range().  The size
2087 * must be aligned to the size alignment specified by ntb_mw_get_range().
2088 *
2089 * Return: Zero on success, otherwise an error number.
2090 */
2091int
2092ntb_mw_set_trans(struct ntb_softc *ntb, unsigned idx, bus_addr_t addr,
2093    size_t size)
2094{
2095	struct ntb_pci_bar_info *bar;
2096	uint64_t base, limit, reg_val;
2097	size_t bar_size, mw_size;
2098	uint32_t base_reg, xlat_reg, limit_reg;
2099	enum ntb_bar bar_num;
2100
2101	if (idx >= ntb_mw_count(ntb))
2102		return (EINVAL);
2103
2104	bar_num = ntb_mw_to_bar(ntb, idx);
2105	bar = &ntb->bar_info[bar_num];
2106
2107	bar_size = bar->size;
2108	if (idx == ntb->b2b_mw_idx)
2109		mw_size = bar_size - ntb->b2b_off;
2110	else
2111		mw_size = bar_size;
2112
2113	/* Hardware requires that addr is aligned to bar size */
2114	if ((addr & (bar_size - 1)) != 0)
2115		return (EINVAL);
2116
2117	if (size > mw_size)
2118		return (EINVAL);
2119
2120	bar_get_xlat_params(ntb, bar_num, &base_reg, &xlat_reg, &limit_reg);
2121
2122	limit = 0;
2123	if (bar_is_64bit(ntb, bar_num)) {
2124		base = ntb_reg_read(8, base_reg);
2125
2126		if (limit_reg != 0 && size != mw_size)
2127			limit = base + size;
2128
2129		/* Set and verify translation address */
2130		ntb_reg_write(8, xlat_reg, addr);
2131		reg_val = ntb_reg_read(8, xlat_reg);
2132		if (reg_val != addr) {
2133			ntb_reg_write(8, xlat_reg, 0);
2134			return (EIO);
2135		}
2136
2137		/* Set and verify the limit */
2138		ntb_reg_write(8, limit_reg, limit);
2139		reg_val = ntb_reg_read(8, limit_reg);
2140		if (reg_val != limit) {
2141			ntb_reg_write(8, limit_reg, base);
2142			ntb_reg_write(8, xlat_reg, 0);
2143			return (EIO);
2144		}
2145	} else {
2146		/* Configure 32-bit (split) BAR MW */
2147
2148		if ((addr & ~UINT32_MAX) != 0)
2149			return (EINVAL);
2150		if (((addr + size) & ~UINT32_MAX) != 0)
2151			return (EINVAL);
2152
2153		base = ntb_reg_read(4, base_reg);
2154
2155		if (limit_reg != 0 && size != mw_size)
2156			limit = base + size;
2157
2158		/* Set and verify translation address */
2159		ntb_reg_write(4, xlat_reg, addr);
2160		reg_val = ntb_reg_read(4, xlat_reg);
2161		if (reg_val != addr) {
2162			ntb_reg_write(4, xlat_reg, 0);
2163			return (EIO);
2164		}
2165
2166		/* Set and verify the limit */
2167		ntb_reg_write(4, limit_reg, limit);
2168		reg_val = ntb_reg_read(4, limit_reg);
2169		if (reg_val != limit) {
2170			ntb_reg_write(4, limit_reg, base);
2171			ntb_reg_write(4, xlat_reg, 0);
2172			return (EIO);
2173		}
2174	}
2175	return (0);
2176}
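
/*
 * Editor's note: since the hardware requires the translated address to be
 * aligned to the BAR size (the "align" output of ntb_mw_get_range()), a
 * consumer would typically allocate the exposed buffer with contigmalloc(9)
 * at that alignment and translate it with vtophys(9).  Hedged sketch,
 * continuing the variables from the ntb_mw_get_range() example above; error
 * paths are omitted:
 *
 *	void *buf;
 *
 *	buf = contigmalloc(size, M_DEVBUF, M_WAITOK | M_ZERO, 0,
 *	    BUS_SPACE_MAXADDR, align, 0);
 *	if (ntb_mw_set_trans(sc->ntb, 0, vtophys(buf), size) != 0)
 *		device_printf(sc->dev, "MW translation rejected\n");
 */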
2177
2178/*
2179 * ntb_mw_clear_trans() - clear the translation of a memory window
2180 * @ntb:	NTB device context
2181 * @mw_idx:	Memory window number
2182 *
2183 * Clear the translation of a memory window.  The peer may no longer access
2184 * local memory through the window.
2185 *
2186 * Return: Zero on success, otherwise an error number.
2187 */
2188int
2189ntb_mw_clear_trans(struct ntb_softc *ntb, unsigned mw_idx)
2190{
2191
2192	return (ntb_mw_set_trans(ntb, mw_idx, 0, 0));
2193}
2194
2195/**
2196 * ntb_peer_db_set() - Set the doorbell on the secondary/external side
2197 * @ntb: pointer to ntb_softc instance
2198 * @bit: doorbell bits to ring
2199 *
2200 * This function allows triggering of a doorbell on the secondary/external
2201 * side that will initiate an interrupt on the remote host.
2202 */
2203void
2204ntb_peer_db_set(struct ntb_softc *ntb, uint64_t bit)
2205{
2206
2207	if (HAS_FEATURE(NTB_SDOORBELL_LOCKUP)) {
2208		ntb_mw_write(2, XEON_SHADOW_PDOORBELL_OFFSET, bit);
2209		return;
2210	}
2211
2212	db_iowrite(ntb, ntb->peer_reg->db_bell, bit);
2213}
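
/*
 * Editor's note: after staging data in the peer-visible window, a consumer
 * rings one of the bits from ntb_db_valid_mask().  Sketch (hypothetical
 * consumer code):
 *
 *	uint64_t db_bit = 1;
 *
 *	if ((db_bit & ntb_db_valid_mask(sc->ntb)) != 0)
 *		ntb_peer_db_set(sc->ntb, db_bit);
 */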
2214
2215/*
2216 * ntb_get_peer_db_addr() - Return the address of the remote doorbell register,
2217 * as well as the size of the register (via *sz_out).
2218 *
2219 * This function allows a caller using I/OAT DMA to chain the remote doorbell
2220 * ring to its memory window write.
2221 *
2222 * Note that writing the peer doorbell via a memory window will *not* generate
2223 * an interrupt on the remote host; that must be done separately.
2224 */
2225bus_addr_t
2226ntb_get_peer_db_addr(struct ntb_softc *ntb, vm_size_t *sz_out)
2227{
2228	struct ntb_pci_bar_info *bar;
2229	uint64_t regoff;
2230
2231	KASSERT(sz_out != NULL, ("must be non-NULL"));
2232
2233	if (!HAS_FEATURE(NTB_SDOORBELL_LOCKUP)) {
2234		bar = &ntb->bar_info[NTB_CONFIG_BAR];
2235		regoff = ntb->peer_reg->db_bell;
2236	} else {
2237		KASSERT((HAS_FEATURE(NTB_SPLIT_BAR) && ntb->mw_count == 2) ||
2238		    (!HAS_FEATURE(NTB_SPLIT_BAR) && ntb->mw_count == 1),
2239		    ("mw_count invalid after setup"));
2240		KASSERT(ntb->b2b_mw_idx != B2B_MW_DISABLED,
2241		    ("invalid b2b idx"));
2242
2243		bar = &ntb->bar_info[ntb_mw_to_bar(ntb, ntb->b2b_mw_idx)];
2244		regoff = XEON_SHADOW_PDOORBELL_OFFSET;
2245	}
2246	KASSERT(bar->pci_bus_tag != X86_BUS_SPACE_IO, ("db must be MMIO"));
2247
2248	*sz_out = ntb->reg->db_size;
2249	/* HACK: Specific to current x86 bus implementation. */
2250	return ((uint64_t)bar->pci_bus_handle + regoff);
2251}
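
/*
 * Editor's note: sketch of fetching the doorbell address for appending to a
 * DMA chain, per the comment above; submitting the chained write is
 * engine-specific and omitted here:
 *
 *	bus_addr_t db_addr;
 *	vm_size_t db_sz;
 *
 *	db_addr = ntb_get_peer_db_addr(sc->ntb, &db_sz);
 */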
2252
2253/*
2254 * ntb_db_valid_mask() - get a mask of doorbell bits supported by the ntb
2255 * @ntb:	NTB device context
2256 *
2257 * Hardware may support a different number or arrangement of doorbell bits.
2258 *
2259 * Return: A mask of doorbell bits supported by the ntb.
2260 */
2261uint64_t
2262ntb_db_valid_mask(struct ntb_softc *ntb)
2263{
2264
2265	return (ntb->db_valid_mask);
2266}
2267
2268/*
2269 * ntb_db_vector_mask() - get a mask of doorbell bits serviced by a vector
2270 * @ntb:	NTB device context
2271 * @vector:	Doorbell vector number
2272 *
2273 * Each interrupt vector may have a different number or arrangement of bits.
2274 *
2275 * Return: A mask of doorbell bits serviced by a vector.
2276 */
2277uint64_t
2278ntb_db_vector_mask(struct ntb_softc *ntb, uint32_t vector)
2279{
2280
2281	if (vector >= ntb->db_vec_count)
2282		return (0);
2283	return (ntb->db_valid_mask & ntb_vec_mask(ntb, vector));
2284}
2285
2286/**
2287 * ntb_link_is_up() - get the current ntb link state
2288 * @ntb:        NTB device context
2289 * @speed:      OUT - The link speed expressed as PCIe generation number
2290 * @width:      OUT - The link width expressed as the number of PCIe lanes
2291 *
2292 * RETURNS: true or false based on the hardware link state
2293 */
2294bool
2295ntb_link_is_up(struct ntb_softc *ntb, enum ntb_speed *speed,
2296    enum ntb_width *width)
2297{
2298
2299	if (speed != NULL)
2300		*speed = ntb_link_sta_speed(ntb);
2301	if (width != NULL)
2302		*width = ntb_link_sta_width(ntb);
2303	return (link_is_up(ntb));
2304}
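
/*
 * Editor's note: consumers typically gate traffic on this predicate from
 * their link_event callback.  Sketch (hypothetical consumer code):
 *
 *	enum ntb_speed speed;
 *	enum ntb_width width;
 *
 *	if (ntb_link_is_up(sc->ntb, &speed, &width))
 *		device_printf(sc->dev, "NTB link up: Gen%d, x%d\n",
 *		    (int)speed, (int)width);
 */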
2305
2306static void
2307save_bar_parameters(struct ntb_pci_bar_info *bar)
2308{
2309
2310	bar->pci_bus_tag = rman_get_bustag(bar->pci_resource);
2311	bar->pci_bus_handle = rman_get_bushandle(bar->pci_resource);
2312	bar->pbase = rman_get_start(bar->pci_resource);
2313	bar->size = rman_get_size(bar->pci_resource);
2314	bar->vbase = rman_get_virtual(bar->pci_resource);
2315}
2316
2317device_t
2318ntb_get_device(struct ntb_softc *ntb)
2319{
2320
2321	return (ntb->device);
2322}
2323
2324/* Export HW-specific errata information. */
2325bool
2326ntb_has_feature(struct ntb_softc *ntb, uint64_t feature)
2327{
2328
2329	return (HAS_FEATURE(feature));
2330}
2331