/* ntb_hw_intel.c revision 291030 */
/*-
 * Copyright (C) 2013 Intel Corporation
 * Copyright (C) 2015 EMC Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/ntb/ntb_hw/ntb_hw.c 291030 2015-11-18 22:20:13Z cem $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/bus.h>
#include <machine/pmap.h>
#include <machine/resource.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "ntb_regs.h"
#include "ntb_hw.h"

/*
 * The Non-Transparent Bridge (NTB) is a device on some Intel processors that
 * allows you to connect two systems using a PCI-e link.
 *
 * This module contains the hardware abstraction layer for the NTB.  It allows
 * you to send and receive interrupts, map the memory windows and send and
 * receive messages in the scratch-pad registers.
 *
 * NOTE: Much of the code in this module is shared with Linux.  Any patches may
 * be picked up and redistributed in Linux with a dual GPL/BSD license.
 */

/* Upper bound on MSI-X vectors across supported hardware variants. */
#define MAX_MSIX_INTERRUPTS MAX(XEON_DB_COUNT, ATOM_DB_COUNT)

#define NTB_HB_TIMEOUT		1 /* second */
#define ATOM_LINK_RECOVERY_TIME	500 /* ms */

#define DEVICE2SOFTC(dev) ((struct ntb_softc *) device_get_softc(dev))

enum ntb_device_type {
	NTB_XEON,
	NTB_ATOM
};

/* ntb_conn_type are hardware numbers, cannot change. */
enum ntb_conn_type {
	NTB_CONN_TRANSPARENT = 0,
	NTB_CONN_B2B = 1,
	NTB_CONN_RP = 2,
};

enum ntb_b2b_direction {
	NTB_DEV_USD = 0,
	NTB_DEV_DSD = 1,
};

/* Logical BAR indices used throughout; NTB_MAX_BARS bounds the array. */
enum ntb_bar {
	NTB_CONFIG_BAR = 0,
	NTB_B2B_BAR_1,
	NTB_B2B_BAR_2,
	NTB_B2B_BAR_3,
	NTB_MAX_BARS
};

/* Device features and workarounds */
#define HAS_FEATURE(feature) \
	((ntb->features & (feature)) != 0)

/* One entry per supported PCI device ID; see pci_ids[] below. */
struct ntb_hw_info {
	uint32_t	device_id;
	const char	*desc;
	enum ntb_device_type type;
	uint32_t	features;
};

/* Per-BAR mapping state plus the config-register offsets that describe it. */
struct ntb_pci_bar_info {
	bus_space_tag_t		pci_bus_tag;
	bus_space_handle_t	pci_bus_handle;
	int			pci_resource_id;
	struct resource		*pci_resource;
	vm_paddr_t		pbase;
	caddr_t			vbase;
	vm_size_t		size;

	/* Configuration register offsets */
	uint32_t		psz_off;
	uint32_t		ssz_off;
	uint32_t		pbarxlat_off;
};

/* One allocated interrupt resource (legacy INTx or one MSI-X message). */
struct ntb_int_info {
	struct resource	*res;
	int		rid;
	void		*tag;
};

/*
 * Cookie handed to each MSI-X handler: maps the vector number back to the
 * owning softc (see ntb_setup_msix()/ndev_vec_isr()).
 */
struct ntb_vec {
	struct ntb_softc	*ntb;
	uint32_t		num;
};

/* Hardware-variant register layout (Atom vs. Xeon). */
struct ntb_reg {
	uint32_t	ntb_ctl;
	uint32_t	lnk_sta;
	uint8_t		db_size;	/* doorbell register width in bytes */
	unsigned	mw_bar[NTB_MAX_BARS];
};

/* Doorbell/scratchpad offsets for one side of the link. */
struct ntb_alt_reg {
	uint32_t	db_bell;
	uint32_t	db_mask;
	uint32_t	spad;
};

/* Secondary-side base/translate/limit register offsets, per BAR. */
struct ntb_xlat_reg {
	uint32_t	bar0_base;
	uint32_t	bar2_base;
	uint32_t	bar4_base;
	uint32_t	bar5_base;

	uint32_t	bar2_xlat;
	uint32_t	bar4_xlat;
	uint32_t	bar5_xlat;

	uint32_t	bar2_limit;
	uint32_t	bar4_limit;
	uint32_t	bar5_limit;
};

/* Bus addresses used for the B2B memory-window segments. */
struct ntb_b2b_addr {
	uint64_t	bar0_addr;
	uint64_t	bar2_addr64;
	uint64_t	bar4_addr64;
	uint64_t	bar4_addr32;
	uint64_t	bar5_addr32;
};

struct ntb_softc {
	device_t		device;
	enum ntb_device_type	type;
	uint32_t		features;

	struct ntb_pci_bar_info	bar_info[NTB_MAX_BARS];
	struct ntb_int_info	int_info[MAX_MSIX_INTERRUPTS];
	uint32_t		allocated_interrupts;

	struct callout		heartbeat_timer;
	struct callout		lr_timer;

	void			*ntb_ctx;
	const struct ntb_ctx_ops *ctx_ops;
	struct ntb_vec		*msix_vec;
#define CTX_LOCK(sc)		mtx_lock(&(sc)->ctx_lock)
#define CTX_UNLOCK(sc)		mtx_unlock(&(sc)->ctx_lock)
#define CTX_ASSERT(sc,f)	mtx_assert(&(sc)->ctx_lock, (f))
	struct mtx		ctx_lock;

	uint32_t		ppd;
	enum ntb_conn_type	conn_type;
	enum ntb_b2b_direction	dev_type;

	/* Offset of peer bar0 in B2B BAR */
	uint64_t		b2b_off;
	/* Memory window used to access peer bar0 */
#define B2B_MW_DISABLED		UINT8_MAX
	uint8_t			b2b_mw_idx;

	uint8_t			mw_count;
	uint8_t			spad_count;
	uint8_t			db_count;
	uint8_t			db_vec_count;
	uint8_t			db_vec_shift;

	/* Protects local db_mask. */
#define DB_MASK_LOCK(sc)	mtx_lock_spin(&(sc)->db_mask_lock)
#define DB_MASK_UNLOCK(sc)	mtx_unlock_spin(&(sc)->db_mask_lock)
#define DB_MASK_ASSERT(sc,f)	mtx_assert(&(sc)->db_mask_lock, (f))
	struct mtx		db_mask_lock;

	volatile uint32_t	ntb_ctl;
	volatile uint32_t	lnk_sta;

	uint64_t		db_valid_mask;
	uint64_t		db_link_mask;
	uint64_t		db_mask;

	int			last_ts;	/* ticks @ last irq */

	const struct ntb_reg		*reg;
	const struct ntb_alt_reg	*self_reg;
	const struct ntb_alt_reg	*peer_reg;
	const struct ntb_xlat_reg	*xlat_reg;
};

#ifdef __i386__
/*
 * i386 has no native 8-byte bus_space accessors; synthesize them from two
 * 4-byte accesses (low dword first).  Not atomic with respect to the device.
 */
static __inline uint64_t
bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t offset)
{

	return (bus_space_read_4(tag, handle, offset) |
	    ((uint64_t)bus_space_read_4(tag, handle, offset + 4)) << 32);
}

static __inline void
bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t offset, uint64_t val)
{

	bus_space_write_4(tag, handle, offset, val);
	bus_space_write_4(tag, handle, offset + 4, val >> 32);
}
#endif

/*
 * Register access helpers.  All of these expect a local `ntb` softc pointer
 * in scope.  SIZE is the access width in bytes (token-pasted onto
 * bus_space_{read,write}_).
 */
#define ntb_bar_read(SIZE, bar, offset) \
	bus_space_read_ ## SIZE (ntb->bar_info[(bar)].pci_bus_tag, \
	    ntb->bar_info[(bar)].pci_bus_handle, (offset))
#define ntb_bar_write(SIZE, bar, offset, val) \
	bus_space_write_ ## SIZE (ntb->bar_info[(bar)].pci_bus_tag, \
	    ntb->bar_info[(bar)].pci_bus_handle, (offset), (val))
#define ntb_reg_read(SIZE, offset) ntb_bar_read(SIZE, NTB_CONFIG_BAR, offset)
#define ntb_reg_write(SIZE, offset, val) \
	ntb_bar_write(SIZE, NTB_CONFIG_BAR, offset, val)
#define ntb_mw_read(SIZE, offset) \
	ntb_bar_read(SIZE, ntb_mw_to_bar(ntb, ntb->b2b_mw_idx), offset)
#define ntb_mw_write(SIZE, offset, val) \
	ntb_bar_write(SIZE, ntb_mw_to_bar(ntb, ntb->b2b_mw_idx), \
	    offset, val)

static int ntb_probe(device_t device);
static int ntb_attach(device_t device);
static int ntb_detach(device_t device);
static inline enum ntb_bar ntb_mw_to_bar(struct ntb_softc *, unsigned mw);
static inline bool bar_is_64bit(struct ntb_softc *, enum ntb_bar);
static inline void bar_get_xlat_params(struct ntb_softc *, enum ntb_bar,
    uint32_t *base, uint32_t *xlat, uint32_t *lmt);
static int ntb_map_pci_bars(struct ntb_softc *ntb);
static void print_map_success(struct ntb_softc *, struct ntb_pci_bar_info *,
    const char *);
static int map_mmr_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar);
static int map_memory_window_bar(struct ntb_softc *ntb,
    struct ntb_pci_bar_info *bar);
static void ntb_unmap_pci_bar(struct ntb_softc *ntb);
static int ntb_remap_msix(device_t, uint32_t desired, uint32_t avail);
static int ntb_init_isr(struct ntb_softc *ntb);
static int ntb_setup_legacy_interrupt(struct ntb_softc *ntb);
static int ntb_setup_msix(struct ntb_softc *ntb, uint32_t num_vectors);
static void ntb_teardown_interrupts(struct ntb_softc *ntb);
static inline uint64_t ntb_vec_mask(struct ntb_softc *, uint64_t db_vector);
static void ntb_interrupt(struct ntb_softc *, uint32_t vec);
static void ndev_vec_isr(void *arg);
static void ndev_irq_isr(void *arg);
static inline uint64_t db_ioread(struct ntb_softc *, uint64_t regoff);
static inline void db_iowrite(struct ntb_softc *, uint64_t regoff, uint64_t);
static inline void db_iowrite_raw(struct ntb_softc *, uint64_t regoff, uint64_t);
static int ntb_create_msix_vec(struct ntb_softc *ntb, uint32_t num_vectors);
static void ntb_free_msix_vec(struct ntb_softc *ntb);
static struct ntb_hw_info *ntb_get_device_info(uint32_t device_id);
static void ntb_detect_max_mw(struct ntb_softc *ntb);
static int ntb_detect_xeon(struct ntb_softc *ntb);
static int ntb_detect_atom(struct ntb_softc *ntb);
static int ntb_xeon_init_dev(struct ntb_softc *ntb);
static int ntb_atom_init_dev(struct ntb_softc *ntb);
static void ntb_teardown_xeon(struct ntb_softc *ntb);
static void configure_atom_secondary_side_bars(struct ntb_softc *ntb);
static void xeon_reset_sbar_size(struct ntb_softc *, enum ntb_bar idx,
    enum ntb_bar regbar);
static void xeon_set_sbar_base_and_limit(struct ntb_softc *,
    uint64_t base_addr, enum ntb_bar idx, enum ntb_bar regbar);
static void xeon_set_pbar_xlat(struct ntb_softc *, uint64_t base_addr,
    enum ntb_bar idx);
static int xeon_setup_b2b_mw(struct ntb_softc *,
    const struct ntb_b2b_addr *addr, const struct ntb_b2b_addr *peer_addr);
static inline bool link_is_up(struct ntb_softc *ntb);
static inline bool atom_link_is_err(struct ntb_softc *ntb);
static inline enum ntb_speed ntb_link_sta_speed(struct ntb_softc *);
static inline enum ntb_width ntb_link_sta_width(struct ntb_softc *);
static void atom_link_hb(void *arg);
static void ntb_db_event(struct ntb_softc *ntb, uint32_t vec);
static void recover_atom_link(void *arg);
static bool ntb_poll_link(struct ntb_softc *ntb);
static void save_bar_parameters(struct ntb_pci_bar_info *bar);
static void ntb_sysctl_init(struct ntb_softc *);
static int sysctl_handle_features(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_link_status(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_register(SYSCTL_HANDLER_ARGS);

static unsigned g_ntb_hw_debug_level;
SYSCTL_UINT(_hw_ntb, OID_AUTO, debug_level, CTLFLAG_RWTUN,
    &g_ntb_hw_debug_level, 0, "ntb_hw log level -- higher is more verbose");
/* Expects a local `ntb` softc pointer in scope, like the register macros. */
#define ntb_printf(lvl, ...) do {				\
	if ((lvl) <= g_ntb_hw_debug_level) {			\
		device_printf(ntb->device, __VA_ARGS__);	\
	}							\
} while (0)

static unsigned g_ntb_enable_wc = 1;
SYSCTL_UINT(_hw_ntb, OID_AUTO, enable_writecombine, CTLFLAG_RDTUN,
    &g_ntb_enable_wc, 0, "Set to 1 to map memory windows write combining");

/* Supported devices; terminated by a zero device_id sentinel. */
static struct ntb_hw_info pci_ids[] = {
	/* XXX: PS/SS IDs left out until they are supported. */
	{ 0x0C4E8086, "BWD Atom Processor S1200 Non-Transparent Bridge B2B",
		NTB_ATOM, 0 },

	{ 0x37258086, "JSF Xeon C35xx/C55xx Non-Transparent Bridge B2B",
		NTB_XEON, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 },
	{ 0x3C0D8086, "SNB Xeon E5/Core i7 Non-Transparent Bridge B2B",
		NTB_XEON, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 },
	{ 0x0E0D8086, "IVT Xeon E5 V2 Non-Transparent Bridge B2B", NTB_XEON,
		NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 |
		    NTB_SB01BASE_LOCKUP | NTB_BAR_SIZE_4K },
	{ 0x2F0D8086, "HSX Xeon E5 V3 Non-Transparent Bridge B2B", NTB_XEON,
		NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 |
		    NTB_SB01BASE_LOCKUP },
	{ 0x6F0D8086, "BDX Xeon E5 V4 Non-Transparent Bridge B2B", NTB_XEON,
		NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 |
		    NTB_SB01BASE_LOCKUP },

	{ 0x00000000, NULL, NTB_ATOM, 0 }
};

static const struct ntb_reg atom_reg = {
	.ntb_ctl = ATOM_NTBCNTL_OFFSET,
	.lnk_sta = ATOM_LINK_STATUS_OFFSET,
	.db_size = sizeof(uint64_t),
	.mw_bar = { NTB_B2B_BAR_1, NTB_B2B_BAR_2 },
};

static const struct ntb_alt_reg atom_pri_reg = {
	.db_bell = ATOM_PDOORBELL_OFFSET,
	.db_mask = ATOM_PDBMSK_OFFSET,
	.spad = ATOM_SPAD_OFFSET,
};

static const struct ntb_alt_reg atom_b2b_reg = {
	.db_bell = ATOM_B2B_DOORBELL_OFFSET,
	.spad = ATOM_B2B_SPAD_OFFSET,
};

static const struct ntb_xlat_reg atom_sec_xlat = {
#if 0
	/* "FIXME" says the Linux driver. */
	.bar0_base = ATOM_SBAR0BASE_OFFSET,
	.bar2_base = ATOM_SBAR2BASE_OFFSET,
	.bar4_base = ATOM_SBAR4BASE_OFFSET,

	.bar2_limit = ATOM_SBAR2LMT_OFFSET,
	.bar4_limit = ATOM_SBAR4LMT_OFFSET,
#endif

	.bar2_xlat = ATOM_SBAR2XLAT_OFFSET,
	.bar4_xlat = ATOM_SBAR4XLAT_OFFSET,
};

static const struct ntb_reg xeon_reg = {
	.ntb_ctl = XEON_NTBCNTL_OFFSET,
	.lnk_sta = XEON_LINK_STATUS_OFFSET,
	.db_size = sizeof(uint16_t),
	.mw_bar = { NTB_B2B_BAR_1, NTB_B2B_BAR_2, NTB_B2B_BAR_3 },
};

static const struct ntb_alt_reg xeon_pri_reg = {
	.db_bell = XEON_PDOORBELL_OFFSET,
	.db_mask = XEON_PDBMSK_OFFSET,
	.spad = XEON_SPAD_OFFSET,
};

static const struct ntb_alt_reg xeon_b2b_reg = {
	.db_bell = XEON_B2B_DOORBELL_OFFSET,
	.spad = XEON_B2B_SPAD_OFFSET,
};

static const struct ntb_xlat_reg xeon_sec_xlat = {
	.bar0_base = XEON_SBAR0BASE_OFFSET,
	.bar2_base = XEON_SBAR2BASE_OFFSET,
	.bar4_base = XEON_SBAR4BASE_OFFSET,
	.bar5_base = XEON_SBAR5BASE_OFFSET,

	.bar2_limit = XEON_SBAR2LMT_OFFSET,
	.bar4_limit = XEON_SBAR4LMT_OFFSET,
	.bar5_limit = XEON_SBAR5LMT_OFFSET,

	.bar2_xlat = XEON_SBAR2XLAT_OFFSET,
	.bar4_xlat = XEON_SBAR4XLAT_OFFSET,
	.bar5_xlat = XEON_SBAR5XLAT_OFFSET,
};

/* Non-const: the addresses below are overridable via the tunables that follow. */
static struct ntb_b2b_addr xeon_b2b_usd_addr = {
	.bar0_addr = XEON_B2B_BAR0_ADDR,
	.bar2_addr64 = XEON_B2B_BAR2_ADDR64,
	.bar4_addr64 = XEON_B2B_BAR4_ADDR64,
	.bar4_addr32 = XEON_B2B_BAR4_ADDR32,
	.bar5_addr32 = XEON_B2B_BAR5_ADDR32,
};

static struct ntb_b2b_addr xeon_b2b_dsd_addr = {
	.bar0_addr = XEON_B2B_BAR0_ADDR,
	.bar2_addr64 = XEON_B2B_BAR2_ADDR64,
	.bar4_addr64 = XEON_B2B_BAR4_ADDR64,
	.bar4_addr32 = XEON_B2B_BAR4_ADDR32,
	.bar5_addr32 = XEON_B2B_BAR5_ADDR32,
};

SYSCTL_NODE(_hw_ntb, OID_AUTO, xeon_b2b, CTLFLAG_RW, 0,
    "B2B MW segment overrides -- MUST be the same on both sides");

SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar2_addr64, CTLFLAG_RDTUN,
    &xeon_b2b_usd_addr.bar2_addr64, 0, "If using B2B topology on Xeon "
    "hardware, use this 64-bit address on the bus between the NTB devices for "
    "the window at BAR2, on the upstream side of the link.  MUST be the same "
    "address on both sides.");
SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar4_addr64, CTLFLAG_RDTUN,
    &xeon_b2b_usd_addr.bar4_addr64, 0, "See usd_bar2_addr64, but BAR4.");
SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar4_addr32, CTLFLAG_RDTUN,
    &xeon_b2b_usd_addr.bar4_addr32, 0, "See usd_bar2_addr64, but BAR4 "
    "(split-BAR mode).");
SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar5_addr32, CTLFLAG_RDTUN,
    &xeon_b2b_usd_addr.bar5_addr32, 0, "See usd_bar2_addr64, but BAR5 "
    "(split-BAR mode).");

SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar2_addr64, CTLFLAG_RDTUN,
    &xeon_b2b_dsd_addr.bar2_addr64, 0, "If using B2B topology on Xeon "
    "hardware, use this 64-bit address on the bus between the NTB devices for "
    "the window at BAR2, on the downstream side of the link.  MUST be the same"
    " address on both sides.");
SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar4_addr64, CTLFLAG_RDTUN,
    &xeon_b2b_dsd_addr.bar4_addr64, 0, "See dsd_bar2_addr64, but BAR4.");
SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar4_addr32, CTLFLAG_RDTUN,
    &xeon_b2b_dsd_addr.bar4_addr32, 0, "See dsd_bar2_addr64, but BAR4 "
    "(split-BAR mode).");
SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar5_addr32, CTLFLAG_RDTUN,
    &xeon_b2b_dsd_addr.bar5_addr32, 0, "See dsd_bar2_addr64, but BAR5 "
    "(split-BAR mode).");

/*
 * OS <-> Driver interface structures
 */
MALLOC_DEFINE(M_NTB, "ntb_hw", "ntb_hw driver memory allocations");

static device_method_t ntb_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,     ntb_probe),
	DEVMETHOD(device_attach,    ntb_attach),
	DEVMETHOD(device_detach,    ntb_detach),
	DEVMETHOD_END
};

static driver_t ntb_pci_driver = {
	"ntb_hw",
	ntb_pci_methods,
	sizeof(struct ntb_softc),
};

static devclass_t ntb_devclass;
DRIVER_MODULE(ntb_hw, pci, ntb_pci_driver, ntb_devclass, NULL, NULL);
MODULE_VERSION(ntb_hw, 1);

SYSCTL_NODE(_hw, OID_AUTO, ntb, CTLFLAG_RW, 0, "NTB sysctls");

/*
 * OS <-> Driver linkage functions
 */

/* Match the PCI device ID against pci_ids[]; ENXIO if unsupported. */
static int
ntb_probe(device_t device)
{
	struct ntb_hw_info *p;

	p = ntb_get_device_info(pci_get_devid(device));
	if (p == NULL)
		return (ENXIO);

	device_set_desc(device, p->desc);
	return (0);
}

/*
 * Attach: detect the hardware variant, map BARs, and run the
 * variant-specific init.  Any failure unwinds through ntb_detach().
 */
static int
ntb_attach(device_t device)
{
	struct ntb_softc *ntb;
	struct ntb_hw_info *p;
	int error;

	ntb = DEVICE2SOFTC(device);
	p = ntb_get_device_info(pci_get_devid(device));

	ntb->device = device;
	ntb->type = p->type;
	ntb->features = p->features;
	ntb->b2b_mw_idx = B2B_MW_DISABLED;

	/* Heartbeat timer for NTB_ATOM since there is no link interrupt */
	callout_init(&ntb->heartbeat_timer, 1);
	callout_init(&ntb->lr_timer, 1);
	mtx_init(&ntb->db_mask_lock, "ntb hw bits", NULL, MTX_SPIN);
	mtx_init(&ntb->ctx_lock, "ntb ctx", NULL, MTX_DEF);

	if (ntb->type == NTB_ATOM)
		error = ntb_detect_atom(ntb);
	else
		error = ntb_detect_xeon(ntb);
	if (error != 0)
		goto out;

	ntb_detect_max_mw(ntb);

	pci_enable_busmaster(ntb->device);

	error = ntb_map_pci_bars(ntb);
	if (error != 0)
		goto out;
	if (ntb->type == NTB_ATOM)
		error = ntb_atom_init_dev(ntb);
	else
		error = ntb_xeon_init_dev(ntb);
	if (error != 0)
		goto out;

	ntb_poll_link(ntb);

	ntb_sysctl_init(ntb);

out:
	if (error != 0)
		ntb_detach(device);
	return (error);
}

/*
 * Detach: also used as the error-unwind path from ntb_attach(), so every
 * step must tolerate partially-initialized state.
 */
static int
ntb_detach(device_t device)
{
	struct ntb_softc *ntb;

	ntb = DEVICE2SOFTC(device);

	/* self_reg is only set once the config BAR is mapped. */
	if (ntb->self_reg != NULL)
		ntb_db_set_mask(ntb, ntb->db_valid_mask);
	callout_drain(&ntb->heartbeat_timer);
	callout_drain(&ntb->lr_timer);
	pci_disable_busmaster(ntb->device);
	if (ntb->type == NTB_XEON)
		ntb_teardown_xeon(ntb);
	ntb_teardown_interrupts(ntb);

	mtx_destroy(&ntb->db_mask_lock);
	mtx_destroy(&ntb->ctx_lock);

	/*
	 * Redetect total MWs so we unmap properly -- in case we lowered the
	 * maximum to work around Xeon errata.
	 */
	ntb_detect_max_mw(ntb);
	ntb_unmap_pci_bar(ntb);

	return (0);
}

/*
 * Driver internal routines
 */

/* Translate a memory-window index into the BAR that backs it. */
static inline enum ntb_bar
ntb_mw_to_bar(struct ntb_softc *ntb, unsigned mw)
{

	KASSERT(mw < ntb->mw_count ||
	    (mw != B2B_MW_DISABLED && mw == ntb->b2b_mw_idx),
	    ("%s: mw:%u > count:%u", __func__, mw, (unsigned)ntb->mw_count));
	KASSERT(ntb->reg->mw_bar[mw] != 0, ("invalid mw"));

	return (ntb->reg->mw_bar[mw]);
}

/* BARs below BAR4 are 64-bit; BAR4/5 are 32-bit only in split-BAR mode. */
static inline bool
bar_is_64bit(struct ntb_softc *ntb, enum ntb_bar bar)
{
	/* XXX This assertion could be stronger. */
	KASSERT(bar < NTB_MAX_BARS, ("bogus bar"));
	return (bar < NTB_B2B_BAR_2 || !HAS_FEATURE(NTB_SPLIT_BAR));
}

/*
 * Look up the base/translate/limit register offsets for a BAR.  Any of the
 * out-pointers may be NULL if the caller does not need that value.
 */
static inline void
bar_get_xlat_params(struct ntb_softc *ntb, enum ntb_bar bar, uint32_t *base,
    uint32_t *xlat, uint32_t *lmt)
{
	uint32_t basev, lmtv, xlatv;

	switch (bar) {
	case NTB_B2B_BAR_1:
		basev = ntb->xlat_reg->bar2_base;
		lmtv = ntb->xlat_reg->bar2_limit;
		xlatv = ntb->xlat_reg->bar2_xlat;
		break;
	case NTB_B2B_BAR_2:
		basev = ntb->xlat_reg->bar4_base;
		lmtv = ntb->xlat_reg->bar4_limit;
		xlatv = ntb->xlat_reg->bar4_xlat;
		break;
	case NTB_B2B_BAR_3:
		basev = ntb->xlat_reg->bar5_base;
		lmtv = ntb->xlat_reg->bar5_limit;
		xlatv = ntb->xlat_reg->bar5_xlat;
		break;
	default:
		KASSERT(bar >= NTB_B2B_BAR_1 && bar < NTB_MAX_BARS,
		    ("bad bar"));
		basev = lmtv = xlatv = 0;
		break;
	}

	if (base != NULL)
		*base = basev;
	if (xlat != NULL)
		*xlat = xlatv;
	if (lmt != NULL)
		*lmt = lmtv;
}

/*
 * Map all BARs: BAR0 (config registers) and the memory-window BARs.
 * BARs consumed by the SDOORBELL_LOCKUP workaround are mapped as plain
 * registers (no write-combining).
 */
static int
ntb_map_pci_bars(struct ntb_softc *ntb)
{
	int rc;

	ntb->bar_info[NTB_CONFIG_BAR].pci_resource_id = PCIR_BAR(0);
	rc = map_mmr_bar(ntb, &ntb->bar_info[NTB_CONFIG_BAR]);
	if (rc != 0)
		goto out;

	ntb->bar_info[NTB_B2B_BAR_1].pci_resource_id = PCIR_BAR(2);
	rc = map_memory_window_bar(ntb, &ntb->bar_info[NTB_B2B_BAR_1]);
	if (rc != 0)
		goto out;
	ntb->bar_info[NTB_B2B_BAR_1].psz_off = XEON_PBAR23SZ_OFFSET;
	ntb->bar_info[NTB_B2B_BAR_1].ssz_off = XEON_SBAR23SZ_OFFSET;
	ntb->bar_info[NTB_B2B_BAR_1].pbarxlat_off = XEON_PBAR2XLAT_OFFSET;

	ntb->bar_info[NTB_B2B_BAR_2].pci_resource_id = PCIR_BAR(4);
	/* XXX Are shared MW B2Bs write-combining? */
	if (HAS_FEATURE(NTB_SDOORBELL_LOCKUP) && !HAS_FEATURE(NTB_SPLIT_BAR))
		rc = map_mmr_bar(ntb, &ntb->bar_info[NTB_B2B_BAR_2]);
	else
		rc = map_memory_window_bar(ntb, &ntb->bar_info[NTB_B2B_BAR_2]);
	ntb->bar_info[NTB_B2B_BAR_2].psz_off = XEON_PBAR4SZ_OFFSET;
	ntb->bar_info[NTB_B2B_BAR_2].ssz_off = XEON_SBAR4SZ_OFFSET;
	ntb->bar_info[NTB_B2B_BAR_2].pbarxlat_off = XEON_PBAR4XLAT_OFFSET;

	if (!HAS_FEATURE(NTB_SPLIT_BAR))
		goto out;

	ntb->bar_info[NTB_B2B_BAR_3].pci_resource_id = PCIR_BAR(5);
	if (HAS_FEATURE(NTB_SDOORBELL_LOCKUP))
		rc = map_mmr_bar(ntb, &ntb->bar_info[NTB_B2B_BAR_3]);
	else
		rc = map_memory_window_bar(ntb, &ntb->bar_info[NTB_B2B_BAR_3]);
	ntb->bar_info[NTB_B2B_BAR_3].psz_off = XEON_PBAR5SZ_OFFSET;
	ntb->bar_info[NTB_B2B_BAR_3].ssz_off = XEON_SBAR5SZ_OFFSET;
	ntb->bar_info[NTB_B2B_BAR_3].pbarxlat_off = XEON_PBAR5XLAT_OFFSET;

out:
	if (rc != 0)
		device_printf(ntb->device,
		    "unable to allocate pci resource\n");
	return (rc);
}

/* Log a successful BAR mapping at boot verbosity. */
static void
print_map_success(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar,
    const char *kind)
{

	device_printf(ntb->device,
	    "Mapped BAR%d v:[%p-%p] p:[%p-%p] (0x%jx bytes) (%s)\n",
	    PCI_RID2BAR(bar->pci_resource_id), bar->vbase,
	    (char *)bar->vbase + bar->size - 1,
	    (void *)bar->pbase, (void *)(bar->pbase + bar->size - 1),
	    (uintmax_t)bar->size, kind);
}

/* Map a BAR holding memory-mapped registers (uncacheable, no WC). */
static int
map_mmr_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar)
{

	bar->pci_resource = bus_alloc_resource_any(ntb->device, SYS_RES_MEMORY,
	    &bar->pci_resource_id, RF_ACTIVE);
	if (bar->pci_resource == NULL)
		return (ENXIO);

	save_bar_parameters(bar);
	print_map_success(ntb, bar, "mmr");
	return (0);
}

/*
 * Map a memory-window BAR, applying the IVT misreported-size errata fix and
 * (optionally) marking the mapping write-combining for throughput.
 */
static int
map_memory_window_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar)
{
	int rc;
	uint8_t bar_size_bits = 0;

	bar->pci_resource = bus_alloc_resource_any(ntb->device, SYS_RES_MEMORY,
	    &bar->pci_resource_id, RF_ACTIVE);

	if (bar->pci_resource == NULL)
		return (ENXIO);

	save_bar_parameters(bar);
	/*
	 * Ivytown NTB BAR sizes are misreported by the hardware due to a
	 * hardware issue. To work around this, query the size it should be
	 * configured to by the device and modify the resource to correspond to
	 * this new size. The BIOS on systems with this problem is required to
	 * provide enough address space to allow the driver to make this change
	 * safely.
	 *
	 * Ideally I could have just specified the size when I allocated the
	 * resource like:
	 *  bus_alloc_resource(ntb->device,
	 *	SYS_RES_MEMORY, &bar->pci_resource_id, 0ul, ~0ul,
	 *	1ul << bar_size_bits, RF_ACTIVE);
	 * but the PCI driver does not honor the size in this call, so we have
	 * to modify it after the fact.
	 */
	if (HAS_FEATURE(NTB_BAR_SIZE_4K)) {
		if (bar->pci_resource_id == PCIR_BAR(2))
			bar_size_bits = pci_read_config(ntb->device,
			    XEON_PBAR23SZ_OFFSET, 1);
		else
			bar_size_bits = pci_read_config(ntb->device,
			    XEON_PBAR45SZ_OFFSET, 1);

		rc = bus_adjust_resource(ntb->device, SYS_RES_MEMORY,
		    bar->pci_resource, bar->pbase,
		    bar->pbase + (1ul << bar_size_bits) - 1);
		if (rc != 0) {
			device_printf(ntb->device,
			    "unable to resize bar\n");
			return (rc);
		}

		save_bar_parameters(bar);
	}

	print_map_success(ntb, bar, "mw");
	if (g_ntb_enable_wc == 0)
		return (0);

	/* Mark bar region as write combining to improve performance. */
	rc = pmap_change_attr((vm_offset_t)bar->vbase, bar->size,
	    VM_MEMATTR_WRITE_COMBINING);
	if (rc == 0)
		device_printf(ntb->device,
		    "Marked BAR%d v:[%p-%p] p:[%p-%p] as "
		    "WRITE_COMBINING.\n",
		    PCI_RID2BAR(bar->pci_resource_id), bar->vbase,
		    (char *)bar->vbase + bar->size - 1,
		    (void *)bar->pbase, (void *)(bar->pbase + bar->size - 1));
	else
		device_printf(ntb->device,
		    "Unable to mark BAR%d v:[%p-%p] p:[%p-%p] as "
		    "WRITE_COMBINING: %d\n",
		    PCI_RID2BAR(bar->pci_resource_id), bar->vbase,
		    (char *)bar->vbase + bar->size - 1,
		    (void *)bar->pbase, (void *)(bar->pbase + bar->size - 1),
		    rc);
	/* Proceed anyway */
	return (0);
}

/* Release every BAR resource that was successfully allocated. */
static void
ntb_unmap_pci_bar(struct ntb_softc *ntb)
{
	struct ntb_pci_bar_info *current_bar;
	int i;

	for (i = 0; i < NTB_MAX_BARS; i++) {
		current_bar = &ntb->bar_info[i];
		if (current_bar->pci_resource != NULL)
			bus_release_resource(ntb->device, SYS_RES_MEMORY,
			    current_bar->pci_resource_id,
			    current_bar->pci_resource);
	}
}

/*
 * Allocate and wire up num_vectors MSI-X interrupts, one ntb_vec cookie
 * each.  Partial progress is recorded in allocated_interrupts so that
 * ntb_teardown_interrupts() can unwind on failure.
 */
static int
ntb_setup_msix(struct ntb_softc *ntb, uint32_t num_vectors)
{
	uint32_t i;
	int rc;

	for (i = 0; i < num_vectors; i++) {
		/* MSI-X resource IDs are 1-based. */
		ntb->int_info[i].rid = i + 1;
		ntb->int_info[i].res = bus_alloc_resource_any(ntb->device,
		    SYS_RES_IRQ, &ntb->int_info[i].rid, RF_ACTIVE);
		if (ntb->int_info[i].res == NULL) {
			device_printf(ntb->device,
			    "bus_alloc_resource failed\n");
			return (ENOMEM);
		}
		ntb->int_info[i].tag = NULL;
		ntb->allocated_interrupts++;
		rc = bus_setup_intr(ntb->device, ntb->int_info[i].res,
		    INTR_MPSAFE | INTR_TYPE_MISC, NULL, ndev_vec_isr,
		    &ntb->msix_vec[i], &ntb->int_info[i].tag);
		if (rc != 0) {
			device_printf(ntb->device, "bus_setup_intr failed\n");
			return (ENXIO);
		}
	}
	return (0);
}

/*
 * The Linux NTB driver drops from MSI-X to legacy INTx if a unique vector
 * cannot be allocated for each MSI-X message.  JHB seems to think remapping
 * should be okay.  This tunable should enable us to test that hypothesis
 * when someone gets their hands on some Xeon hardware.
 */
static int ntb_force_remap_mode;
SYSCTL_INT(_hw_ntb, OID_AUTO, force_remap_mode, CTLFLAG_RDTUN,
    &ntb_force_remap_mode, 0, "If enabled, force MSI-X messages to be remapped"
    " to a smaller number of ithreads, even if the desired number are "
    "available");

/*
 * In case it is NOT ok, give consumers an abort button.
 */
static int ntb_prefer_intx;
SYSCTL_INT(_hw_ntb, OID_AUTO, prefer_intx_to_remap, CTLFLAG_RDTUN,
    &ntb_prefer_intx, 0, "If enabled, prefer to use legacy INTx mode rather "
    "than remapping MSI-X messages over available slots (match Linux driver "
    "behavior)");

/*
 * Remap the desired number of MSI-X messages to available ithreads in a simple
 * round-robin fashion.
 */
static int
ntb_remap_msix(device_t dev, uint32_t desired, uint32_t avail)
{
	u_int *vectors;
	uint32_t i;
	int rc;

	if (ntb_prefer_intx != 0)
		return (ENXIO);

	vectors = malloc(desired * sizeof(*vectors), M_NTB, M_ZERO | M_WAITOK);

	for (i = 0; i < desired; i++)
		vectors[i] = (i % avail) + 1;

	rc = pci_remap_msix(dev, desired, vectors);
	free(vectors, M_NTB);
	return (rc);
}

/*
 * Set up interrupt delivery: try one MSI-X vector per doorbell, fall back
 * to round-robin remapping, then to a single legacy interrupt.
 */
static int
ntb_init_isr(struct ntb_softc *ntb)
{
	uint32_t desired_vectors, num_vectors;
	int rc;

	ntb->allocated_interrupts = 0;
	ntb->last_ts = ticks;

	/*
	 * Mask all doorbell interrupts.
	 */
	ntb_db_set_mask(ntb, ntb->db_valid_mask);

	num_vectors = desired_vectors = MIN(pci_msix_count(ntb->device),
	    ntb->db_count);
	if (desired_vectors >= 1) {
		rc = pci_alloc_msix(ntb->device, &num_vectors);

		if (ntb_force_remap_mode != 0 && rc == 0 &&
		    num_vectors == desired_vectors)
			num_vectors--;

		if (rc == 0 && num_vectors < desired_vectors) {
			rc = ntb_remap_msix(ntb->device, desired_vectors,
			    num_vectors);
			if (rc == 0)
				num_vectors = desired_vectors;
			else
				pci_release_msi(ntb->device);
		}
		if (rc != 0)
			num_vectors = 1;
	} else
		num_vectors = 1;

	if (ntb->type == NTB_XEON && num_vectors < ntb->db_vec_count) {
		/* All doorbells funnel through one vector in legacy mode. */
		ntb->db_vec_count = 1;
		ntb->db_vec_shift = XEON_DB_TOTAL_SHIFT;
		rc = ntb_setup_legacy_interrupt(ntb);
	} else {
		ntb_create_msix_vec(ntb, num_vectors);
		rc = ntb_setup_msix(ntb, num_vectors);
	}
	if (rc != 0) {
		device_printf(ntb->device,
		    "Error allocating interrupts: %d\n", rc);
		ntb_free_msix_vec(ntb);
	}

	return (rc);
}

/* Fallback path: a single shared legacy INTx interrupt. */
static int
ntb_setup_legacy_interrupt(struct ntb_softc *ntb)
{
	int rc;

	ntb->int_info[0].rid = 0;
	ntb->int_info[0].res = bus_alloc_resource_any(ntb->device, SYS_RES_IRQ,
	    &ntb->int_info[0].rid, RF_SHAREABLE|RF_ACTIVE);
	if (ntb->int_info[0].res == NULL) {
		device_printf(ntb->device, "bus_alloc_resource failed\n");
		return (ENOMEM);
	}

	ntb->int_info[0].tag = NULL;
	ntb->allocated_interrupts = 1;

	rc = bus_setup_intr(ntb->device, ntb->int_info[0].res,
	    INTR_MPSAFE | INTR_TYPE_MISC, NULL, ndev_irq_isr,
	    ntb, &ntb->int_info[0].tag);
	if (rc != 0) {
		device_printf(ntb->device, "bus_setup_intr failed\n");
		return (ENXIO);
	}

	return (0);
}

/* Tear down however many interrupts were set up (handles partial setup). */
static void
ntb_teardown_interrupts(struct ntb_softc *ntb)
{
	struct ntb_int_info *current_int;
	int i;

	for (i = 0; i < ntb->allocated_interrupts; i++) {
		current_int = &ntb->int_info[i];
		if (current_int->tag != NULL)
			bus_teardown_intr(ntb->device, current_int->res,
			    current_int->tag);

		if (current_int->res != NULL)
			bus_release_resource(ntb->device, SYS_RES_IRQ,
			    rman_get_rid(current_int->res),
			    current_int->res);
	}

	ntb_free_msix_vec(ntb);
	pci_release_msi(ntb->device);
}

/*
 * Doorbell register and mask are 64-bit on Atom, 16-bit on Xeon.  Abstract it
 * out to make code clearer.
 */
static inline uint64_t
db_ioread(struct ntb_softc *ntb, uint64_t regoff)
{

	if (ntb->type == NTB_ATOM)
		return (ntb_reg_read(8, regoff));

	KASSERT(ntb->type == NTB_XEON, ("bad ntb type"));

	return (ntb_reg_read(2, regoff));
}

/*
 * Checked doorbell write: asserts only valid doorbell bits are set and that
 * db_mask_lock is held when touching the local mask register.
 */
static inline void
db_iowrite(struct ntb_softc *ntb, uint64_t regoff, uint64_t val)
{

	KASSERT((val & ~ntb->db_valid_mask) == 0,
	    ("%s: Invalid bits 0x%jx (valid: 0x%jx)", __func__,
	     (uintmax_t)(val & ~ntb->db_valid_mask),
	     (uintmax_t)ntb->db_valid_mask));

	if (regoff == ntb->self_reg->db_mask)
		DB_MASK_ASSERT(ntb, MA_OWNED);
	db_iowrite_raw(ntb, regoff, val);
}

/* Unchecked doorbell write; width depends on the hardware variant. */
static inline void
db_iowrite_raw(struct ntb_softc *ntb, uint64_t regoff, uint64_t val)
{

	if (ntb->type == NTB_ATOM) {
		ntb_reg_write(8, regoff, val);
		return;
	}

	KASSERT(ntb->type == NTB_XEON, ("bad ntb type"));
	ntb_reg_write(2, regoff, (uint16_t)val);
}

/* Mask (disable) the given doorbell interrupt bits. */
void
ntb_db_set_mask(struct ntb_softc *ntb, uint64_t bits)
{

	DB_MASK_LOCK(ntb);
	ntb->db_mask |= bits;
	db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask);
	DB_MASK_UNLOCK(ntb);
}

/* Unmask (enable) the given doorbell interrupt bits. */
void
ntb_db_clear_mask(struct ntb_softc *ntb, uint64_t bits)
{

	KASSERT((bits & ~ntb->db_valid_mask) == 0,
	    ("%s: Invalid bits 0x%jx (valid: 0x%jx)", __func__,
	     (uintmax_t)(bits & ~ntb->db_valid_mask),
	     (uintmax_t)ntb->db_valid_mask));

	DB_MASK_LOCK(ntb);
	ntb->db_mask &= ~bits;
	db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask);
	DB_MASK_UNLOCK(ntb);
}

/* Read the pending local doorbell bits. */
uint64_t
ntb_db_read(struct ntb_softc *ntb)
{

	return (db_ioread(ntb, ntb->self_reg->db_bell));
}

/* Acknowledge (clear) the given pending doorbell bits. */
void
ntb_db_clear(struct ntb_softc *ntb, uint64_t bits)
{

	KASSERT((bits & ~ntb->db_valid_mask) == 0,
	    ("%s: Invalid bits 0x%jx (valid: 0x%jx)", __func__,
	     (uintmax_t)(bits & ~ntb->db_valid_mask),
	     (uintmax_t)ntb->db_valid_mask));

	db_iowrite(ntb, ntb->self_reg->db_bell, bits);
}

/*
 * Compute the doorbell-bit mask serviced by one interrupt vector:
 * db_vec_shift contiguous bits starting at (shift * db_vector).
 */
static inline uint64_t
ntb_vec_mask(struct ntb_softc *ntb, uint64_t db_vector)
{
	uint64_t shift, mask;

	shift = ntb->db_vec_shift;
	mask = (1ull << shift) - 1;
	return (mask << (shift * db_vector));
}

/* Common interrupt body: dispatch link-change and doorbell events. */
static void
ntb_interrupt(struct ntb_softc *ntb, uint32_t vec)
{
	uint64_t vec_mask;

	ntb->last_ts = ticks;
	vec_mask = ntb_vec_mask(ntb, vec);

	if ((vec_mask & ntb->db_link_mask) != 0) {
		if (ntb_poll_link(ntb))
			ntb_link_event(ntb);
	}

	if ((vec_mask & ntb->db_valid_mask) != 0)
		ntb_db_event(ntb, vec);
}

/* MSI-X handler: recover softc and vector number from the ntb_vec cookie. */
static void
ndev_vec_isr(void *arg)
{
	struct ntb_vec *nvec = arg;

	ntb_interrupt(nvec->ntb, nvec->num);
}

static void
ndev_irq_isr(void *arg)
{
	/* If we couldn't set up MSI-X, we only have the one vector. */
	ntb_interrupt(arg, 0);
}

/* Allocate the per-vector cookies handed to ndev_vec_isr(). */
static int
ntb_create_msix_vec(struct ntb_softc *ntb, uint32_t num_vectors)
{
	uint32_t i;

	ntb->msix_vec = malloc(num_vectors * sizeof(*ntb->msix_vec), M_NTB,
	    M_ZERO | M_WAITOK);
	for (i = 0; i < num_vectors; i++) {
		ntb->msix_vec[i].num = i;
		ntb->msix_vec[i].ntb = ntb;
	}

	return (0);
}

static void
ntb_free_msix_vec(struct ntb_softc *ntb)
{

	if (ntb->msix_vec == NULL)
		return;

	free(ntb->msix_vec, M_NTB);
	ntb->msix_vec = NULL;
}

/* Linear scan of pci_ids[]; returns NULL if the device ID is unknown. */
static struct ntb_hw_info *
ntb_get_device_info(uint32_t device_id)
{
	struct ntb_hw_info *ep = pci_ids;

	while (ep->device_id) {
		if (ep->device_id == device_id)
			return (ep);
		++ep;
	}
	return (NULL);
}

static void
ntb_teardown_xeon(struct ntb_softc *ntb)
{

	/* reg is only set once the device registers have been mapped. */
	if (ntb->reg != NULL)
		ntb_link_disable(ntb);
}

/* Determine mw_count for this hardware variant (split-BAR adds one on Xeon). */
static void
ntb_detect_max_mw(struct ntb_softc *ntb)
{

	if (ntb->type == NTB_ATOM) {
		ntb->mw_count = ATOM_MW_COUNT;
		return;
	}

	if (HAS_FEATURE(NTB_SPLIT_BAR))
		ntb->mw_count = XEON_HSX_SPLIT_MW_COUNT;
	else
		ntb->mw_count = XEON_SNB_MW_COUNT;
}

/*
 * Decode the Xeon PPD register: USD/DSD direction, split-BAR mode, and the
 * connection type.  (Definition continues beyond this chunk.)
 */
static int
ntb_detect_xeon(struct ntb_softc *ntb)
{
	uint8_t ppd, conn_type;

	ppd = pci_read_config(ntb->device, NTB_PPD_OFFSET, 1);
	ntb->ppd = ppd;

	if ((ppd & XEON_PPD_DEV_TYPE) != 0)
		ntb->dev_type = NTB_DEV_DSD;
	else
		ntb->dev_type = NTB_DEV_USD;

	if ((ppd & XEON_PPD_SPLIT_BAR) != 0)
		ntb->features |= NTB_SPLIT_BAR;

	/* SB01BASE_LOCKUP errata is a superset of SDOORBELL errata */
	if (HAS_FEATURE(NTB_SB01BASE_LOCKUP))
		ntb->features |= NTB_SDOORBELL_LOCKUP;

	conn_type = ppd & XEON_PPD_CONN_TYPE;
	switch (conn_type) {
	case NTB_CONN_B2B:
		ntb->conn_type = conn_type;
		break;
	case
NTB_CONN_RP: 1205 case NTB_CONN_TRANSPARENT: 1206 default: 1207 device_printf(ntb->device, "Unsupported connection type: %u\n", 1208 (unsigned)conn_type); 1209 return (ENXIO); 1210 } 1211 return (0); 1212} 1213 1214static int 1215ntb_detect_atom(struct ntb_softc *ntb) 1216{ 1217 uint32_t ppd, conn_type; 1218 1219 ppd = pci_read_config(ntb->device, NTB_PPD_OFFSET, 4); 1220 ntb->ppd = ppd; 1221 1222 if ((ppd & ATOM_PPD_DEV_TYPE) != 0) 1223 ntb->dev_type = NTB_DEV_DSD; 1224 else 1225 ntb->dev_type = NTB_DEV_USD; 1226 1227 conn_type = (ppd & ATOM_PPD_CONN_TYPE) >> 8; 1228 switch (conn_type) { 1229 case NTB_CONN_B2B: 1230 ntb->conn_type = conn_type; 1231 break; 1232 default: 1233 device_printf(ntb->device, "Unsupported NTB configuration\n"); 1234 return (ENXIO); 1235 } 1236 return (0); 1237} 1238 1239static int 1240ntb_xeon_init_dev(struct ntb_softc *ntb) 1241{ 1242 int rc; 1243 1244 ntb->spad_count = XEON_SPAD_COUNT; 1245 ntb->db_count = XEON_DB_COUNT; 1246 ntb->db_link_mask = XEON_DB_LINK_BIT; 1247 ntb->db_vec_count = XEON_DB_MSIX_VECTOR_COUNT; 1248 ntb->db_vec_shift = XEON_DB_MSIX_VECTOR_SHIFT; 1249 1250 if (ntb->conn_type != NTB_CONN_B2B) { 1251 device_printf(ntb->device, "Connection type %d not supported\n", 1252 ntb->conn_type); 1253 return (ENXIO); 1254 } 1255 1256 ntb->reg = &xeon_reg; 1257 ntb->self_reg = &xeon_pri_reg; 1258 ntb->peer_reg = &xeon_b2b_reg; 1259 ntb->xlat_reg = &xeon_sec_xlat; 1260 1261 /* 1262 * There is a Xeon hardware errata related to writes to SDOORBELL or 1263 * B2BDOORBELL in conjunction with inbound access to NTB MMIO space, 1264 * which may hang the system. To workaround this use the second memory 1265 * window to access the interrupt and scratch pad registers on the 1266 * remote system. 
1267 */ 1268 if (HAS_FEATURE(NTB_SDOORBELL_LOCKUP)) 1269 /* Use the last MW for mapping remote spad */ 1270 ntb->b2b_mw_idx = ntb->mw_count - 1; 1271 else if (HAS_FEATURE(NTB_B2BDOORBELL_BIT14)) 1272 /* 1273 * HW Errata on bit 14 of b2bdoorbell register. Writes will not be 1274 * mirrored to the remote system. Shrink the number of bits by one, 1275 * since bit 14 is the last bit. 1276 * 1277 * On REGS_THRU_MW errata mode, we don't use the b2bdoorbell register 1278 * anyway. Nor for non-B2B connection types. 1279 */ 1280 ntb->db_count = XEON_DB_COUNT - 1; 1281 1282 ntb->db_valid_mask = (1ull << ntb->db_count) - 1; 1283 1284 if (ntb->dev_type == NTB_DEV_USD) 1285 rc = xeon_setup_b2b_mw(ntb, &xeon_b2b_dsd_addr, 1286 &xeon_b2b_usd_addr); 1287 else 1288 rc = xeon_setup_b2b_mw(ntb, &xeon_b2b_usd_addr, 1289 &xeon_b2b_dsd_addr); 1290 if (rc != 0) 1291 return (rc); 1292 1293 /* Enable Bus Master and Memory Space on the secondary side */ 1294 ntb_reg_write(2, XEON_SPCICMD_OFFSET, 1295 PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN); 1296 1297 /* 1298 * Mask all doorbell interrupts. 1299 */ 1300 ntb_db_set_mask(ntb, ntb->db_valid_mask); 1301 1302 rc = ntb_init_isr(ntb); 1303 return (rc); 1304} 1305 1306static int 1307ntb_atom_init_dev(struct ntb_softc *ntb) 1308{ 1309 int error; 1310 1311 KASSERT(ntb->conn_type == NTB_CONN_B2B, 1312 ("Unsupported NTB configuration (%d)\n", ntb->conn_type)); 1313 1314 ntb->spad_count = ATOM_SPAD_COUNT; 1315 ntb->db_count = ATOM_DB_COUNT; 1316 ntb->db_vec_count = ATOM_DB_MSIX_VECTOR_COUNT; 1317 ntb->db_vec_shift = ATOM_DB_MSIX_VECTOR_SHIFT; 1318 ntb->db_valid_mask = (1ull << ntb->db_count) - 1; 1319 1320 ntb->reg = &atom_reg; 1321 ntb->self_reg = &atom_pri_reg; 1322 ntb->peer_reg = &atom_b2b_reg; 1323 ntb->xlat_reg = &atom_sec_xlat; 1324 1325 /* 1326 * FIXME - MSI-X bug on early Atom HW, remove once internal issue is 1327 * resolved. Mask transaction layer internal parity errors. 
1328 */ 1329 pci_write_config(ntb->device, 0xFC, 0x4, 4); 1330 1331 configure_atom_secondary_side_bars(ntb); 1332 1333 /* Enable Bus Master and Memory Space on the secondary side */ 1334 ntb_reg_write(2, ATOM_SPCICMD_OFFSET, 1335 PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN); 1336 1337 error = ntb_init_isr(ntb); 1338 if (error != 0) 1339 return (error); 1340 1341 /* Initiate PCI-E link training */ 1342 ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO); 1343 1344 callout_reset(&ntb->heartbeat_timer, 0, atom_link_hb, ntb); 1345 1346 return (0); 1347} 1348 1349/* XXX: Linux driver doesn't seem to do any of this for Atom. */ 1350static void 1351configure_atom_secondary_side_bars(struct ntb_softc *ntb) 1352{ 1353 1354 if (ntb->dev_type == NTB_DEV_USD) { 1355 ntb_reg_write(8, ATOM_PBAR2XLAT_OFFSET, 1356 XEON_B2B_BAR2_ADDR64); 1357 ntb_reg_write(8, ATOM_PBAR4XLAT_OFFSET, 1358 XEON_B2B_BAR4_ADDR64); 1359 ntb_reg_write(8, ATOM_MBAR23_OFFSET, XEON_B2B_BAR2_ADDR64); 1360 ntb_reg_write(8, ATOM_MBAR45_OFFSET, XEON_B2B_BAR4_ADDR64); 1361 } else { 1362 ntb_reg_write(8, ATOM_PBAR2XLAT_OFFSET, 1363 XEON_B2B_BAR2_ADDR64); 1364 ntb_reg_write(8, ATOM_PBAR4XLAT_OFFSET, 1365 XEON_B2B_BAR4_ADDR64); 1366 ntb_reg_write(8, ATOM_MBAR23_OFFSET, XEON_B2B_BAR2_ADDR64); 1367 ntb_reg_write(8, ATOM_MBAR45_OFFSET, XEON_B2B_BAR4_ADDR64); 1368 } 1369} 1370 1371 1372/* 1373 * When working around Xeon SDOORBELL errata by remapping remote registers in a 1374 * MW, limit the B2B MW to half a MW. By sharing a MW, half the shared MW 1375 * remains for use by a higher layer. 1376 * 1377 * Will only be used if working around SDOORBELL errata and the BIOS-configured 1378 * MW size is sufficiently large. 1379 */ 1380static unsigned int ntb_b2b_mw_share; 1381SYSCTL_UINT(_hw_ntb, OID_AUTO, b2b_mw_share, CTLFLAG_RDTUN, &ntb_b2b_mw_share, 1382 0, "If enabled (non-zero), prefer to share half of the B2B peer register " 1383 "MW with higher level consumers. 
Both sides of the NTB MUST set the same " 1384 "value here."); 1385 1386static void 1387xeon_reset_sbar_size(struct ntb_softc *ntb, enum ntb_bar idx, 1388 enum ntb_bar regbar) 1389{ 1390 struct ntb_pci_bar_info *bar; 1391 uint8_t bar_sz; 1392 1393 if (!HAS_FEATURE(NTB_SPLIT_BAR) && idx >= NTB_B2B_BAR_3) 1394 return; 1395 1396 bar = &ntb->bar_info[idx]; 1397 bar_sz = pci_read_config(ntb->device, bar->psz_off, 1); 1398 if (idx == regbar) { 1399 if (ntb->b2b_off != 0) 1400 bar_sz--; 1401 else 1402 bar_sz = 0; 1403 } 1404 pci_write_config(ntb->device, bar->ssz_off, bar_sz, 1); 1405 bar_sz = pci_read_config(ntb->device, bar->ssz_off, 1); 1406 (void)bar_sz; 1407} 1408 1409static void 1410xeon_set_sbar_base_and_limit(struct ntb_softc *ntb, uint64_t bar_addr, 1411 enum ntb_bar idx, enum ntb_bar regbar) 1412{ 1413 uint64_t reg_val; 1414 uint32_t base_reg, lmt_reg; 1415 1416 bar_get_xlat_params(ntb, idx, &base_reg, NULL, &lmt_reg); 1417 if (idx == regbar) 1418 bar_addr += ntb->b2b_off; 1419 1420 if (!bar_is_64bit(ntb, idx)) { 1421 ntb_reg_write(4, base_reg, bar_addr); 1422 reg_val = ntb_reg_read(4, base_reg); 1423 (void)reg_val; 1424 1425 ntb_reg_write(4, lmt_reg, bar_addr); 1426 reg_val = ntb_reg_read(4, lmt_reg); 1427 (void)reg_val; 1428 } else { 1429 ntb_reg_write(8, base_reg, bar_addr); 1430 reg_val = ntb_reg_read(8, base_reg); 1431 (void)reg_val; 1432 1433 ntb_reg_write(8, lmt_reg, bar_addr); 1434 reg_val = ntb_reg_read(8, lmt_reg); 1435 (void)reg_val; 1436 } 1437} 1438 1439static void 1440xeon_set_pbar_xlat(struct ntb_softc *ntb, uint64_t base_addr, enum ntb_bar idx) 1441{ 1442 struct ntb_pci_bar_info *bar; 1443 1444 bar = &ntb->bar_info[idx]; 1445 if (HAS_FEATURE(NTB_SPLIT_BAR) && idx >= NTB_B2B_BAR_2) { 1446 ntb_reg_write(4, bar->pbarxlat_off, base_addr); 1447 base_addr = ntb_reg_read(4, bar->pbarxlat_off); 1448 } else { 1449 ntb_reg_write(8, bar->pbarxlat_off, base_addr); 1450 base_addr = ntb_reg_read(8, bar->pbarxlat_off); 1451 } 1452 (void)base_addr; 1453} 1454 
/*
 * Configure the memory window used to reach the peer's NTB registers (the
 * SDOORBELL_LOCKUP errata workaround).  'addr' is this side's BAR address
 * set and 'peer_addr' the remote side's; the caller picks them according to
 * USD/DSD direction.  Depending on the b2b_mw_share tunable and BAR size,
 * the B2B window either shares half of the last MW or consumes it entirely.
 * Returns 0, or EIO if the BAR is too small.
 */
static int
xeon_setup_b2b_mw(struct ntb_softc *ntb, const struct ntb_b2b_addr *addr,
    const struct ntb_b2b_addr *peer_addr)
{
	struct ntb_pci_bar_info *b2b_bar;
	vm_size_t bar_size;
	uint64_t bar_addr;
	enum ntb_bar b2b_bar_num, i;

	if (ntb->b2b_mw_idx == B2B_MW_DISABLED) {
		/* No dedicated MW; B2B registers live in the config BAR. */
		b2b_bar = NULL;
		b2b_bar_num = NTB_CONFIG_BAR;
		ntb->b2b_off = 0;
	} else {
		b2b_bar_num = ntb_mw_to_bar(ntb, ntb->b2b_mw_idx);
		KASSERT(b2b_bar_num > 0 && b2b_bar_num < NTB_MAX_BARS,
		    ("invalid b2b mw bar"));

		b2b_bar = &ntb->bar_info[b2b_bar_num];
		bar_size = b2b_bar->size;

		if (ntb_b2b_mw_share != 0 &&
		    (bar_size >> 1) >= XEON_B2B_MIN_SIZE)
			/* Share: B2B registers in the upper half of the MW. */
			ntb->b2b_off = bar_size >> 1;
		else if (bar_size >= XEON_B2B_MIN_SIZE) {
			/* Consume the whole MW; one fewer MW for clients. */
			ntb->b2b_off = 0;
			ntb->mw_count--;
		} else {
			device_printf(ntb->device,
			    "B2B bar size is too small!\n");
			return (EIO);
		}
	}

	/*
	 * Reset the secondary bar sizes to match the primary bar sizes.
	 * (Except, disable or halve the size of the B2B secondary bar.)
	 */
	for (i = NTB_B2B_BAR_1; i < NTB_MAX_BARS; i++)
		xeon_reset_sbar_size(ntb, i, b2b_bar_num);

	/* Pick this side's address for the BAR holding the B2B registers. */
	bar_addr = 0;
	if (b2b_bar_num == NTB_CONFIG_BAR)
		bar_addr = addr->bar0_addr;
	else if (b2b_bar_num == NTB_B2B_BAR_1)
		bar_addr = addr->bar2_addr64;
	else if (b2b_bar_num == NTB_B2B_BAR_2 && !HAS_FEATURE(NTB_SPLIT_BAR))
		bar_addr = addr->bar4_addr64;
	else if (b2b_bar_num == NTB_B2B_BAR_2)
		bar_addr = addr->bar4_addr32;
	else if (b2b_bar_num == NTB_B2B_BAR_3)
		bar_addr = addr->bar5_addr32;
	else
		KASSERT(false, ("invalid bar"));

	ntb_reg_write(8, XEON_SBAR0BASE_OFFSET, bar_addr);

	/*
	 * Other SBARs are normally hit by the PBAR xlat, except for the b2b
	 * register BAR.  The B2B BAR is either disabled above or configured
	 * half-size.  It starts at PBAR xlat + offset.
	 *
	 * Also set up incoming BAR limits == base (zero length window).
	 */
	xeon_set_sbar_base_and_limit(ntb, addr->bar2_addr64, NTB_B2B_BAR_1,
	    b2b_bar_num);
	if (HAS_FEATURE(NTB_SPLIT_BAR)) {
		xeon_set_sbar_base_and_limit(ntb, addr->bar4_addr32,
		    NTB_B2B_BAR_2, b2b_bar_num);
		xeon_set_sbar_base_and_limit(ntb, addr->bar5_addr32,
		    NTB_B2B_BAR_3, b2b_bar_num);
	} else
		xeon_set_sbar_base_and_limit(ntb, addr->bar4_addr64,
		    NTB_B2B_BAR_2, b2b_bar_num);

	/* Zero incoming translation addrs */
	ntb_reg_write(8, XEON_SBAR2XLAT_OFFSET, 0);
	ntb_reg_write(8, XEON_SBAR4XLAT_OFFSET, 0);

	/* Zero outgoing translation limits (whole bar size windows) */
	ntb_reg_write(8, XEON_PBAR2LMT_OFFSET, 0);
	ntb_reg_write(8, XEON_PBAR4LMT_OFFSET, 0);

	/* Set outgoing translation offsets */
	xeon_set_pbar_xlat(ntb, peer_addr->bar2_addr64, NTB_B2B_BAR_1);
	if (HAS_FEATURE(NTB_SPLIT_BAR)) {
		xeon_set_pbar_xlat(ntb, peer_addr->bar4_addr32, NTB_B2B_BAR_2);
		xeon_set_pbar_xlat(ntb, peer_addr->bar5_addr32, NTB_B2B_BAR_3);
	} else
		xeon_set_pbar_xlat(ntb, peer_addr->bar4_addr64, NTB_B2B_BAR_2);

	/* Set the translation offset for B2B registers */
	bar_addr = 0;
	if (b2b_bar_num == NTB_CONFIG_BAR)
		bar_addr = peer_addr->bar0_addr;
	else if (b2b_bar_num == NTB_B2B_BAR_1)
		bar_addr = peer_addr->bar2_addr64;
	else if (b2b_bar_num == NTB_B2B_BAR_2 && !HAS_FEATURE(NTB_SPLIT_BAR))
		bar_addr = peer_addr->bar4_addr64;
	else if (b2b_bar_num == NTB_B2B_BAR_2)
		bar_addr = peer_addr->bar4_addr32;
	else if (b2b_bar_num == NTB_B2B_BAR_3)
		bar_addr = peer_addr->bar5_addr32;
	else
		KASSERT(false, ("invalid bar"));

	/*
	 * B2B_XLAT_OFFSET is a 64-bit register but can only be written 32 bits
	 * at a time.
	 */
	ntb_reg_write(4, XEON_B2B_XLAT_OFFSETL, bar_addr & 0xffffffff);
	ntb_reg_write(4, XEON_B2B_XLAT_OFFSETU, bar_addr >> 32);
	return (0);
}

/*
 * Link-up predicate based on cached state (lnk_sta / ntb_ctl); transparent
 * Xeon configurations are always considered up.
 */
static inline bool
link_is_up(struct ntb_softc *ntb)
{

	if (ntb->type == NTB_XEON) {
		if (ntb->conn_type == NTB_CONN_TRANSPARENT)
			return (true);
		return ((ntb->lnk_sta & NTB_LINK_STATUS_ACTIVE) != 0);
	}

	KASSERT(ntb->type == NTB_ATOM, ("ntb type"));
	return ((ntb->ntb_ctl & ATOM_CNTL_LINK_DOWN) == 0);
}

/*
 * Check the Atom error registers (LTSSM forced-detect, IBIST overflow) for a
 * hard link error that requires the recovery sequence.
 */
static inline bool
atom_link_is_err(struct ntb_softc *ntb)
{
	uint32_t status;

	KASSERT(ntb->type == NTB_ATOM, ("ntb type"));

	status = ntb_reg_read(4, ATOM_LTSSMSTATEJMP_OFFSET);
	if ((status & ATOM_LTSSMSTATEJMP_FORCEDETECT) != 0)
		return (true);

	status = ntb_reg_read(4, ATOM_IBSTERRRCRVSTS0_OFFSET);
	return ((status & ATOM_IBIST_ERR_OFLOW) != 0);
}

/* Atom does not have link status interrupt, poll on that platform */
static void
atom_link_hb(void *arg)
{
	struct ntb_softc *ntb = arg;
	sbintime_t timo, poll_ts;

	/*
	 * NOTE(review): timo/poll_ts hold 'ticks' values despite the
	 * sbintime_t declaration — last_ts, the subtraction below, and
	 * callout_reset() all operate in ticks.
	 */
	timo = NTB_HB_TIMEOUT * hz;
	poll_ts = ntb->last_ts + timo;

	/*
	 * Delay polling the link status if an interrupt was received, unless
	 * the cached link status says the link is down.
	 */
	if ((sbintime_t)ticks - poll_ts < 0 && link_is_up(ntb)) {
		timo = poll_ts - ticks;
		goto out;
	}

	if (ntb_poll_link(ntb))
		ntb_link_event(ntb);

	if (!link_is_up(ntb) && atom_link_is_err(ntb)) {
		/* Link is down with error, proceed with recovery */
		callout_reset(&ntb->lr_timer, 0, recover_atom_link, ntb);
		return;
	}

out:
	callout_reset(&ntb->heartbeat_timer, timo, atom_link_hb, ntb);
}

/*
 * Reset the Atom ModPhy lanes and clear the sticky error state so the NTB
 * state machine can retrain the link.
 */
static void
atom_perform_link_restart(struct ntb_softc *ntb)
{
	uint32_t status;

	/* Driver resets the NTB ModPhy lanes - magic! */
	ntb_reg_write(1, ATOM_MODPHY_PCSREG6, 0xe0);
	ntb_reg_write(1, ATOM_MODPHY_PCSREG4, 0x40);
	ntb_reg_write(1, ATOM_MODPHY_PCSREG4, 0x60);
	ntb_reg_write(1, ATOM_MODPHY_PCSREG6, 0x60);

	/* Driver waits 100ms to allow the NTB ModPhy to settle */
	pause("ModPhy", hz / 10);

	/* Clear AER Errors, write to clear */
	status = ntb_reg_read(4, ATOM_ERRCORSTS_OFFSET);
	status &= PCIM_AER_COR_REPLAY_ROLLOVER;
	ntb_reg_write(4, ATOM_ERRCORSTS_OFFSET, status);

	/* Clear unexpected electrical idle event in LTSSM, write to clear */
	status = ntb_reg_read(4, ATOM_LTSSMERRSTS0_OFFSET);
	status |= ATOM_LTSSMERRSTS0_UNEXPECTEDEI;
	ntb_reg_write(4, ATOM_LTSSMERRSTS0_OFFSET, status);

	/* Clear DeSkew Buffer error, write to clear */
	status = ntb_reg_read(4, ATOM_DESKEWSTS_OFFSET);
	status |= ATOM_DESKEWSTS_DBERR;
	ntb_reg_write(4, ATOM_DESKEWSTS_OFFSET, status);

	status = ntb_reg_read(4, ATOM_IBSTERRRCRVSTS0_OFFSET);
	status &= ATOM_IBIST_ERR_OFLOW;
	ntb_reg_write(4, ATOM_IBSTERRRCRVSTS0_OFFSET, status);

	/* Releases the NTB state machine to allow the link to retrain */
	status = ntb_reg_read(4, ATOM_LTSSMSTATEJMP_OFFSET);
	status &= ~ATOM_LTSSMSTATEJMP_FORCEDETECT;
	ntb_reg_write(4, ATOM_LTSSMSTATEJMP_OFFSET, status);
}

/*
 * ntb_set_ctx() - associate a driver context with an ntb device
 * @ntb:        NTB device context
 * @ctx:        Driver context
 * @ctx_ops:    Driver context operations
 *
 * Associate a driver context and operations with a ntb device.  The context is
 * provided by the client driver, and the driver may associate a different
 * context with each ntb device.
 *
 * Return: Zero if the context is associated, otherwise an error number.
 */
int
ntb_set_ctx(struct ntb_softc *ntb, void *ctx, const struct ntb_ctx_ops *ops)
{

	if (ctx == NULL || ops == NULL)
		return (EINVAL);
	/* Unlocked fast-path check; re-checked under the lock below. */
	if (ntb->ctx_ops != NULL)
		return (EINVAL);

	CTX_LOCK(ntb);
	if (ntb->ctx_ops != NULL) {
		CTX_UNLOCK(ntb);
		return (EINVAL);
	}
	ntb->ntb_ctx = ctx;
	ntb->ctx_ops = ops;
	CTX_UNLOCK(ntb);

	return (0);
}

/*
 * It is expected that this will only be used from contexts where the ctx_lock
 * is not needed to protect ntb_ctx lifetime.
 */
void *
ntb_get_ctx(struct ntb_softc *ntb, const struct ntb_ctx_ops **ops)
{

	KASSERT(ntb->ntb_ctx != NULL && ntb->ctx_ops != NULL, ("bogus"));
	if (ops != NULL)
		*ops = ntb->ctx_ops;
	return (ntb->ntb_ctx);
}

/*
 * ntb_clear_ctx() - disassociate any driver context from an ntb device
 * @ntb:        NTB device context
 *
 * Clear any association that may exist between a driver context and the ntb
 * device.
 */
void
ntb_clear_ctx(struct ntb_softc *ntb)
{

	CTX_LOCK(ntb);
	ntb->ntb_ctx = NULL;
	ntb->ctx_ops = NULL;
	CTX_UNLOCK(ntb);
}

/*
 * ntb_link_event() - notify driver context of a change in link status
 * @ntb:        NTB device context
 *
 * Notify the driver context that the link status may have changed.  The driver
 * should call ntb_link_is_up() to get the current status.
 */
void
ntb_link_event(struct ntb_softc *ntb)
{

	CTX_LOCK(ntb);
	if (ntb->ctx_ops != NULL && ntb->ctx_ops->link_event != NULL)
		ntb->ctx_ops->link_event(ntb->ntb_ctx);
	CTX_UNLOCK(ntb);
}

/*
 * ntb_db_event() - notify driver context of a doorbell event
 * @ntb:        NTB device context
 * @vector:     Interrupt vector number
 *
 * Notify the driver context of a doorbell event.  If hardware supports
 * multiple interrupt vectors for doorbells, the vector number indicates which
 * vector received the interrupt.  The vector number is relative to the first
 * vector used for doorbells, starting at zero, and must be less than
 * ntb_db_vector_count().  The driver may call ntb_db_read() to check which
 * doorbell bits need service, and ntb_db_vector_mask() to determine which of
 * those bits are associated with the vector number.
 */
static void
ntb_db_event(struct ntb_softc *ntb, uint32_t vec)
{

	CTX_LOCK(ntb);
	if (ntb->ctx_ops != NULL && ntb->ctx_ops->db_event != NULL)
		ntb->ctx_ops->db_event(ntb->ntb_ctx, vec);
	CTX_UNLOCK(ntb);
}

/*
 * ntb_link_enable() - enable the link on the secondary side of the ntb
 * @ntb:        NTB device context
 * @max_speed:  The maximum link speed expressed as PCIe generation number[0]
 * @max_width:  The maximum link width expressed as the number of PCIe lanes[0]
 *
 * Enable the link on the secondary side of the ntb.  This can only be done
 * from the primary side of the ntb in primary or b2b topology.  The ntb device
 * should train the link to its maximum speed and width, or the requested speed
 * and width, whichever is smaller, if supported.
 *
 * Return: Zero on success, otherwise an error number.
 *
 * [0]: Only NTB_SPEED_AUTO and NTB_WIDTH_AUTO are valid inputs; other speed
 *      and width input will be ignored.
 */
int
ntb_link_enable(struct ntb_softc *ntb, enum ntb_speed s __unused,
    enum ntb_width w __unused)
{
	uint32_t cntl;

	if (ntb->type == NTB_ATOM) {
		/* Atom: link training is kicked off via the PPD register. */
		pci_write_config(ntb->device, NTB_PPD_OFFSET,
		    ntb->ppd | ATOM_PPD_INIT_LINK, 4);
		return (0);
	}

	if (ntb->conn_type == NTB_CONN_TRANSPARENT) {
		ntb_link_event(ntb);
		return (0);
	}

	/* Clear the disable/lock bits and enable BAR snooping. */
	cntl = ntb_reg_read(4, ntb->reg->ntb_ctl);
	cntl &= ~(NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK);
	cntl |= NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP;
	cntl |= NTB_CNTL_P2S_BAR4_SNOOP | NTB_CNTL_S2P_BAR4_SNOOP;
	if (HAS_FEATURE(NTB_SPLIT_BAR))
		cntl |= NTB_CNTL_P2S_BAR5_SNOOP | NTB_CNTL_S2P_BAR5_SNOOP;
	ntb_reg_write(4, ntb->reg->ntb_ctl, cntl);
	return (0);
}

/*
 * ntb_link_disable() - disable the link on the secondary side of the ntb
 * @ntb:        NTB device context
 *
 * Disable the link on the secondary side of the ntb.  This can only be done
 * from the primary side of the ntb in primary or b2b topology.  The ntb device
 * should disable the link.  Returning from this call must indicate that a
 * barrier has passed, though with no more writes may pass in either direction
 * across the link, except if this call returns an error number.
 *
 * Return: Zero on success, otherwise an error number.
 */
int
ntb_link_disable(struct ntb_softc *ntb)
{
	uint32_t cntl;

	if (ntb->conn_type == NTB_CONN_TRANSPARENT) {
		ntb_link_event(ntb);
		return (0);
	}

	/* Inverse of ntb_link_enable(): drop snooping, set disable/lock. */
	cntl = ntb_reg_read(4, ntb->reg->ntb_ctl);
	cntl &= ~(NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP);
	cntl &= ~(NTB_CNTL_P2S_BAR4_SNOOP | NTB_CNTL_S2P_BAR4_SNOOP);
	if (HAS_FEATURE(NTB_SPLIT_BAR))
		cntl &= ~(NTB_CNTL_P2S_BAR5_SNOOP | NTB_CNTL_S2P_BAR5_SNOOP);
	cntl |= NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK;
	ntb_reg_write(4, ntb->reg->ntb_ctl, cntl);
	return (0);
}

/*
 * Callout handler: attempt Atom link recovery.  Restarts the ModPhy, waits
 * a randomized interval, and either resumes the heartbeat (recovered or
 * link administratively down) or reschedules itself to retry.
 */
static void
recover_atom_link(void *arg)
{
	struct ntb_softc *ntb = arg;
	unsigned speed, width, oldspeed, oldwidth;
	uint32_t status32;

	atom_perform_link_restart(ntb);

	/*
	 * There is a potential race between the 2 NTB devices recovering at
	 * the same time.  If the times are the same, the link will not recover
	 * and the driver will be stuck in this loop forever.  Add a random
	 * interval to the recovery time to prevent this race.
	 */
	status32 = arc4random() % ATOM_LINK_RECOVERY_TIME;
	pause("Link", (ATOM_LINK_RECOVERY_TIME + status32) * hz / 1000);

	if (atom_link_is_err(ntb))
		goto retry;

	status32 = ntb_reg_read(4, ntb->reg->ntb_ctl);
	if ((status32 & ATOM_CNTL_LINK_DOWN) != 0)
		goto out;

	/* Retry unless speed and width match the cached link status. */
	status32 = ntb_reg_read(4, ntb->reg->lnk_sta);
	width = NTB_LNK_STA_WIDTH(status32);
	speed = status32 & NTB_LINK_SPEED_MASK;

	oldwidth = NTB_LNK_STA_WIDTH(ntb->lnk_sta);
	oldspeed = ntb->lnk_sta & NTB_LINK_SPEED_MASK;
	if (oldwidth != width || oldspeed != speed)
		goto retry;

out:
	callout_reset(&ntb->heartbeat_timer, NTB_HB_TIMEOUT * hz, atom_link_hb,
	    ntb);
	return;

retry:
	callout_reset(&ntb->lr_timer, NTB_HB_TIMEOUT * hz, recover_atom_link,
	    ntb);
}

/*
 * Polls the HW link status register(s); returns true if something has changed.
 */
static bool
ntb_poll_link(struct ntb_softc *ntb)
{
	uint32_t ntb_cntl;
	uint16_t reg_val;

	if (ntb->type == NTB_ATOM) {
		ntb_cntl = ntb_reg_read(4, ntb->reg->ntb_ctl);
		if (ntb_cntl == ntb->ntb_ctl)
			return (false);

		ntb->ntb_ctl = ntb_cntl;
		ntb->lnk_sta = ntb_reg_read(4, ntb->reg->lnk_sta);
	} else {
		/*
		 * Write the link-status doorbell bit to the local bell
		 * register before re-reading the link status — presumably
		 * acknowledging the link doorbell; confirm against the Xeon
		 * NTB documentation.
		 */
		db_iowrite_raw(ntb, ntb->self_reg->db_bell, ntb->db_link_mask);

		reg_val = pci_read_config(ntb->device, ntb->reg->lnk_sta, 2);
		if (reg_val == ntb->lnk_sta)
			return (false);

		ntb->lnk_sta = reg_val;
	}
	return (true);
}

/* Current link speed from the cached link status; NONE when link is down. */
static inline enum ntb_speed
ntb_link_sta_speed(struct ntb_softc *ntb)
{

	if (!link_is_up(ntb))
		return (NTB_SPEED_NONE);
	return (ntb->lnk_sta & NTB_LINK_SPEED_MASK);
}

/* Current link width from the cached link status; NONE when link is down. */
static inline enum ntb_width
ntb_link_sta_width(struct ntb_softc *ntb)
{

	if (!link_is_up(ntb))
		return (NTB_WIDTH_NONE);
	return (NTB_LNK_STA_WIDTH(ntb->lnk_sta));
}

1939SYSCTL_NODE(_hw_ntb, OID_AUTO, debug_info, CTLFLAG_RW, 0, 1940 "Driver state, statistics, and HW registers"); 1941 1942#define NTB_REGSZ_MASK (3ul << 30) 1943#define NTB_REG_64 (1ul << 30) 1944#define NTB_REG_32 (2ul << 30) 1945#define NTB_REG_16 (3ul << 30) 1946#define NTB_REG_8 (0ul << 30) 1947 1948#define NTB_DB_READ (1ul << 29) 1949#define NTB_PCI_REG (1ul << 28) 1950#define NTB_REGFLAGS_MASK (NTB_REGSZ_MASK | NTB_DB_READ | NTB_PCI_REG) 1951 1952static void 1953ntb_sysctl_init(struct ntb_softc *ntb) 1954{ 1955 struct sysctl_oid_list *tree_par, *regpar, *statpar, *errpar; 1956 struct sysctl_ctx_list *ctx; 1957 struct sysctl_oid *tree, *tmptree; 1958 1959 ctx = device_get_sysctl_ctx(ntb->device); 1960 1961 tree = SYSCTL_ADD_NODE(ctx, 1962 SYSCTL_CHILDREN(device_get_sysctl_tree(ntb->device)), OID_AUTO, 1963 "debug_info", CTLFLAG_RD, NULL, 1964 "Driver state, statistics, and HW registers"); 1965 tree_par = SYSCTL_CHILDREN(tree); 1966 1967 SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "conn_type", CTLFLAG_RD, 1968 &ntb->conn_type, 0, "0 - Transparent; 1 - B2B; 2 - Root Port"); 1969 SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "dev_type", CTLFLAG_RD, 1970 &ntb->dev_type, 0, "0 - USD; 1 - DSD"); 1971 SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "ppd", CTLFLAG_RD, 1972 &ntb->ppd, 0, "Raw PPD register (cached)"); 1973 1974 if (ntb->b2b_mw_idx != B2B_MW_DISABLED) { 1975 SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "b2b_idx", CTLFLAG_RD, 1976 &ntb->b2b_mw_idx, 0, 1977 "Index of the MW used for B2B remote register access"); 1978 SYSCTL_ADD_UQUAD(ctx, tree_par, OID_AUTO, "b2b_off", 1979 CTLFLAG_RD, &ntb->b2b_off, 1980 "If non-zero, offset of B2B register region in shared MW"); 1981 } 1982 1983 SYSCTL_ADD_PROC(ctx, tree_par, OID_AUTO, "features", 1984 CTLFLAG_RD | CTLTYPE_STRING, ntb, 0, sysctl_handle_features, "A", 1985 "Features/errata of this NTB device"); 1986 1987 SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "ntb_ctl", CTLFLAG_RD, 1988 __DEVOLATILE(uint32_t *, &ntb->ntb_ctl), 0, 1989 "NTB 
CTL register (cached)"); 1990 SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "lnk_sta", CTLFLAG_RD, 1991 __DEVOLATILE(uint32_t *, &ntb->lnk_sta), 0, 1992 "LNK STA register (cached)"); 1993 1994 SYSCTL_ADD_PROC(ctx, tree_par, OID_AUTO, "link_status", 1995 CTLFLAG_RD | CTLTYPE_STRING, ntb, 0, sysctl_handle_link_status, 1996 "A", "Link status"); 1997 1998 SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "mw_count", CTLFLAG_RD, 1999 &ntb->mw_count, 0, "MW count (excl. non-shared B2B register BAR)"); 2000 SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "spad_count", CTLFLAG_RD, 2001 &ntb->spad_count, 0, "Scratchpad count"); 2002 SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "db_count", CTLFLAG_RD, 2003 &ntb->db_count, 0, "Doorbell count"); 2004 SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "db_vec_count", CTLFLAG_RD, 2005 &ntb->db_vec_count, 0, "Doorbell vector count"); 2006 SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "db_vec_shift", CTLFLAG_RD, 2007 &ntb->db_vec_shift, 0, "Doorbell vector shift"); 2008 2009 SYSCTL_ADD_UQUAD(ctx, tree_par, OID_AUTO, "db_valid_mask", CTLFLAG_RD, 2010 &ntb->db_valid_mask, "Doorbell valid mask"); 2011 SYSCTL_ADD_UQUAD(ctx, tree_par, OID_AUTO, "db_link_mask", CTLFLAG_RD, 2012 &ntb->db_link_mask, "Doorbell link mask"); 2013 SYSCTL_ADD_UQUAD(ctx, tree_par, OID_AUTO, "db_mask", CTLFLAG_RD, 2014 &ntb->db_mask, "Doorbell mask (cached)"); 2015 2016 tmptree = SYSCTL_ADD_NODE(ctx, tree_par, OID_AUTO, "registers", 2017 CTLFLAG_RD, NULL, "Raw HW registers (big-endian)"); 2018 regpar = SYSCTL_CHILDREN(tmptree); 2019 2020 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "ntbcntl", 2021 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 | 2022 ntb->reg->ntb_ctl, sysctl_handle_register, "IU", 2023 "NTB Control register"); 2024 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "lnkcap", 2025 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 | 2026 0x19c, sysctl_handle_register, "IU", 2027 "NTB Link Capabilities"); 2028 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "lnkcon", 2029 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 | 2030 0x1a0, 
sysctl_handle_register, "IU", 2031 "NTB Link Control register"); 2032 2033 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "db_mask", 2034 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2035 NTB_REG_64 | NTB_DB_READ | ntb->self_reg->db_mask, 2036 sysctl_handle_register, "QU", "Doorbell mask register"); 2037 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "db_bell", 2038 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2039 NTB_REG_64 | NTB_DB_READ | ntb->self_reg->db_bell, 2040 sysctl_handle_register, "QU", "Doorbell register"); 2041 2042 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat23", 2043 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2044 NTB_REG_64 | ntb->xlat_reg->bar2_xlat, 2045 sysctl_handle_register, "QU", "Incoming XLAT23 register"); 2046 if (HAS_FEATURE(NTB_SPLIT_BAR)) { 2047 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat4", 2048 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2049 NTB_REG_32 | ntb->xlat_reg->bar4_xlat, 2050 sysctl_handle_register, "IU", "Incoming XLAT4 register"); 2051 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat5", 2052 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2053 NTB_REG_32 | ntb->xlat_reg->bar5_xlat, 2054 sysctl_handle_register, "IU", "Incoming XLAT5 register"); 2055 } else { 2056 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat45", 2057 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2058 NTB_REG_64 | ntb->xlat_reg->bar4_xlat, 2059 sysctl_handle_register, "QU", "Incoming XLAT45 register"); 2060 } 2061 2062 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt23", 2063 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2064 NTB_REG_64 | ntb->xlat_reg->bar2_limit, 2065 sysctl_handle_register, "QU", "Incoming LMT23 register"); 2066 if (HAS_FEATURE(NTB_SPLIT_BAR)) { 2067 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt4", 2068 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2069 NTB_REG_32 | ntb->xlat_reg->bar4_limit, 2070 sysctl_handle_register, "IU", "Incoming LMT4 register"); 2071 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt5", 2072 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2073 NTB_REG_32 | ntb->xlat_reg->bar5_limit, 2074 
sysctl_handle_register, "IU", "Incoming LMT5 register"); 2075 } else { 2076 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt45", 2077 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2078 NTB_REG_64 | ntb->xlat_reg->bar4_limit, 2079 sysctl_handle_register, "QU", "Incoming LMT45 register"); 2080 } 2081 2082 if (ntb->type == NTB_ATOM) 2083 return; 2084 2085 tmptree = SYSCTL_ADD_NODE(ctx, regpar, OID_AUTO, "xeon_stats", 2086 CTLFLAG_RD, NULL, "Xeon HW statistics"); 2087 statpar = SYSCTL_CHILDREN(tmptree); 2088 SYSCTL_ADD_PROC(ctx, statpar, OID_AUTO, "upstream_mem_miss", 2089 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2090 NTB_REG_16 | XEON_USMEMMISS_OFFSET, 2091 sysctl_handle_register, "SU", "Upstream Memory Miss"); 2092 2093 tmptree = SYSCTL_ADD_NODE(ctx, regpar, OID_AUTO, "xeon_hw_err", 2094 CTLFLAG_RD, NULL, "Xeon HW errors"); 2095 errpar = SYSCTL_CHILDREN(tmptree); 2096 2097 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "ppd", 2098 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2099 NTB_REG_8 | NTB_PCI_REG | NTB_PPD_OFFSET, 2100 sysctl_handle_register, "CU", "PPD"); 2101 2102 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "pbar23_sz", 2103 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2104 NTB_REG_8 | NTB_PCI_REG | XEON_PBAR23SZ_OFFSET, 2105 sysctl_handle_register, "CU", "PBAR23 SZ (log2)"); 2106 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "pbar4_sz", 2107 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2108 NTB_REG_8 | NTB_PCI_REG | XEON_PBAR4SZ_OFFSET, 2109 sysctl_handle_register, "CU", "PBAR4 SZ (log2)"); 2110 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "pbar5_sz", 2111 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2112 NTB_REG_8 | NTB_PCI_REG | XEON_PBAR5SZ_OFFSET, 2113 sysctl_handle_register, "CU", "PBAR5 SZ (log2)"); 2114 2115 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar23_sz", 2116 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2117 NTB_REG_8 | NTB_PCI_REG | XEON_SBAR23SZ_OFFSET, 2118 sysctl_handle_register, "CU", "SBAR23 SZ (log2)"); 2119 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar4_sz", 2120 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2121 NTB_REG_8 | NTB_PCI_REG | 
XEON_SBAR4SZ_OFFSET, 2122 sysctl_handle_register, "CU", "SBAR4 SZ (log2)"); 2123 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar5_sz", 2124 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2125 NTB_REG_8 | NTB_PCI_REG | XEON_SBAR5SZ_OFFSET, 2126 sysctl_handle_register, "CU", "SBAR5 SZ (log2)"); 2127 2128 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "devsts", 2129 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2130 NTB_REG_16 | NTB_PCI_REG | XEON_DEVSTS_OFFSET, 2131 sysctl_handle_register, "SU", "DEVSTS"); 2132 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "lnksts", 2133 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2134 NTB_REG_16 | NTB_PCI_REG | XEON_LINK_STATUS_OFFSET, 2135 sysctl_handle_register, "SU", "LNKSTS"); 2136 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "slnksts", 2137 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2138 NTB_REG_16 | NTB_PCI_REG | XEON_SLINK_STATUS_OFFSET, 2139 sysctl_handle_register, "SU", "SLNKSTS"); 2140 2141 SYSCTL_ADD_PROC(ctx, errpar, OID_AUTO, "uncerrsts", 2142 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2143 NTB_REG_32 | NTB_PCI_REG | XEON_UNCERRSTS_OFFSET, 2144 sysctl_handle_register, "IU", "UNCERRSTS"); 2145 SYSCTL_ADD_PROC(ctx, errpar, OID_AUTO, "corerrsts", 2146 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2147 NTB_REG_32 | NTB_PCI_REG | XEON_CORERRSTS_OFFSET, 2148 sysctl_handle_register, "IU", "CORERRSTS"); 2149 2150 if (ntb->conn_type != NTB_CONN_B2B) 2151 return; 2152 2153 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat23", 2154 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2155 NTB_REG_64 | ntb->bar_info[NTB_B2B_BAR_1].pbarxlat_off, 2156 sysctl_handle_register, "QU", "Outgoing XLAT23 register"); 2157 if (HAS_FEATURE(NTB_SPLIT_BAR)) { 2158 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat4", 2159 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2160 NTB_REG_32 | ntb->bar_info[NTB_B2B_BAR_2].pbarxlat_off, 2161 sysctl_handle_register, "IU", "Outgoing XLAT4 register"); 2162 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat5", 2163 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2164 NTB_REG_32 | ntb->bar_info[NTB_B2B_BAR_3].pbarxlat_off, 2165 
sysctl_handle_register, "IU", "Outgoing XLAT5 register"); 2166 } else { 2167 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat45", 2168 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2169 NTB_REG_64 | ntb->bar_info[NTB_B2B_BAR_2].pbarxlat_off, 2170 sysctl_handle_register, "QU", "Outgoing XLAT45 register"); 2171 } 2172 2173 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_lmt23", 2174 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2175 NTB_REG_64 | XEON_PBAR2LMT_OFFSET, 2176 sysctl_handle_register, "QU", "Outgoing LMT23 register"); 2177 if (HAS_FEATURE(NTB_SPLIT_BAR)) { 2178 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_lmt4", 2179 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2180 NTB_REG_32 | XEON_PBAR4LMT_OFFSET, 2181 sysctl_handle_register, "IU", "Outgoing LMT4 register"); 2182 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_lmt5", 2183 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2184 NTB_REG_32 | XEON_PBAR5LMT_OFFSET, 2185 sysctl_handle_register, "IU", "Outgoing LMT5 register"); 2186 } else { 2187 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_lmt45", 2188 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2189 NTB_REG_64 | XEON_PBAR4LMT_OFFSET, 2190 sysctl_handle_register, "QU", "Outgoing LMT45 register"); 2191 } 2192 2193 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar01_base", 2194 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2195 NTB_REG_64 | ntb->xlat_reg->bar0_base, 2196 sysctl_handle_register, "QU", "Secondary BAR01 base register"); 2197 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar23_base", 2198 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2199 NTB_REG_64 | ntb->xlat_reg->bar2_base, 2200 sysctl_handle_register, "QU", "Secondary BAR23 base register"); 2201 if (HAS_FEATURE(NTB_SPLIT_BAR)) { 2202 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar4_base", 2203 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2204 NTB_REG_32 | ntb->xlat_reg->bar4_base, 2205 sysctl_handle_register, "IU", 2206 "Secondary BAR4 base register"); 2207 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar5_base", 2208 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2209 NTB_REG_32 | 
	    ntb->xlat_reg->bar5_base,
	    sysctl_handle_register, "IU",
	    "Secondary BAR5 base register");
	} else {
		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar45_base",
		    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
		    NTB_REG_64 | ntb->xlat_reg->bar4_base,
		    sysctl_handle_register, "QU",
		    "Secondary BAR45 base register");
	}
}

/*
 * Sysctl handler: render the device's feature/errata flag word as a
 * human-readable "%b" string (NTB_FEATURES_STR).  Read-only; an attempted
 * write returns EINVAL.
 */
static int
sysctl_handle_features(SYSCTL_HANDLER_ARGS)
{
	struct ntb_softc *ntb;
	struct sbuf sb;
	int error;

	error = 0;
	ntb = arg1;

	sbuf_new_for_sysctl(&sb, NULL, 256, req);

	sbuf_printf(&sb, "%b", ntb->features, NTB_FEATURES_STR);
	error = sbuf_finish(&sb);
	sbuf_delete(&sb);

	if (error || !req->newptr)
		return (error);
	return (EINVAL);
}

/*
 * Sysctl handler: report the current link state as either
 * "up / PCIe Gen <speed> / Width x<width>" or "down".  Read-only; an
 * attempted write returns EINVAL.
 */
static int
sysctl_handle_link_status(SYSCTL_HANDLER_ARGS)
{
	struct ntb_softc *ntb;
	struct sbuf sb;
	enum ntb_speed speed;
	enum ntb_width width;
	int error;

	error = 0;
	ntb = arg1;

	sbuf_new_for_sysctl(&sb, NULL, 32, req);

	if (ntb_link_is_up(ntb, &speed, &width))
		sbuf_printf(&sb, "up / PCIe Gen %u / Width x%u",
		    (unsigned)speed, (unsigned)width);
	else
		sbuf_printf(&sb, "down");

	error = sbuf_finish(&sb);
	sbuf_delete(&sb);

	if (error || !req->newptr)
		return (error);
	return (EINVAL);
}

/*
 * Sysctl handler shared by all register OIDs registered above.  arg2 packs
 * both the register offset (low bits, after masking NTB_REGFLAGS_MASK) and
 * access flags: NTB_REGSZ_MASK selects the access width (8/4/2/1 bytes),
 * NTB_DB_READ routes the read through db_ioread() (64-bit only), and
 * NTB_PCI_REG reads PCI config space instead of device MMIO.  The value is
 * emitted big-endian so "sysctl -x" output is legible.  Read-only; an
 * attempted write returns EINVAL.
 */
static int
sysctl_handle_register(SYSCTL_HANDLER_ARGS)
{
	struct ntb_softc *ntb;
	const void *outp;
	uintptr_t sz;
	uint64_t umv;
	char be[sizeof(umv)];
	size_t outsz;
	uint32_t reg;
	bool db, pci;
	int error;

	ntb = arg1;
	/* Split arg2 into the raw register offset and the flag bits. */
	reg = arg2 & ~NTB_REGFLAGS_MASK;
	sz = arg2 & NTB_REGSZ_MASK;
	db = (arg2 & NTB_DB_READ) != 0;
	pci = (arg2 & NTB_PCI_REG) != 0;

	/* A doorbell read and a PCI config read are mutually exclusive. */
	KASSERT(!(db && pci), ("bogus"));

	if (db) {
		KASSERT(sz == NTB_REG_64, ("bogus"));
		umv = db_ioread(ntb, reg);
		outsz = sizeof(uint64_t);
	} else {
		switch (sz) {
		case NTB_REG_64:
			if (pci)
				umv = pci_read_config(ntb->device, reg, 8);
			else
				umv = ntb_reg_read(8, reg);
			outsz = sizeof(uint64_t);
			break;
		case NTB_REG_32:
			if (pci)
				umv = pci_read_config(ntb->device, reg, 4);
			else
				umv = ntb_reg_read(4, reg);
			outsz = sizeof(uint32_t);
			break;
		case NTB_REG_16:
			if (pci)
				umv = pci_read_config(ntb->device, reg, 2);
			else
				umv = ntb_reg_read(2, reg);
			outsz = sizeof(uint16_t);
			break;
		case NTB_REG_8:
			if (pci)
				umv = pci_read_config(ntb->device, reg, 1);
			else
				umv = ntb_reg_read(1, reg);
			outsz = sizeof(uint8_t);
			break;
		default:
			panic("bogus");
			break;
		}
	}

	/* Encode bigendian so that sysctl -x is legible. */
	be64enc(be, umv);
	/* Skip the leading zero bytes of the 64-bit encoding. */
	outp = ((char *)be) + sizeof(umv) - outsz;

	error = SYSCTL_OUT(req, outp, outsz);
	if (error || !req->newptr)
		return (error);
	return (EINVAL);
}

/*
 * Public API to the rest of the OS
 */

/**
 * ntb_get_max_spads() - get the total scratch regs usable
 * @ntb: pointer to ntb_softc instance
 *
 * This function returns the max 32bit scratchpad registers usable by the
 * upper layer.
 *
 * RETURNS: total number of scratch pad registers available
 */
uint8_t
ntb_get_max_spads(struct ntb_softc *ntb)
{

	return (ntb->spad_count);
}

/* Number of memory windows this device exposes to consumers. */
uint8_t
ntb_mw_count(struct ntb_softc *ntb)
{

	return (ntb->mw_count);
}

/**
 * ntb_spad_write() - write to the secondary scratchpad register
 * @ntb: pointer to ntb_softc instance
 * @idx: index to the scratchpad register, 0 based
 * @val: the data value to put into the register
 *
 * This function allows writing of a 32bit value to the indexed scratchpad
 * register. The register resides on the secondary (external) side.
 * NOTE(review): the wording above looks like a stale copy-paste - the code
 * writes through ntb->self_reg (the local side); confirm against callers.
2376 * 2377 * RETURNS: An appropriate ERRNO error value on error, or zero for success. 2378 */ 2379int 2380ntb_spad_write(struct ntb_softc *ntb, unsigned int idx, uint32_t val) 2381{ 2382 2383 if (idx >= ntb->spad_count) 2384 return (EINVAL); 2385 2386 ntb_reg_write(4, ntb->self_reg->spad + idx * 4, val); 2387 2388 return (0); 2389} 2390 2391/** 2392 * ntb_spad_read() - read from the primary scratchpad register 2393 * @ntb: pointer to ntb_softc instance 2394 * @idx: index to scratchpad register, 0 based 2395 * @val: pointer to 32bit integer for storing the register value 2396 * 2397 * This function allows reading of the 32bit scratchpad register on 2398 * the primary (internal) side. 2399 * 2400 * RETURNS: An appropriate ERRNO error value on error, or zero for success. 2401 */ 2402int 2403ntb_spad_read(struct ntb_softc *ntb, unsigned int idx, uint32_t *val) 2404{ 2405 2406 if (idx >= ntb->spad_count) 2407 return (EINVAL); 2408 2409 *val = ntb_reg_read(4, ntb->self_reg->spad + idx * 4); 2410 2411 return (0); 2412} 2413 2414/** 2415 * ntb_peer_spad_write() - write to the secondary scratchpad register 2416 * @ntb: pointer to ntb_softc instance 2417 * @idx: index to the scratchpad register, 0 based 2418 * @val: the data value to put into the register 2419 * 2420 * This function allows writing of a 32bit value to the indexed scratchpad 2421 * register. The register resides on the secondary (external) side. 2422 * 2423 * RETURNS: An appropriate ERRNO error value on error, or zero for success. 
2424 */ 2425int 2426ntb_peer_spad_write(struct ntb_softc *ntb, unsigned int idx, uint32_t val) 2427{ 2428 2429 if (idx >= ntb->spad_count) 2430 return (EINVAL); 2431 2432 if (HAS_FEATURE(NTB_SDOORBELL_LOCKUP)) 2433 ntb_mw_write(4, XEON_SPAD_OFFSET + idx * 4, val); 2434 else 2435 ntb_reg_write(4, ntb->peer_reg->spad + idx * 4, val); 2436 2437 return (0); 2438} 2439 2440/** 2441 * ntb_peer_spad_read() - read from the primary scratchpad register 2442 * @ntb: pointer to ntb_softc instance 2443 * @idx: index to scratchpad register, 0 based 2444 * @val: pointer to 32bit integer for storing the register value 2445 * 2446 * This function allows reading of the 32bit scratchpad register on 2447 * the primary (internal) side. 2448 * 2449 * RETURNS: An appropriate ERRNO error value on error, or zero for success. 2450 */ 2451int 2452ntb_peer_spad_read(struct ntb_softc *ntb, unsigned int idx, uint32_t *val) 2453{ 2454 2455 if (idx >= ntb->spad_count) 2456 return (EINVAL); 2457 2458 if (HAS_FEATURE(NTB_SDOORBELL_LOCKUP)) 2459 *val = ntb_mw_read(4, XEON_SPAD_OFFSET + idx * 4); 2460 else 2461 *val = ntb_reg_read(4, ntb->peer_reg->spad + idx * 4); 2462 2463 return (0); 2464} 2465 2466/* 2467 * ntb_mw_get_range() - get the range of a memory window 2468 * @ntb: NTB device context 2469 * @idx: Memory window number 2470 * @base: OUT - the base address for mapping the memory window 2471 * @size: OUT - the size for mapping the memory window 2472 * @align: OUT - the base alignment for translating the memory window 2473 * @align_size: OUT - the size alignment for translating the memory window 2474 * 2475 * Get the range of a memory window. NULL may be given for any output 2476 * parameter if the value is not needed. The base and size may be used for 2477 * mapping the memory window, to access the peer memory. The alignment and 2478 * size may be used for translating the memory window, for the peer to access 2479 * memory on the local system. 
 *
 * Return: Zero on success, otherwise an error number.
 */
int
ntb_mw_get_range(struct ntb_softc *ntb, unsigned mw_idx, vm_paddr_t *base,
    caddr_t *vbase, size_t *size, size_t *align, size_t *align_size)
{
	struct ntb_pci_bar_info *bar;
	size_t bar_b2b_off;

	if (mw_idx >= ntb_mw_count(ntb))
		return (EINVAL);

	bar = &ntb->bar_info[ntb_mw_to_bar(ntb, mw_idx)];
	bar_b2b_off = 0;
	if (mw_idx == ntb->b2b_mw_idx) {
		/*
		 * A shared B2B window reserves b2b_off bytes at its start;
		 * hide that region from the consumer.
		 */
		KASSERT(ntb->b2b_off != 0,
		    ("user shouldn't get non-shared b2b mw"));
		bar_b2b_off = ntb->b2b_off;
	}

	if (base != NULL)
		*base = bar->pbase + bar_b2b_off;
	if (vbase != NULL)
		*vbase = bar->vbase + bar_b2b_off;
	if (size != NULL)
		*size = bar->size - bar_b2b_off;
	if (align != NULL)
		/* Translations must be aligned to the full BAR size. */
		*align = bar->size;
	if (align_size != NULL)
		*align_size = 1;
	return (0);
}

/*
 * ntb_mw_set_trans() - set the translation of a memory window
 * @ntb:  NTB device context
 * @idx:  Memory window number
 * @addr: The dma address local memory to expose to the peer
 * @size: The size of the local memory to expose to the peer
 *
 * Set the translation of a memory window.  The peer may access local memory
 * through the window starting at the address, up to the size.  The address
 * must be aligned to the alignment specified by ntb_mw_get_range().  The size
 * must be aligned to the size alignment specified by ntb_mw_get_range().
 *
 * Return: Zero on success, otherwise an error number.
 */
int
ntb_mw_set_trans(struct ntb_softc *ntb, unsigned idx, bus_addr_t addr,
    size_t size)
{
	struct ntb_pci_bar_info *bar;
	uint64_t base, limit, reg_val;
	size_t bar_size, mw_size;
	uint32_t base_reg, xlat_reg, limit_reg;
	enum ntb_bar bar_num;

	if (idx >= ntb_mw_count(ntb))
		return (EINVAL);

	bar_num = ntb_mw_to_bar(ntb, idx);
	bar = &ntb->bar_info[bar_num];

	bar_size = bar->size;
	if (idx == ntb->b2b_mw_idx)
		/* Part of this BAR is reserved for the B2B registers. */
		mw_size = bar_size - ntb->b2b_off;
	else
		mw_size = bar_size;

	/* Hardware requires that addr is aligned to bar size */
	if ((addr & (bar_size - 1)) != 0)
		return (EINVAL);

	if (size > mw_size)
		return (EINVAL);

	bar_get_xlat_params(ntb, bar_num, &base_reg, &xlat_reg, &limit_reg);

	/* limit == 0 leaves the whole window open (no limit). */
	limit = 0;
	if (bar_is_64bit(ntb, bar_num)) {
		base = ntb_reg_read(8, base_reg);

		/*
		 * NOTE(review): if limit_reg were 0 the writes below would
		 * target register offset 0; this assumes bar_get_xlat_params()
		 * always supplies a limit register for MW BARs - confirm.
		 */
		if (limit_reg != 0 && size != mw_size)
			limit = base + size;

		/* Set and verify translation address */
		ntb_reg_write(8, xlat_reg, addr);
		reg_val = ntb_reg_read(8, xlat_reg);
		if (reg_val != addr) {
			/* Read-back mismatch: undo and report I/O error. */
			ntb_reg_write(8, xlat_reg, 0);
			return (EIO);
		}

		/* Set and verify the limit */
		ntb_reg_write(8, limit_reg, limit);
		reg_val = ntb_reg_read(8, limit_reg);
		if (reg_val != limit) {
			/* Roll back both registers on failure. */
			ntb_reg_write(8, limit_reg, base);
			ntb_reg_write(8, xlat_reg, 0);
			return (EIO);
		}
	} else {
		/* Configure 32-bit (split) BAR MW */

		/* Both the address and the end of the range must fit in 32
		 * bits for a split BAR. */
		if ((addr & UINT32_MAX) != addr)
			return (EINVAL);
		if (((addr + size) & UINT32_MAX) != (addr + size))
			return (EINVAL);

		base = ntb_reg_read(4, base_reg);

		if (limit_reg != 0 && size != mw_size)
			limit = base + size;

		/* Set and verify translation address */
		ntb_reg_write(4, xlat_reg, addr);
		reg_val = ntb_reg_read(4, xlat_reg);
		if (reg_val != addr) {
			ntb_reg_write(4, xlat_reg, 0);
			return (EIO);
		}

		/* Set and verify the limit */
		ntb_reg_write(4, limit_reg, limit);
		reg_val = ntb_reg_read(4, limit_reg);
		if (reg_val != limit) {
			/* Roll back both registers on failure. */
			ntb_reg_write(4, limit_reg, base);
			ntb_reg_write(4, xlat_reg, 0);
			return (EIO);
		}
	}
	return (0);
}

/*
 * ntb_mw_clear_trans() - clear the translation of a memory window
 * @ntb: NTB device context
 * @idx: Memory window number
 *
 * Clear the translation of a memory window.  The peer may no longer access
 * local memory through the window.
 *
 * Return: Zero on success, otherwise an error number.
 */
int
ntb_mw_clear_trans(struct ntb_softc *ntb, unsigned mw_idx)
{

	/* An all-zero translation disables the window. */
	return (ntb_mw_set_trans(ntb, mw_idx, 0, 0));
}

/**
 * ntb_peer_db_set() - Set the doorbell on the secondary/external side
 * @ntb: pointer to ntb_softc instance
 * @bit: doorbell bits to ring
 *
 * This function allows triggering of a doorbell on the secondary/external
 * side that will initiate an interrupt on the remote host
 */
void
ntb_peer_db_set(struct ntb_softc *ntb, uint64_t bit)
{

	if (HAS_FEATURE(NTB_SDOORBELL_LOCKUP)) {
		/*
		 * SDOORBELL_LOCKUP errata: ring the peer doorbell through
		 * the memory window (2-byte write) instead of the register
		 * BAR.
		 */
		ntb_mw_write(2, XEON_PDOORBELL_OFFSET, bit);
		return;
	}

	db_iowrite(ntb, ntb->peer_reg->db_bell, bit);
}

/*
 * ntb_get_peer_db_addr() - Return the address of the remote doorbell register,
 * as well as the size of the register (via *sz_out).
 *
 * This function allows a caller using I/OAT DMA to chain the remote doorbell
 * ring to its memory window write.
 *
 * Note that writing the peer doorbell via a memory window will *not* generate
 * an interrupt on the remote host; that must be done separately.
2661 */ 2662bus_addr_t 2663ntb_get_peer_db_addr(struct ntb_softc *ntb, vm_size_t *sz_out) 2664{ 2665 struct ntb_pci_bar_info *bar; 2666 uint64_t regoff; 2667 2668 KASSERT(sz_out != NULL, ("must be non-NULL")); 2669 2670 if (!HAS_FEATURE(NTB_SDOORBELL_LOCKUP)) { 2671 bar = &ntb->bar_info[NTB_CONFIG_BAR]; 2672 regoff = ntb->peer_reg->db_bell; 2673 } else { 2674 KASSERT((HAS_FEATURE(NTB_SPLIT_BAR) && ntb->mw_count == 2) || 2675 (!HAS_FEATURE(NTB_SPLIT_BAR) && ntb->mw_count == 1), 2676 ("mw_count invalid after setup")); 2677 KASSERT(ntb->b2b_mw_idx != B2B_MW_DISABLED, 2678 ("invalid b2b idx")); 2679 2680 bar = &ntb->bar_info[ntb_mw_to_bar(ntb, ntb->b2b_mw_idx)]; 2681 regoff = XEON_PDOORBELL_OFFSET; 2682 } 2683 KASSERT(bar->pci_bus_tag != X86_BUS_SPACE_IO, ("uh oh")); 2684 2685 *sz_out = ntb->reg->db_size; 2686 /* HACK: Specific to current x86 bus implementation. */ 2687 return ((uint64_t)bar->pci_bus_handle + regoff); 2688} 2689 2690/* 2691 * ntb_db_valid_mask() - get a mask of doorbell bits supported by the ntb 2692 * @ntb: NTB device context 2693 * 2694 * Hardware may support different number or arrangement of doorbell bits. 2695 * 2696 * Return: A mask of doorbell bits supported by the ntb. 2697 */ 2698uint64_t 2699ntb_db_valid_mask(struct ntb_softc *ntb) 2700{ 2701 2702 return (ntb->db_valid_mask); 2703} 2704 2705/* 2706 * ntb_db_vector_mask() - get a mask of doorbell bits serviced by a vector 2707 * @ntb: NTB device context 2708 * @vector: Doorbell vector number 2709 * 2710 * Each interrupt vector may have a different number or arrangement of bits. 2711 * 2712 * Return: A mask of doorbell bits serviced by a vector. 
2713 */ 2714uint64_t 2715ntb_db_vector_mask(struct ntb_softc *ntb, uint32_t vector) 2716{ 2717 2718 if (vector > ntb->db_vec_count) 2719 return (0); 2720 return (ntb->db_valid_mask & ntb_vec_mask(ntb, vector)); 2721} 2722 2723/** 2724 * ntb_link_is_up() - get the current ntb link state 2725 * @ntb: NTB device context 2726 * @speed: OUT - The link speed expressed as PCIe generation number 2727 * @width: OUT - The link width expressed as the number of PCIe lanes 2728 * 2729 * RETURNS: true or false based on the hardware link state 2730 */ 2731bool 2732ntb_link_is_up(struct ntb_softc *ntb, enum ntb_speed *speed, 2733 enum ntb_width *width) 2734{ 2735 2736 if (speed != NULL) 2737 *speed = ntb_link_sta_speed(ntb); 2738 if (width != NULL) 2739 *width = ntb_link_sta_width(ntb); 2740 return (link_is_up(ntb)); 2741} 2742 2743static void 2744save_bar_parameters(struct ntb_pci_bar_info *bar) 2745{ 2746 2747 bar->pci_bus_tag = rman_get_bustag(bar->pci_resource); 2748 bar->pci_bus_handle = rman_get_bushandle(bar->pci_resource); 2749 bar->pbase = rman_get_start(bar->pci_resource); 2750 bar->size = rman_get_size(bar->pci_resource); 2751 bar->vbase = rman_get_virtual(bar->pci_resource); 2752} 2753 2754device_t 2755ntb_get_device(struct ntb_softc *ntb) 2756{ 2757 2758 return (ntb->device); 2759} 2760 2761/* Export HW-specific errata information. */ 2762bool 2763ntb_has_feature(struct ntb_softc *ntb, uint32_t feature) 2764{ 2765 2766 return (HAS_FEATURE(feature)); 2767} 2768