ntb_hw_intel.c revision 291032
/*-
 * Copyright (C) 2013 Intel Corporation
 * Copyright (C) 2015 EMC Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/ntb/ntb_hw/ntb_hw.c 291032 2015-11-18 22:20:31Z cem $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/bus.h>
#include <machine/pmap.h>
#include <machine/resource.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "ntb_regs.h"
#include "ntb_hw.h"

/*
 * The Non-Transparent Bridge (NTB) is a device on some Intel processors that
 * allows you to connect two systems using a PCI-e link.
 *
 * This module contains the hardware abstraction layer for the NTB. It allows
 * you to send and receive interrupts, map the memory windows and send and
 * receive messages in the scratch-pad registers.
 *
 * NOTE: Much of the code in this module is shared with Linux. Any patches may
 * be picked up and redistributed in Linux with a dual GPL/BSD license.
 */

/* Enough interrupt slots for whichever hardware flavor has more doorbells. */
#define MAX_MSIX_INTERRUPTS	MAX(XEON_DB_COUNT, ATOM_DB_COUNT)

#define NTB_HB_TIMEOUT		1 /* second */
#define ATOM_LINK_RECOVERY_TIME	500 /* ms */
/* Mask selecting the 4KB-aligned high bits of a BAR address. */
#define BAR_HIGH_MASK		(~((1ull << 12) - 1))

#define DEVICE2SOFTC(dev)	((struct ntb_softc *) device_get_softc(dev))

enum ntb_device_type {
	NTB_XEON,
	NTB_ATOM
};

/* ntb_conn_type are hardware numbers, cannot change. */
enum ntb_conn_type {
	NTB_CONN_TRANSPARENT = 0,
	NTB_CONN_B2B = 1,
	NTB_CONN_RP = 2,
};

/* Upstream- vs. downstream-of-the-link device (from the PPD register). */
enum ntb_b2b_direction {
	NTB_DEV_USD = 0,
	NTB_DEV_DSD = 1,
};

/* Logical BAR indices; NTB_CONFIG_BAR holds the register file. */
enum ntb_bar {
	NTB_CONFIG_BAR = 0,
	NTB_B2B_BAR_1,
	NTB_B2B_BAR_2,
	NTB_B2B_BAR_3,
	NTB_MAX_BARS
};

/* Device features and workarounds; expects a local `ntb` softc pointer. */
#define HAS_FEATURE(feature) \
	((ntb->features & (feature)) != 0)

/* One entry of the PCI ID -> description/feature-flag table below. */
struct ntb_hw_info {
	uint32_t		device_id;
	const char		*desc;
	enum ntb_device_type	type;
	uint32_t		features;
};

/* Per-BAR mapping state and the config-space offsets used to size it. */
struct ntb_pci_bar_info {
	bus_space_tag_t		pci_bus_tag;
	bus_space_handle_t	pci_bus_handle;
	int			pci_resource_id;
	struct resource		*pci_resource;
	vm_paddr_t		pbase;		/* physical base */
	caddr_t			vbase;		/* KVA base */
	vm_size_t		size;
	bool			mapped_wc : 1;	/* write-combining applied */

	/* Configuration register offsets */
	uint32_t		psz_off;	/* primary BAR size */
	uint32_t		ssz_off;	/* secondary BAR size */
	uint32_t		pbarxlat_off;	/* primary translation */
};

/* One allocated interrupt (legacy INTx or one MSI-X vector). */
struct ntb_int_info {
	struct resource	*res;
	int		rid;
	void		*tag;
};

/* Cookie handed to each MSI-X handler: the softc plus the vector number. */
struct ntb_vec {
	struct ntb_softc	*ntb;
	uint32_t		num;
};

/* Hardware-flavor-specific register offsets and geometry. */
struct ntb_reg {
	uint32_t	ntb_ctl;
	uint32_t	lnk_sta;
	uint8_t		db_size;		/* doorbell width in bytes */
	unsigned	mw_bar[NTB_MAX_BARS];	/* MW index -> BAR index */
};

/* Doorbell/scratchpad register set (self or peer side). */
struct ntb_alt_reg {
	uint32_t	db_bell;
	uint32_t	db_mask;
	uint32_t	spad;
};

/* Secondary-side BAR base/translation/limit register offsets. */
struct ntb_xlat_reg {
	uint32_t	bar0_base;
	uint32_t	bar2_base;
	uint32_t	bar4_base;
	uint32_t	bar5_base;

	uint32_t	bar2_xlat;
	uint32_t	bar4_xlat;
	uint32_t	bar5_xlat;

	uint32_t	bar2_limit;
	uint32_t	bar4_limit;
	uint32_t	bar5_limit;
};

/* Bus addresses used for the B2B window carve-up (tunable via sysctl). */
struct ntb_b2b_addr {
	uint64_t	bar0_addr;
	uint64_t	bar2_addr64;
	uint64_t	bar4_addr64;
	uint64_t	bar4_addr32;
	uint64_t	bar5_addr32;
};

/* Per-device driver state. */
struct ntb_softc {
	device_t		device;
	enum ntb_device_type	type;
	uint32_t		features;

	struct ntb_pci_bar_info	bar_info[NTB_MAX_BARS];
	struct ntb_int_info	int_info[MAX_MSIX_INTERRUPTS];
	uint32_t		allocated_interrupts;

	struct callout		heartbeat_timer;
	struct callout		lr_timer;

	void			*ntb_ctx;
	const struct ntb_ctx_ops *ctx_ops;
	struct ntb_vec		*msix_vec;
#define CTX_LOCK(sc)		mtx_lock(&(sc)->ctx_lock)
#define CTX_UNLOCK(sc)		mtx_unlock(&(sc)->ctx_lock)
#define CTX_ASSERT(sc,f)	mtx_assert(&(sc)->ctx_lock, (f))
	struct mtx		ctx_lock;

	uint32_t		ppd;
	enum ntb_conn_type	conn_type;
	enum ntb_b2b_direction	dev_type;

	/* Offset of peer bar0 in B2B BAR */
	uint64_t		b2b_off;
	/* Memory window used to access peer bar0 */
#define B2B_MW_DISABLED		UINT8_MAX
	uint8_t			b2b_mw_idx;

	uint8_t			mw_count;
	uint8_t			spad_count;
	uint8_t			db_count;
	uint8_t			db_vec_count;
	uint8_t			db_vec_shift;

	/* Protects local db_mask. */
#define DB_MASK_LOCK(sc)	mtx_lock_spin(&(sc)->db_mask_lock)
#define DB_MASK_UNLOCK(sc)	mtx_unlock_spin(&(sc)->db_mask_lock)
#define DB_MASK_ASSERT(sc,f)	mtx_assert(&(sc)->db_mask_lock, (f))
	struct mtx		db_mask_lock;

	volatile uint32_t	ntb_ctl;
	volatile uint32_t	lnk_sta;

	uint64_t		db_valid_mask;
	uint64_t		db_link_mask;
	uint64_t		db_mask;	/* software copy of mask reg */

	int			last_ts;	/* ticks @ last irq */

	const struct ntb_reg		*reg;
	const struct ntb_alt_reg	*self_reg;
	const struct ntb_alt_reg	*peer_reg;
	const struct ntb_xlat_reg	*xlat_reg;
};

#ifdef __i386__
/*
 * i386 has no native 64-bit bus_space accessors; synthesize them from two
 * 32-bit accesses (low word first, matching little-endian register layout).
 * NOTE(review): the two halves are not atomic with respect to the device.
 */
static __inline uint64_t
bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t offset)
{

	return (bus_space_read_4(tag, handle, offset) |
	    ((uint64_t)bus_space_read_4(tag, handle, offset + 4)) << 32);
}

static __inline void
bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t offset, uint64_t val)
{

	bus_space_write_4(tag, handle, offset, val);
	bus_space_write_4(tag, handle, offset + 4, val >> 32);
}
#endif

/*
 * Register access helpers; all expect a local `ntb` softc pointer in scope.
 * ntb_reg_* touch the config BAR; ntb_mw_* touch the B2B memory-window BAR.
 */
#define ntb_bar_read(SIZE, bar, offset) \
	    bus_space_read_ ## SIZE (ntb->bar_info[(bar)].pci_bus_tag, \
	    ntb->bar_info[(bar)].pci_bus_handle, (offset))
#define ntb_bar_write(SIZE, bar, offset, val) \
	    bus_space_write_ ## SIZE (ntb->bar_info[(bar)].pci_bus_tag, \
	    ntb->bar_info[(bar)].pci_bus_handle, (offset), (val))
#define ntb_reg_read(SIZE, offset) ntb_bar_read(SIZE, NTB_CONFIG_BAR, offset)
#define ntb_reg_write(SIZE, offset, val) \
	    ntb_bar_write(SIZE, NTB_CONFIG_BAR, offset, val)
#define ntb_mw_read(SIZE, offset) \
	    ntb_bar_read(SIZE, ntb_mw_to_bar(ntb, ntb->b2b_mw_idx), offset)
#define ntb_mw_write(SIZE, offset, val) \
	    ntb_bar_write(SIZE, ntb_mw_to_bar(ntb, ntb->b2b_mw_idx), \
		offset, val)

static int ntb_probe(device_t device);
static int ntb_attach(device_t device);
static int ntb_detach(device_t device);
static inline enum ntb_bar ntb_mw_to_bar(struct ntb_softc *, unsigned mw);
static inline bool bar_is_64bit(struct ntb_softc *, enum ntb_bar);
static inline void bar_get_xlat_params(struct ntb_softc *, enum ntb_bar,
    uint32_t *base, uint32_t *xlat, uint32_t *lmt);
static int ntb_map_pci_bars(struct ntb_softc *ntb);
static void print_map_success(struct ntb_softc *, struct ntb_pci_bar_info *,
    const char *);
static int map_mmr_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar);
static int map_memory_window_bar(struct ntb_softc *ntb,
    struct ntb_pci_bar_info *bar);
static void ntb_unmap_pci_bar(struct ntb_softc *ntb);
static int ntb_remap_msix(device_t, uint32_t desired, uint32_t avail);
static int ntb_init_isr(struct ntb_softc *ntb);
static int ntb_setup_legacy_interrupt(struct ntb_softc *ntb);
static int ntb_setup_msix(struct ntb_softc *ntb, uint32_t num_vectors);
static void ntb_teardown_interrupts(struct ntb_softc *ntb);
static inline uint64_t ntb_vec_mask(struct ntb_softc *, uint64_t db_vector);
static void ntb_interrupt(struct ntb_softc *, uint32_t vec);
static void ndev_vec_isr(void *arg);
static void ndev_irq_isr(void *arg);
static inline uint64_t db_ioread(struct ntb_softc *, uint64_t regoff);
static inline void db_iowrite(struct ntb_softc *, uint64_t regoff, uint64_t);
static inline void db_iowrite_raw(struct ntb_softc *, uint64_t regoff, uint64_t);
static int ntb_create_msix_vec(struct ntb_softc *ntb, uint32_t num_vectors);
static void ntb_free_msix_vec(struct ntb_softc *ntb);
static struct ntb_hw_info *ntb_get_device_info(uint32_t device_id);
static void ntb_detect_max_mw(struct ntb_softc *ntb);
static int ntb_detect_xeon(struct ntb_softc *ntb);
static int ntb_detect_atom(struct ntb_softc *ntb);
static int ntb_xeon_init_dev(struct ntb_softc *ntb);
static int ntb_atom_init_dev(struct ntb_softc *ntb);
static void ntb_teardown_xeon(struct ntb_softc *ntb);
static void configure_atom_secondary_side_bars(struct ntb_softc *ntb);
static void xeon_reset_sbar_size(struct ntb_softc *, enum ntb_bar idx,
    enum ntb_bar regbar);
static void xeon_set_sbar_base_and_limit(struct ntb_softc *,
    uint64_t base_addr, enum ntb_bar idx, enum ntb_bar regbar);
static void xeon_set_pbar_xlat(struct ntb_softc *, uint64_t base_addr,
    enum ntb_bar idx);
static int xeon_setup_b2b_mw(struct ntb_softc *,
    const struct ntb_b2b_addr *addr, const struct ntb_b2b_addr *peer_addr);
static inline bool link_is_up(struct ntb_softc *ntb);
static inline bool atom_link_is_err(struct ntb_softc *ntb);
static inline enum ntb_speed ntb_link_sta_speed(struct ntb_softc *);
static inline enum ntb_width ntb_link_sta_width(struct ntb_softc *);
static void atom_link_hb(void *arg);
static void ntb_db_event(struct ntb_softc *ntb, uint32_t vec);
static void recover_atom_link(void *arg);
static bool ntb_poll_link(struct ntb_softc *ntb);
static void save_bar_parameters(struct ntb_pci_bar_info *bar);
static void ntb_sysctl_init(struct ntb_softc *);
static int sysctl_handle_features(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_link_status(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_register(SYSCTL_HANDLER_ARGS);

static unsigned g_ntb_hw_debug_level;
SYSCTL_UINT(_hw_ntb, OID_AUTO, debug_level, CTLFLAG_RWTUN,
    &g_ntb_hw_debug_level, 0, "ntb_hw log level -- higher is more verbose");
/* Logging helper gated on the sysctl above; expects a local `ntb`. */
#define ntb_printf(lvl, ...) do {				\
	if ((lvl) <= g_ntb_hw_debug_level) {			\
		device_printf(ntb->device, __VA_ARGS__);	\
	}							\
} while (0)

static unsigned g_ntb_enable_wc = 1;
SYSCTL_UINT(_hw_ntb, OID_AUTO, enable_writecombine, CTLFLAG_RDTUN,
    &g_ntb_enable_wc, 0, "Set to 1 to map memory windows write combining");

/* Supported PCI device IDs; terminated by a zero device_id sentinel. */
static struct ntb_hw_info pci_ids[] = {
	/* XXX: PS/SS IDs left out until they are supported. */
	{ 0x0C4E8086, "BWD Atom Processor S1200 Non-Transparent Bridge B2B",
		NTB_ATOM, 0 },

	{ 0x37258086, "JSF Xeon C35xx/C55xx Non-Transparent Bridge B2B",
		NTB_XEON, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 },
	{ 0x3C0D8086, "SNB Xeon E5/Core i7 Non-Transparent Bridge B2B",
		NTB_XEON, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 },
	{ 0x0E0D8086, "IVT Xeon E5 V2 Non-Transparent Bridge B2B", NTB_XEON,
		NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 |
		    NTB_SB01BASE_LOCKUP | NTB_BAR_SIZE_4K },
	{ 0x2F0D8086, "HSX Xeon E5 V3 Non-Transparent Bridge B2B", NTB_XEON,
		NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 |
		    NTB_SB01BASE_LOCKUP },
	{ 0x6F0D8086, "BDX Xeon E5 V4 Non-Transparent Bridge B2B", NTB_XEON,
		NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 |
		    NTB_SB01BASE_LOCKUP },

	{ 0x00000000, NULL, NTB_ATOM, 0 }
};

/* Atom register layout: 64-bit doorbells, two memory-window BARs. */
static const struct ntb_reg atom_reg = {
	.ntb_ctl = ATOM_NTBCNTL_OFFSET,
	.lnk_sta = ATOM_LINK_STATUS_OFFSET,
	.db_size = sizeof(uint64_t),
	.mw_bar = { NTB_B2B_BAR_1, NTB_B2B_BAR_2 },
};

static const struct ntb_alt_reg atom_pri_reg = {
	.db_bell = ATOM_PDOORBELL_OFFSET,
	.db_mask = ATOM_PDBMSK_OFFSET,
	.spad = ATOM_SPAD_OFFSET,
};

static const struct ntb_alt_reg atom_b2b_reg = {
	.db_bell = ATOM_B2B_DOORBELL_OFFSET,
	.spad = ATOM_B2B_SPAD_OFFSET,
};

static const struct ntb_xlat_reg atom_sec_xlat = {
#if 0
	/* "FIXME" says the Linux driver. */
	.bar0_base = ATOM_SBAR0BASE_OFFSET,
	.bar2_base = ATOM_SBAR2BASE_OFFSET,
	.bar4_base = ATOM_SBAR4BASE_OFFSET,

	.bar2_limit = ATOM_SBAR2LMT_OFFSET,
	.bar4_limit = ATOM_SBAR4LMT_OFFSET,
#endif

	.bar2_xlat = ATOM_SBAR2XLAT_OFFSET,
	.bar4_xlat = ATOM_SBAR4XLAT_OFFSET,
};

/* Xeon register layout: 16-bit doorbells, up to three MW BARs. */
static const struct ntb_reg xeon_reg = {
	.ntb_ctl = XEON_NTBCNTL_OFFSET,
	.lnk_sta = XEON_LINK_STATUS_OFFSET,
	.db_size = sizeof(uint16_t),
	.mw_bar = { NTB_B2B_BAR_1, NTB_B2B_BAR_2, NTB_B2B_BAR_3 },
};

static const struct ntb_alt_reg xeon_pri_reg = {
	.db_bell = XEON_PDOORBELL_OFFSET,
	.db_mask = XEON_PDBMSK_OFFSET,
	.spad = XEON_SPAD_OFFSET,
};

static const struct ntb_alt_reg xeon_b2b_reg = {
	.db_bell = XEON_B2B_DOORBELL_OFFSET,
	.spad = XEON_B2B_SPAD_OFFSET,
};

static const struct ntb_xlat_reg xeon_sec_xlat = {
	.bar0_base = XEON_SBAR0BASE_OFFSET,
	.bar2_base = XEON_SBAR2BASE_OFFSET,
	.bar4_base = XEON_SBAR4BASE_OFFSET,
	.bar5_base = XEON_SBAR5BASE_OFFSET,

	.bar2_limit = XEON_SBAR2LMT_OFFSET,
	.bar4_limit = XEON_SBAR4LMT_OFFSET,
	.bar5_limit = XEON_SBAR5LMT_OFFSET,

	.bar2_xlat = XEON_SBAR2XLAT_OFFSET,
	.bar4_xlat = XEON_SBAR4XLAT_OFFSET,
	.bar5_xlat = XEON_SBAR5XLAT_OFFSET,
};

/* Default B2B bus addresses; overridable by the sysctls below. */
static struct ntb_b2b_addr xeon_b2b_usd_addr = {
	.bar0_addr = XEON_B2B_BAR0_ADDR,
	.bar2_addr64 = XEON_B2B_BAR2_ADDR64,
	.bar4_addr64 = XEON_B2B_BAR4_ADDR64,
	.bar4_addr32 = XEON_B2B_BAR4_ADDR32,
	.bar5_addr32 = XEON_B2B_BAR5_ADDR32,
};

static struct ntb_b2b_addr xeon_b2b_dsd_addr = {
	.bar0_addr = XEON_B2B_BAR0_ADDR,
	.bar2_addr64 = XEON_B2B_BAR2_ADDR64,
	.bar4_addr64 = XEON_B2B_BAR4_ADDR64,
	.bar4_addr32 = XEON_B2B_BAR4_ADDR32,
	.bar5_addr32 = XEON_B2B_BAR5_ADDR32,
};

SYSCTL_NODE(_hw_ntb, OID_AUTO, xeon_b2b, CTLFLAG_RW, 0,
    "B2B MW segment overrides -- MUST be the same on both sides");

SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar2_addr64, CTLFLAG_RDTUN,
    &xeon_b2b_usd_addr.bar2_addr64, 0, "If using B2B topology on Xeon "
    "hardware, use this 64-bit address on the bus between the NTB devices for "
    "the window at BAR2, on the upstream side of the link. MUST be the same "
    "address on both sides.");
SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar4_addr64, CTLFLAG_RDTUN,
    &xeon_b2b_usd_addr.bar4_addr64, 0, "See usd_bar2_addr64, but BAR4.");
SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar4_addr32, CTLFLAG_RDTUN,
    &xeon_b2b_usd_addr.bar4_addr32, 0, "See usd_bar2_addr64, but BAR4 "
    "(split-BAR mode).");
SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar5_addr32, CTLFLAG_RDTUN,
    &xeon_b2b_usd_addr.bar5_addr32, 0, "See usd_bar2_addr64, but BAR5 "
    "(split-BAR mode).");

SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar2_addr64, CTLFLAG_RDTUN,
    &xeon_b2b_dsd_addr.bar2_addr64, 0, "If using B2B topology on Xeon "
    "hardware, use this 64-bit address on the bus between the NTB devices for "
    "the window at BAR2, on the downstream side of the link. MUST be the same"
    " address on both sides.");
SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar4_addr64, CTLFLAG_RDTUN,
    &xeon_b2b_dsd_addr.bar4_addr64, 0, "See dsd_bar2_addr64, but BAR4.");
SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar4_addr32, CTLFLAG_RDTUN,
    &xeon_b2b_dsd_addr.bar4_addr32, 0, "See dsd_bar2_addr64, but BAR4 "
    "(split-BAR mode).");
SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar5_addr32, CTLFLAG_RDTUN,
    &xeon_b2b_dsd_addr.bar5_addr32, 0, "See dsd_bar2_addr64, but BAR5 "
    "(split-BAR mode).");

/*
 * OS <-> Driver interface structures
 */
MALLOC_DEFINE(M_NTB, "ntb_hw", "ntb_hw driver memory allocations");

static device_method_t ntb_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,     ntb_probe),
	DEVMETHOD(device_attach,    ntb_attach),
	DEVMETHOD(device_detach,    ntb_detach),
	DEVMETHOD_END
};

static driver_t ntb_pci_driver = {
	"ntb_hw",
	ntb_pci_methods,
	sizeof(struct ntb_softc),
};

static devclass_t ntb_devclass;
DRIVER_MODULE(ntb_hw, pci, ntb_pci_driver, ntb_devclass, NULL, NULL);
MODULE_VERSION(ntb_hw, 1);

SYSCTL_NODE(_hw, OID_AUTO, ntb, CTLFLAG_RW, 0, "NTB sysctls");

/*
 * OS <-> Driver linkage functions
 */

/* Match the PCI device ID against pci_ids[] and set the description. */
static int
ntb_probe(device_t device)
{
	struct ntb_hw_info *p;

	p = ntb_get_device_info(pci_get_devid(device));
	if (p == NULL)
		return (ENXIO);

	device_set_desc(device, p->desc);
	return (0);
}

/*
 * Attach: detect hardware flavor, map BARs, and initialize the device.
 * On any failure the partially-initialized state is torn down via
 * ntb_detach(), which tolerates partially-constructed softc state.
 */
static int
ntb_attach(device_t device)
{
	struct ntb_softc *ntb;
	struct ntb_hw_info *p;
	int error;

	ntb = DEVICE2SOFTC(device);
	p = ntb_get_device_info(pci_get_devid(device));

	ntb->device = device;
	ntb->type = p->type;
	ntb->features = p->features;
	ntb->b2b_mw_idx = B2B_MW_DISABLED;

	/* Heartbeat timer for NTB_ATOM since there is no link interrupt */
	callout_init(&ntb->heartbeat_timer, 1);
	callout_init(&ntb->lr_timer, 1);
	mtx_init(&ntb->db_mask_lock, "ntb hw bits", NULL, MTX_SPIN);
	mtx_init(&ntb->ctx_lock, "ntb ctx", NULL, MTX_DEF);

	if (ntb->type == NTB_ATOM)
		error = ntb_detect_atom(ntb);
	else
		error = ntb_detect_xeon(ntb);
	if (error != 0)
		goto out;

	ntb_detect_max_mw(ntb);

	pci_enable_busmaster(ntb->device);

	error = ntb_map_pci_bars(ntb);
	if (error != 0)
		goto out;
	if (ntb->type == NTB_ATOM)
		error = ntb_atom_init_dev(ntb);
	else
		error = ntb_xeon_init_dev(ntb);
	if (error != 0)
		goto out;

	ntb_poll_link(ntb);

	ntb_sysctl_init(ntb);

out:
	if (error != 0)
		ntb_detach(device);
	return (error);
}

/* Detach: quiesce doorbells and timers, then release all resources. */
static int
ntb_detach(device_t device)
{
	struct ntb_softc *ntb;

	ntb = DEVICE2SOFTC(device);

	/* self_reg is NULL if attach failed before device init. */
	if (ntb->self_reg != NULL)
		ntb_db_set_mask(ntb, ntb->db_valid_mask);
	callout_drain(&ntb->heartbeat_timer);
	callout_drain(&ntb->lr_timer);
	pci_disable_busmaster(ntb->device);
	if (ntb->type == NTB_XEON)
		ntb_teardown_xeon(ntb);
	ntb_teardown_interrupts(ntb);

	mtx_destroy(&ntb->db_mask_lock);
	mtx_destroy(&ntb->ctx_lock);

	/*
	 * Redetect total MWs so we unmap properly -- in case we lowered the
	 * maximum to work around Xeon errata.
	 */
	ntb_detect_max_mw(ntb);
	ntb_unmap_pci_bar(ntb);

	return (0);
}

/*
 * Driver internal routines
 */

/* Translate a memory-window index into the BAR that backs it. */
static inline enum ntb_bar
ntb_mw_to_bar(struct ntb_softc *ntb, unsigned mw)
{

	KASSERT(mw < ntb->mw_count ||
	    (mw != B2B_MW_DISABLED && mw == ntb->b2b_mw_idx),
	    ("%s: mw:%u > count:%u", __func__, mw, (unsigned)ntb->mw_count));
	KASSERT(ntb->reg->mw_bar[mw] != 0, ("invalid mw"));

	return (ntb->reg->mw_bar[mw]);
}

/* BARs 0-1 are always 64-bit; BARs 2-3 are 32-bit only in split-BAR mode. */
static inline bool
bar_is_64bit(struct ntb_softc *ntb, enum ntb_bar bar)
{
	/* XXX This assertion could be stronger. */
	KASSERT(bar < NTB_MAX_BARS, ("bogus bar"));
	return (bar < NTB_B2B_BAR_2 || !HAS_FEATURE(NTB_SPLIT_BAR));
}

/*
 * Look up the base/translation/limit register offsets for a BAR.  Any of
 * the out-pointers may be NULL if the caller does not need that value.
 */
static inline void
bar_get_xlat_params(struct ntb_softc *ntb, enum ntb_bar bar, uint32_t *base,
    uint32_t *xlat, uint32_t *lmt)
{
	uint32_t basev, lmtv, xlatv;

	switch (bar) {
	case NTB_B2B_BAR_1:
		basev = ntb->xlat_reg->bar2_base;
		lmtv = ntb->xlat_reg->bar2_limit;
		xlatv = ntb->xlat_reg->bar2_xlat;
		break;
	case NTB_B2B_BAR_2:
		basev = ntb->xlat_reg->bar4_base;
		lmtv = ntb->xlat_reg->bar4_limit;
		xlatv = ntb->xlat_reg->bar4_xlat;
		break;
	case NTB_B2B_BAR_3:
		basev = ntb->xlat_reg->bar5_base;
		lmtv = ntb->xlat_reg->bar5_limit;
		xlatv = ntb->xlat_reg->bar5_xlat;
		break;
	default:
		KASSERT(bar >= NTB_B2B_BAR_1 && bar < NTB_MAX_BARS,
		    ("bad bar"));
		basev = lmtv = xlatv = 0;
		break;
	}

	if (base != NULL)
		*base = basev;
	if (xlat != NULL)
		*xlat = xlatv;
	if (lmt != NULL)
		*lmt = lmtv;
}

/*
 * Map all PCI BARs: BAR0 is the register file (plain MMR mapping); the
 * remaining BARs are memory windows, except that under the SDOORBELL_LOCKUP
 * errata the last MW BAR is used for register access and mapped as MMR.
 */
static int
ntb_map_pci_bars(struct ntb_softc *ntb)
{
	int rc;

	ntb->bar_info[NTB_CONFIG_BAR].pci_resource_id = PCIR_BAR(0);
	rc = map_mmr_bar(ntb, &ntb->bar_info[NTB_CONFIG_BAR]);
	if (rc != 0)
		goto out;

	ntb->bar_info[NTB_B2B_BAR_1].pci_resource_id = PCIR_BAR(2);
	rc = map_memory_window_bar(ntb, &ntb->bar_info[NTB_B2B_BAR_1]);
	if (rc != 0)
		goto out;
	ntb->bar_info[NTB_B2B_BAR_1].psz_off = XEON_PBAR23SZ_OFFSET;
	ntb->bar_info[NTB_B2B_BAR_1].ssz_off = XEON_SBAR23SZ_OFFSET;
	ntb->bar_info[NTB_B2B_BAR_1].pbarxlat_off = XEON_PBAR2XLAT_OFFSET;

	ntb->bar_info[NTB_B2B_BAR_2].pci_resource_id = PCIR_BAR(4);
	/* XXX Are shared MW B2Bs write-combining? */
	if (HAS_FEATURE(NTB_SDOORBELL_LOCKUP) && !HAS_FEATURE(NTB_SPLIT_BAR))
		rc = map_mmr_bar(ntb, &ntb->bar_info[NTB_B2B_BAR_2]);
	else
		rc = map_memory_window_bar(ntb, &ntb->bar_info[NTB_B2B_BAR_2]);
	ntb->bar_info[NTB_B2B_BAR_2].psz_off = XEON_PBAR4SZ_OFFSET;
	ntb->bar_info[NTB_B2B_BAR_2].ssz_off = XEON_SBAR4SZ_OFFSET;
	ntb->bar_info[NTB_B2B_BAR_2].pbarxlat_off = XEON_PBAR4XLAT_OFFSET;

	if (!HAS_FEATURE(NTB_SPLIT_BAR))
		goto out;

	ntb->bar_info[NTB_B2B_BAR_3].pci_resource_id = PCIR_BAR(5);
	if (HAS_FEATURE(NTB_SDOORBELL_LOCKUP))
		rc = map_mmr_bar(ntb, &ntb->bar_info[NTB_B2B_BAR_3]);
	else
		rc = map_memory_window_bar(ntb, &ntb->bar_info[NTB_B2B_BAR_3]);
	ntb->bar_info[NTB_B2B_BAR_3].psz_off = XEON_PBAR5SZ_OFFSET;
	ntb->bar_info[NTB_B2B_BAR_3].ssz_off = XEON_SBAR5SZ_OFFSET;
	ntb->bar_info[NTB_B2B_BAR_3].pbarxlat_off = XEON_PBAR5XLAT_OFFSET;

out:
	if (rc != 0)
		device_printf(ntb->device,
		    "unable to allocate pci resource\n");
	return (rc);
}

/* Log a successful BAR mapping (virtual/physical range, size, kind). */
static void
print_map_success(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar,
    const char *kind)
{

	device_printf(ntb->device,
	    "Mapped BAR%d v:[%p-%p] p:[%p-%p] (0x%jx bytes) (%s)\n",
	    PCI_RID2BAR(bar->pci_resource_id), bar->vbase,
	    (char *)bar->vbase + bar->size - 1,
	    (void *)bar->pbase, (void *)(bar->pbase + bar->size - 1),
	    (uintmax_t)bar->size, kind);
}

/* Map a BAR used for memory-mapped registers (no resizing, no WC). */
static int
map_mmr_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar)
{

	bar->pci_resource = bus_alloc_resource_any(ntb->device, SYS_RES_MEMORY,
	    &bar->pci_resource_id, RF_ACTIVE);
	if (bar->pci_resource == NULL)
		return (ENXIO);

	save_bar_parameters(bar);
	print_map_success(ntb, bar, "mmr");
	return (0);
}

/*
 * Map a memory-window BAR, applying the Ivytown BAR-size errata fixup and
 * (optionally) a write-combining mapping attribute; see body for details.
 */
static int
map_memory_window_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar)
{
	int rc;
	uint8_t bar_size_bits = 0;

	bar->pci_resource =
bus_alloc_resource_any(ntb->device, SYS_RES_MEMORY,
	    &bar->pci_resource_id, RF_ACTIVE);

	if (bar->pci_resource == NULL)
		return (ENXIO);

	save_bar_parameters(bar);
	/*
	 * Ivytown NTB BAR sizes are misreported by the hardware due to a
	 * hardware issue. To work around this, query the size it should be
	 * configured to by the device and modify the resource to correspond to
	 * this new size. The BIOS on systems with this problem is required to
	 * provide enough address space to allow the driver to make this change
	 * safely.
	 *
	 * Ideally I could have just specified the size when I allocated the
	 * resource like:
	 *  bus_alloc_resource(ntb->device,
	 *	    SYS_RES_MEMORY, &bar->pci_resource_id, 0ul, ~0ul,
	 *	    1ul << bar_size_bits, RF_ACTIVE);
	 * but the PCI driver does not honor the size in this call, so we have
	 * to modify it after the fact.
	 */
	if (HAS_FEATURE(NTB_BAR_SIZE_4K)) {
		if (bar->pci_resource_id == PCIR_BAR(2))
			bar_size_bits = pci_read_config(ntb->device,
			    XEON_PBAR23SZ_OFFSET, 1);
		else
			bar_size_bits = pci_read_config(ntb->device,
			    XEON_PBAR45SZ_OFFSET, 1);

		rc = bus_adjust_resource(ntb->device, SYS_RES_MEMORY,
		    bar->pci_resource, bar->pbase,
		    bar->pbase + (1ul << bar_size_bits) - 1);
		if (rc != 0) {
			device_printf(ntb->device,
			    "unable to resize bar\n");
			return (rc);
		}

		/* Re-read pbase/vbase/size after the resize. */
		save_bar_parameters(bar);
	}

	print_map_success(ntb, bar, "mw");
	if (g_ntb_enable_wc == 0)
		return (0);

	/* Mark bar region as write combining to improve performance. */
	rc = pmap_change_attr((vm_offset_t)bar->vbase, bar->size,
	    VM_MEMATTR_WRITE_COMBINING);
	if (rc == 0) {
		bar->mapped_wc = true;
		device_printf(ntb->device,
		    "Marked BAR%d v:[%p-%p] p:[%p-%p] as "
		    "WRITE_COMBINING.\n",
		    PCI_RID2BAR(bar->pci_resource_id), bar->vbase,
		    (char *)bar->vbase + bar->size - 1,
		    (void *)bar->pbase, (void *)(bar->pbase + bar->size - 1));
	} else
		device_printf(ntb->device,
		    "Unable to mark BAR%d v:[%p-%p] p:[%p-%p] as "
		    "WRITE_COMBINING: %d\n",
		    PCI_RID2BAR(bar->pci_resource_id), bar->vbase,
		    (char *)bar->vbase + bar->size - 1,
		    (void *)bar->pbase, (void *)(bar->pbase + bar->size - 1),
		    rc);
		/* Proceed anyway */
	return (0);
}

/* Release every BAR resource that was successfully allocated. */
static void
ntb_unmap_pci_bar(struct ntb_softc *ntb)
{
	struct ntb_pci_bar_info *current_bar;
	int i;

	for (i = 0; i < NTB_MAX_BARS; i++) {
		current_bar = &ntb->bar_info[i];
		if (current_bar->pci_resource != NULL)
			bus_release_resource(ntb->device, SYS_RES_MEMORY,
			    current_bar->pci_resource_id,
			    current_bar->pci_resource);
	}
}

/*
 * Allocate and wire up one IRQ resource + handler per MSI-X vector.
 * allocated_interrupts tracks progress so teardown can unwind partial
 * failures.
 */
static int
ntb_setup_msix(struct ntb_softc *ntb, uint32_t num_vectors)
{
	uint32_t i;
	int rc;

	for (i = 0; i < num_vectors; i++) {
		ntb->int_info[i].rid = i + 1;	/* MSI-X rids are 1-based */
		ntb->int_info[i].res = bus_alloc_resource_any(ntb->device,
		    SYS_RES_IRQ, &ntb->int_info[i].rid, RF_ACTIVE);
		if (ntb->int_info[i].res == NULL) {
			device_printf(ntb->device,
			    "bus_alloc_resource failed\n");
			return (ENOMEM);
		}
		ntb->int_info[i].tag = NULL;
		ntb->allocated_interrupts++;
		rc = bus_setup_intr(ntb->device, ntb->int_info[i].res,
		    INTR_MPSAFE | INTR_TYPE_MISC, NULL, ndev_vec_isr,
		    &ntb->msix_vec[i], &ntb->int_info[i].tag);
		if (rc != 0) {
			device_printf(ntb->device, "bus_setup_intr failed\n");
			return (ENXIO);
		}
	}
	return (0);
}

/*
 * The Linux NTB driver drops from MSI-X to legacy INTx if a unique vector
 * cannot be allocated for each MSI-X message. JHB seems to think remapping
 * should be okay. This tunable should enable us to test that hypothesis
 * when someone gets their hands on some Xeon hardware.
 */
static int ntb_force_remap_mode;
SYSCTL_INT(_hw_ntb, OID_AUTO, force_remap_mode, CTLFLAG_RDTUN,
    &ntb_force_remap_mode, 0, "If enabled, force MSI-X messages to be remapped"
    " to a smaller number of ithreads, even if the desired number are "
    "available");

/*
 * In case it is NOT ok, give consumers an abort button.
 */
static int ntb_prefer_intx;
SYSCTL_INT(_hw_ntb, OID_AUTO, prefer_intx_to_remap, CTLFLAG_RDTUN,
    &ntb_prefer_intx, 0, "If enabled, prefer to use legacy INTx mode rather "
    "than remapping MSI-X messages over available slots (match Linux driver "
    "behavior)");

/*
 * Remap the desired number of MSI-X messages to available ithreads in a simple
 * round-robin fashion.
 */
static int
ntb_remap_msix(device_t dev, uint32_t desired, uint32_t avail)
{
	u_int *vectors;
	uint32_t i;
	int rc;

	if (ntb_prefer_intx != 0)
		return (ENXIO);

	vectors = malloc(desired * sizeof(*vectors), M_NTB, M_ZERO | M_WAITOK);

	for (i = 0; i < desired; i++)
		vectors[i] = (i % avail) + 1;

	rc = pci_remap_msix(dev, desired, vectors);
	free(vectors, M_NTB);
	return (rc);
}

/*
 * Set up interrupt delivery: try one MSI-X vector per doorbell, fall back
 * to round-robin remapping, and finally to a single legacy INTx interrupt
 * (collapsing all doorbells onto one vector via XEON_DB_TOTAL_SHIFT).
 */
static int
ntb_init_isr(struct ntb_softc *ntb)
{
	uint32_t desired_vectors, num_vectors;
	int rc;

	ntb->allocated_interrupts = 0;
	ntb->last_ts = ticks;

	/*
	 * Mask all doorbell interrupts.
	 */
	ntb_db_set_mask(ntb, ntb->db_valid_mask);

	num_vectors = desired_vectors = MIN(pci_msix_count(ntb->device),
	    ntb->db_count);
	if (desired_vectors >= 1) {
		rc = pci_alloc_msix(ntb->device, &num_vectors);

		/* Artificially force the remap path for testing. */
		if (ntb_force_remap_mode != 0 && rc == 0 &&
		    num_vectors == desired_vectors)
			num_vectors--;

		if (rc == 0 && num_vectors < desired_vectors) {
			rc = ntb_remap_msix(ntb->device, desired_vectors,
			    num_vectors);
			if (rc == 0)
				num_vectors = desired_vectors;
			else
				pci_release_msi(ntb->device);
		}
		if (rc != 0)
			num_vectors = 1;
	} else
		num_vectors = 1;

	if (ntb->type == NTB_XEON && num_vectors < ntb->db_vec_count) {
		ntb->db_vec_count = 1;
		ntb->db_vec_shift = XEON_DB_TOTAL_SHIFT;
		rc = ntb_setup_legacy_interrupt(ntb);
	} else {
		ntb_create_msix_vec(ntb, num_vectors);
		rc = ntb_setup_msix(ntb, num_vectors);
	}
	if (rc != 0) {
		device_printf(ntb->device,
		    "Error allocating interrupts: %d\n", rc);
		ntb_free_msix_vec(ntb);
	}

	return (rc);
}

/* Wire up a single shared legacy INTx interrupt (rid 0). */
static int
ntb_setup_legacy_interrupt(struct ntb_softc *ntb)
{
	int rc;

	ntb->int_info[0].rid = 0;
	ntb->int_info[0].res = bus_alloc_resource_any(ntb->device, SYS_RES_IRQ,
	    &ntb->int_info[0].rid, RF_SHAREABLE|RF_ACTIVE);
	if (ntb->int_info[0].res == NULL) {
		device_printf(ntb->device, "bus_alloc_resource failed\n");
		return (ENOMEM);
	}

	ntb->int_info[0].tag = NULL;
	ntb->allocated_interrupts = 1;

	rc = bus_setup_intr(ntb->device, ntb->int_info[0].res,
	    INTR_MPSAFE | INTR_TYPE_MISC, NULL, ndev_irq_isr,
	    ntb, &ntb->int_info[0].tag);
	if (rc != 0) {
		device_printf(ntb->device, "bus_setup_intr failed\n");
		return (ENXIO);
	}

	return (0);
}

/* Tear down every interrupt set up so far, then release MSI state. */
static void
ntb_teardown_interrupts(struct ntb_softc *ntb)
{
	struct ntb_int_info *current_int;
	int i;

	for (i = 0; i < ntb->allocated_interrupts; i++) {
		current_int = &ntb->int_info[i];
		if (current_int->tag != NULL)
			bus_teardown_intr(ntb->device, current_int->res,
			    current_int->tag);

		if (current_int->res != NULL)
			bus_release_resource(ntb->device, SYS_RES_IRQ,
			    rman_get_rid(current_int->res),
			    current_int->res);
	}

	ntb_free_msix_vec(ntb);
	pci_release_msi(ntb->device);
}

/*
 * Doorbell register and mask are 64-bit on Atom, 16-bit on Xeon. Abstract it
 * out to make code clearer.
 */
static inline uint64_t
db_ioread(struct ntb_softc *ntb, uint64_t regoff)
{

	if (ntb->type == NTB_ATOM)
		return (ntb_reg_read(8, regoff));

	KASSERT(ntb->type == NTB_XEON, ("bad ntb type"));

	return (ntb_reg_read(2, regoff));
}

/*
 * Checked doorbell write: asserts the bits are valid and, when writing the
 * local mask register, that the db_mask spinlock is held.
 */
static inline void
db_iowrite(struct ntb_softc *ntb, uint64_t regoff, uint64_t val)
{

	KASSERT((val & ~ntb->db_valid_mask) == 0,
	    ("%s: Invalid bits 0x%jx (valid: 0x%jx)", __func__,
	     (uintmax_t)(val & ~ntb->db_valid_mask),
	     (uintmax_t)ntb->db_valid_mask));

	if (regoff == ntb->self_reg->db_mask)
		DB_MASK_ASSERT(ntb, MA_OWNED);
	db_iowrite_raw(ntb, regoff, val);
}

/* Unchecked doorbell write at the hardware's native register width. */
static inline void
db_iowrite_raw(struct ntb_softc *ntb, uint64_t regoff, uint64_t val)
{

	if (ntb->type == NTB_ATOM) {
		ntb_reg_write(8, regoff, val);
		return;
	}

	KASSERT(ntb->type == NTB_XEON, ("bad ntb type"));
	ntb_reg_write(2, regoff, (uint16_t)val);
}

/* Mask (disable) the given doorbell bits; keeps the software copy in sync. */
void
ntb_db_set_mask(struct ntb_softc *ntb, uint64_t bits)
{

	DB_MASK_LOCK(ntb);
	ntb->db_mask |= bits;
	db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask);
	DB_MASK_UNLOCK(ntb);
}

/* Unmask (enable) the given doorbell bits. */
void
ntb_db_clear_mask(struct ntb_softc *ntb, uint64_t bits)
{

	KASSERT((bits & ~ntb->db_valid_mask) == 0,
	    ("%s: Invalid bits 0x%jx (valid: 0x%jx)", __func__,
	     (uintmax_t)(bits & ~ntb->db_valid_mask),
	     (uintmax_t)ntb->db_valid_mask));

	DB_MASK_LOCK(ntb);
	ntb->db_mask &= ~bits;
	db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask);
	DB_MASK_UNLOCK(ntb);
}

/* Read the pending local doorbell bits. */
uint64_t
ntb_db_read(struct ntb_softc *ntb)
{

	return (db_ioread(ntb, ntb->self_reg->db_bell));
}

/* Acknowledge (clear) pending local doorbell bits. */
void
ntb_db_clear(struct ntb_softc *ntb, uint64_t bits)
{

	KASSERT((bits & ~ntb->db_valid_mask) == 0,
	    ("%s: Invalid bits 0x%jx (valid: 0x%jx)", __func__,
	     (uintmax_t)(bits & ~ntb->db_valid_mask),
	     (uintmax_t)ntb->db_valid_mask));

	db_iowrite(ntb, ntb->self_reg->db_bell, bits);
}

/* Doorbell bits serviced by a given interrupt vector. */
static inline uint64_t
ntb_vec_mask(struct ntb_softc *ntb, uint64_t db_vector)
{
	uint64_t shift, mask;

	shift = ntb->db_vec_shift;
	mask = (1ull << shift) - 1;
	return (mask << (shift * db_vector));
}

/* Common interrupt path: poll link bits, then dispatch doorbell events. */
static void
ntb_interrupt(struct ntb_softc *ntb, uint32_t vec)
{
	uint64_t vec_mask;

	ntb->last_ts = ticks;
	vec_mask = ntb_vec_mask(ntb, vec);

	if ((vec_mask & ntb->db_link_mask) != 0) {
		if (ntb_poll_link(ntb))
			ntb_link_event(ntb);
	}

	if ((vec_mask & ntb->db_valid_mask) != 0)
		ntb_db_event(ntb, vec);
}

/* MSI-X handler: the ntb_vec cookie carries the vector number. */
static void
ndev_vec_isr(void *arg)
{
	struct ntb_vec *nvec = arg;

	ntb_interrupt(nvec->ntb, nvec->num);
}

/* Legacy INTx handler. */
static void
ndev_irq_isr(void *arg)
{
	/* If we couldn't set up MSI-X, we only have the one vector.
*/ 1117 ntb_interrupt(arg, 0); 1118} 1119 1120static int 1121ntb_create_msix_vec(struct ntb_softc *ntb, uint32_t num_vectors) 1122{ 1123 uint32_t i; 1124 1125 ntb->msix_vec = malloc(num_vectors * sizeof(*ntb->msix_vec), M_NTB, 1126 M_ZERO | M_WAITOK); 1127 for (i = 0; i < num_vectors; i++) { 1128 ntb->msix_vec[i].num = i; 1129 ntb->msix_vec[i].ntb = ntb; 1130 } 1131 1132 return (0); 1133} 1134 1135static void 1136ntb_free_msix_vec(struct ntb_softc *ntb) 1137{ 1138 1139 if (ntb->msix_vec == NULL) 1140 return; 1141 1142 free(ntb->msix_vec, M_NTB); 1143 ntb->msix_vec = NULL; 1144} 1145 1146static struct ntb_hw_info * 1147ntb_get_device_info(uint32_t device_id) 1148{ 1149 struct ntb_hw_info *ep = pci_ids; 1150 1151 while (ep->device_id) { 1152 if (ep->device_id == device_id) 1153 return (ep); 1154 ++ep; 1155 } 1156 return (NULL); 1157} 1158 1159static void 1160ntb_teardown_xeon(struct ntb_softc *ntb) 1161{ 1162 1163 if (ntb->reg != NULL) 1164 ntb_link_disable(ntb); 1165} 1166 1167static void 1168ntb_detect_max_mw(struct ntb_softc *ntb) 1169{ 1170 1171 if (ntb->type == NTB_ATOM) { 1172 ntb->mw_count = ATOM_MW_COUNT; 1173 return; 1174 } 1175 1176 if (HAS_FEATURE(NTB_SPLIT_BAR)) 1177 ntb->mw_count = XEON_HSX_SPLIT_MW_COUNT; 1178 else 1179 ntb->mw_count = XEON_SNB_MW_COUNT; 1180} 1181 1182static int 1183ntb_detect_xeon(struct ntb_softc *ntb) 1184{ 1185 uint8_t ppd, conn_type; 1186 1187 ppd = pci_read_config(ntb->device, NTB_PPD_OFFSET, 1); 1188 ntb->ppd = ppd; 1189 1190 if ((ppd & XEON_PPD_DEV_TYPE) != 0) 1191 ntb->dev_type = NTB_DEV_DSD; 1192 else 1193 ntb->dev_type = NTB_DEV_USD; 1194 1195 if ((ppd & XEON_PPD_SPLIT_BAR) != 0) 1196 ntb->features |= NTB_SPLIT_BAR; 1197 1198 /* SB01BASE_LOCKUP errata is a superset of SDOORBELL errata */ 1199 if (HAS_FEATURE(NTB_SB01BASE_LOCKUP)) 1200 ntb->features |= NTB_SDOORBELL_LOCKUP; 1201 1202 conn_type = ppd & XEON_PPD_CONN_TYPE; 1203 switch (conn_type) { 1204 case NTB_CONN_B2B: 1205 ntb->conn_type = conn_type; 1206 break; 1207 case 
NTB_CONN_RP: 1208 case NTB_CONN_TRANSPARENT: 1209 default: 1210 device_printf(ntb->device, "Unsupported connection type: %u\n", 1211 (unsigned)conn_type); 1212 return (ENXIO); 1213 } 1214 return (0); 1215} 1216 1217static int 1218ntb_detect_atom(struct ntb_softc *ntb) 1219{ 1220 uint32_t ppd, conn_type; 1221 1222 ppd = pci_read_config(ntb->device, NTB_PPD_OFFSET, 4); 1223 ntb->ppd = ppd; 1224 1225 if ((ppd & ATOM_PPD_DEV_TYPE) != 0) 1226 ntb->dev_type = NTB_DEV_DSD; 1227 else 1228 ntb->dev_type = NTB_DEV_USD; 1229 1230 conn_type = (ppd & ATOM_PPD_CONN_TYPE) >> 8; 1231 switch (conn_type) { 1232 case NTB_CONN_B2B: 1233 ntb->conn_type = conn_type; 1234 break; 1235 default: 1236 device_printf(ntb->device, "Unsupported NTB configuration\n"); 1237 return (ENXIO); 1238 } 1239 return (0); 1240} 1241 1242static int 1243ntb_xeon_init_dev(struct ntb_softc *ntb) 1244{ 1245 int rc; 1246 1247 ntb->spad_count = XEON_SPAD_COUNT; 1248 ntb->db_count = XEON_DB_COUNT; 1249 ntb->db_link_mask = XEON_DB_LINK_BIT; 1250 ntb->db_vec_count = XEON_DB_MSIX_VECTOR_COUNT; 1251 ntb->db_vec_shift = XEON_DB_MSIX_VECTOR_SHIFT; 1252 1253 if (ntb->conn_type != NTB_CONN_B2B) { 1254 device_printf(ntb->device, "Connection type %d not supported\n", 1255 ntb->conn_type); 1256 return (ENXIO); 1257 } 1258 1259 ntb->reg = &xeon_reg; 1260 ntb->self_reg = &xeon_pri_reg; 1261 ntb->peer_reg = &xeon_b2b_reg; 1262 ntb->xlat_reg = &xeon_sec_xlat; 1263 1264 /* 1265 * There is a Xeon hardware errata related to writes to SDOORBELL or 1266 * B2BDOORBELL in conjunction with inbound access to NTB MMIO space, 1267 * which may hang the system. To workaround this use the second memory 1268 * window to access the interrupt and scratch pad registers on the 1269 * remote system. 
1270 */ 1271 if (HAS_FEATURE(NTB_SDOORBELL_LOCKUP)) 1272 /* Use the last MW for mapping remote spad */ 1273 ntb->b2b_mw_idx = ntb->mw_count - 1; 1274 else if (HAS_FEATURE(NTB_B2BDOORBELL_BIT14)) 1275 /* 1276 * HW Errata on bit 14 of b2bdoorbell register. Writes will not be 1277 * mirrored to the remote system. Shrink the number of bits by one, 1278 * since bit 14 is the last bit. 1279 * 1280 * On REGS_THRU_MW errata mode, we don't use the b2bdoorbell register 1281 * anyway. Nor for non-B2B connection types. 1282 */ 1283 ntb->db_count = XEON_DB_COUNT - 1; 1284 1285 ntb->db_valid_mask = (1ull << ntb->db_count) - 1; 1286 1287 if (ntb->dev_type == NTB_DEV_USD) 1288 rc = xeon_setup_b2b_mw(ntb, &xeon_b2b_dsd_addr, 1289 &xeon_b2b_usd_addr); 1290 else 1291 rc = xeon_setup_b2b_mw(ntb, &xeon_b2b_usd_addr, 1292 &xeon_b2b_dsd_addr); 1293 if (rc != 0) 1294 return (rc); 1295 1296 /* Enable Bus Master and Memory Space on the secondary side */ 1297 ntb_reg_write(2, XEON_SPCICMD_OFFSET, 1298 PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN); 1299 1300 /* 1301 * Mask all doorbell interrupts. 1302 */ 1303 ntb_db_set_mask(ntb, ntb->db_valid_mask); 1304 1305 rc = ntb_init_isr(ntb); 1306 return (rc); 1307} 1308 1309static int 1310ntb_atom_init_dev(struct ntb_softc *ntb) 1311{ 1312 int error; 1313 1314 KASSERT(ntb->conn_type == NTB_CONN_B2B, 1315 ("Unsupported NTB configuration (%d)\n", ntb->conn_type)); 1316 1317 ntb->spad_count = ATOM_SPAD_COUNT; 1318 ntb->db_count = ATOM_DB_COUNT; 1319 ntb->db_vec_count = ATOM_DB_MSIX_VECTOR_COUNT; 1320 ntb->db_vec_shift = ATOM_DB_MSIX_VECTOR_SHIFT; 1321 ntb->db_valid_mask = (1ull << ntb->db_count) - 1; 1322 1323 ntb->reg = &atom_reg; 1324 ntb->self_reg = &atom_pri_reg; 1325 ntb->peer_reg = &atom_b2b_reg; 1326 ntb->xlat_reg = &atom_sec_xlat; 1327 1328 /* 1329 * FIXME - MSI-X bug on early Atom HW, remove once internal issue is 1330 * resolved. Mask transaction layer internal parity errors. 
1331 */ 1332 pci_write_config(ntb->device, 0xFC, 0x4, 4); 1333 1334 configure_atom_secondary_side_bars(ntb); 1335 1336 /* Enable Bus Master and Memory Space on the secondary side */ 1337 ntb_reg_write(2, ATOM_SPCICMD_OFFSET, 1338 PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN); 1339 1340 error = ntb_init_isr(ntb); 1341 if (error != 0) 1342 return (error); 1343 1344 /* Initiate PCI-E link training */ 1345 ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO); 1346 1347 callout_reset(&ntb->heartbeat_timer, 0, atom_link_hb, ntb); 1348 1349 return (0); 1350} 1351 1352/* XXX: Linux driver doesn't seem to do any of this for Atom. */ 1353static void 1354configure_atom_secondary_side_bars(struct ntb_softc *ntb) 1355{ 1356 1357 if (ntb->dev_type == NTB_DEV_USD) { 1358 ntb_reg_write(8, ATOM_PBAR2XLAT_OFFSET, 1359 XEON_B2B_BAR2_ADDR64); 1360 ntb_reg_write(8, ATOM_PBAR4XLAT_OFFSET, 1361 XEON_B2B_BAR4_ADDR64); 1362 ntb_reg_write(8, ATOM_MBAR23_OFFSET, XEON_B2B_BAR2_ADDR64); 1363 ntb_reg_write(8, ATOM_MBAR45_OFFSET, XEON_B2B_BAR4_ADDR64); 1364 } else { 1365 ntb_reg_write(8, ATOM_PBAR2XLAT_OFFSET, 1366 XEON_B2B_BAR2_ADDR64); 1367 ntb_reg_write(8, ATOM_PBAR4XLAT_OFFSET, 1368 XEON_B2B_BAR4_ADDR64); 1369 ntb_reg_write(8, ATOM_MBAR23_OFFSET, XEON_B2B_BAR2_ADDR64); 1370 ntb_reg_write(8, ATOM_MBAR45_OFFSET, XEON_B2B_BAR4_ADDR64); 1371 } 1372} 1373 1374 1375/* 1376 * When working around Xeon SDOORBELL errata by remapping remote registers in a 1377 * MW, limit the B2B MW to half a MW. By sharing a MW, half the shared MW 1378 * remains for use by a higher layer. 1379 * 1380 * Will only be used if working around SDOORBELL errata and the BIOS-configured 1381 * MW size is sufficiently large. 1382 */ 1383static unsigned int ntb_b2b_mw_share; 1384SYSCTL_UINT(_hw_ntb, OID_AUTO, b2b_mw_share, CTLFLAG_RDTUN, &ntb_b2b_mw_share, 1385 0, "If enabled (non-zero), prefer to share half of the B2B peer register " 1386 "MW with higher level consumers. 
Both sides of the NTB MUST set the same " 1387 "value here."); 1388 1389static void 1390xeon_reset_sbar_size(struct ntb_softc *ntb, enum ntb_bar idx, 1391 enum ntb_bar regbar) 1392{ 1393 struct ntb_pci_bar_info *bar; 1394 uint8_t bar_sz; 1395 1396 if (!HAS_FEATURE(NTB_SPLIT_BAR) && idx >= NTB_B2B_BAR_3) 1397 return; 1398 1399 bar = &ntb->bar_info[idx]; 1400 bar_sz = pci_read_config(ntb->device, bar->psz_off, 1); 1401 if (idx == regbar) { 1402 if (ntb->b2b_off != 0) 1403 bar_sz--; 1404 else 1405 bar_sz = 0; 1406 } 1407 pci_write_config(ntb->device, bar->ssz_off, bar_sz, 1); 1408 bar_sz = pci_read_config(ntb->device, bar->ssz_off, 1); 1409 (void)bar_sz; 1410} 1411 1412static void 1413xeon_set_sbar_base_and_limit(struct ntb_softc *ntb, uint64_t bar_addr, 1414 enum ntb_bar idx, enum ntb_bar regbar) 1415{ 1416 uint64_t reg_val; 1417 uint32_t base_reg, lmt_reg; 1418 1419 bar_get_xlat_params(ntb, idx, &base_reg, NULL, &lmt_reg); 1420 if (idx == regbar) 1421 bar_addr += ntb->b2b_off; 1422 1423 if (!bar_is_64bit(ntb, idx)) { 1424 ntb_reg_write(4, base_reg, bar_addr); 1425 reg_val = ntb_reg_read(4, base_reg); 1426 (void)reg_val; 1427 1428 ntb_reg_write(4, lmt_reg, bar_addr); 1429 reg_val = ntb_reg_read(4, lmt_reg); 1430 (void)reg_val; 1431 } else { 1432 ntb_reg_write(8, base_reg, bar_addr); 1433 reg_val = ntb_reg_read(8, base_reg); 1434 (void)reg_val; 1435 1436 ntb_reg_write(8, lmt_reg, bar_addr); 1437 reg_val = ntb_reg_read(8, lmt_reg); 1438 (void)reg_val; 1439 } 1440} 1441 1442static void 1443xeon_set_pbar_xlat(struct ntb_softc *ntb, uint64_t base_addr, enum ntb_bar idx) 1444{ 1445 struct ntb_pci_bar_info *bar; 1446 1447 bar = &ntb->bar_info[idx]; 1448 if (HAS_FEATURE(NTB_SPLIT_BAR) && idx >= NTB_B2B_BAR_2) { 1449 ntb_reg_write(4, bar->pbarxlat_off, base_addr); 1450 base_addr = ntb_reg_read(4, bar->pbarxlat_off); 1451 } else { 1452 ntb_reg_write(8, bar->pbarxlat_off, base_addr); 1453 base_addr = ntb_reg_read(8, bar->pbarxlat_off); 1454 } 1455 (void)base_addr; 1456} 1457 
1458static int 1459xeon_setup_b2b_mw(struct ntb_softc *ntb, const struct ntb_b2b_addr *addr, 1460 const struct ntb_b2b_addr *peer_addr) 1461{ 1462 struct ntb_pci_bar_info *b2b_bar; 1463 vm_size_t bar_size; 1464 uint64_t bar_addr; 1465 enum ntb_bar b2b_bar_num, i; 1466 1467 if (ntb->b2b_mw_idx == B2B_MW_DISABLED) { 1468 b2b_bar = NULL; 1469 b2b_bar_num = NTB_CONFIG_BAR; 1470 ntb->b2b_off = 0; 1471 } else { 1472 b2b_bar_num = ntb_mw_to_bar(ntb, ntb->b2b_mw_idx); 1473 KASSERT(b2b_bar_num > 0 && b2b_bar_num < NTB_MAX_BARS, 1474 ("invalid b2b mw bar")); 1475 1476 b2b_bar = &ntb->bar_info[b2b_bar_num]; 1477 bar_size = b2b_bar->size; 1478 1479 if (ntb_b2b_mw_share != 0 && 1480 (bar_size >> 1) >= XEON_B2B_MIN_SIZE) 1481 ntb->b2b_off = bar_size >> 1; 1482 else if (bar_size >= XEON_B2B_MIN_SIZE) { 1483 ntb->b2b_off = 0; 1484 ntb->mw_count--; 1485 } else { 1486 device_printf(ntb->device, 1487 "B2B bar size is too small!\n"); 1488 return (EIO); 1489 } 1490 } 1491 1492 /* 1493 * Reset the secondary bar sizes to match the primary bar sizes. 1494 * (Except, disable or halve the size of the B2B secondary bar.) 1495 */ 1496 for (i = NTB_B2B_BAR_1; i < NTB_MAX_BARS; i++) 1497 xeon_reset_sbar_size(ntb, i, b2b_bar_num); 1498 1499 bar_addr = 0; 1500 if (b2b_bar_num == NTB_CONFIG_BAR) 1501 bar_addr = addr->bar0_addr; 1502 else if (b2b_bar_num == NTB_B2B_BAR_1) 1503 bar_addr = addr->bar2_addr64; 1504 else if (b2b_bar_num == NTB_B2B_BAR_2 && !HAS_FEATURE(NTB_SPLIT_BAR)) 1505 bar_addr = addr->bar4_addr64; 1506 else if (b2b_bar_num == NTB_B2B_BAR_2) 1507 bar_addr = addr->bar4_addr32; 1508 else if (b2b_bar_num == NTB_B2B_BAR_3) 1509 bar_addr = addr->bar5_addr32; 1510 else 1511 KASSERT(false, ("invalid bar")); 1512 1513 ntb_reg_write(8, XEON_SBAR0BASE_OFFSET, bar_addr); 1514 1515 /* 1516 * Other SBARs are normally hit by the PBAR xlat, except for the b2b 1517 * register BAR. The B2B BAR is either disabled above or configured 1518 * half-size. It starts at PBAR xlat + offset. 
1519 * 1520 * Also set up incoming BAR limits == base (zero length window). 1521 */ 1522 xeon_set_sbar_base_and_limit(ntb, addr->bar2_addr64, NTB_B2B_BAR_1, 1523 b2b_bar_num); 1524 if (HAS_FEATURE(NTB_SPLIT_BAR)) { 1525 xeon_set_sbar_base_and_limit(ntb, addr->bar4_addr32, 1526 NTB_B2B_BAR_2, b2b_bar_num); 1527 xeon_set_sbar_base_and_limit(ntb, addr->bar5_addr32, 1528 NTB_B2B_BAR_3, b2b_bar_num); 1529 } else 1530 xeon_set_sbar_base_and_limit(ntb, addr->bar4_addr64, 1531 NTB_B2B_BAR_2, b2b_bar_num); 1532 1533 /* Zero incoming translation addrs */ 1534 ntb_reg_write(8, XEON_SBAR2XLAT_OFFSET, 0); 1535 ntb_reg_write(8, XEON_SBAR4XLAT_OFFSET, 0); 1536 1537 /* Zero outgoing translation limits (whole bar size windows) */ 1538 ntb_reg_write(8, XEON_PBAR2LMT_OFFSET, 0); 1539 ntb_reg_write(8, XEON_PBAR4LMT_OFFSET, 0); 1540 1541 /* Set outgoing translation offsets */ 1542 xeon_set_pbar_xlat(ntb, peer_addr->bar2_addr64, NTB_B2B_BAR_1); 1543 if (HAS_FEATURE(NTB_SPLIT_BAR)) { 1544 xeon_set_pbar_xlat(ntb, peer_addr->bar4_addr32, NTB_B2B_BAR_2); 1545 xeon_set_pbar_xlat(ntb, peer_addr->bar5_addr32, NTB_B2B_BAR_3); 1546 } else 1547 xeon_set_pbar_xlat(ntb, peer_addr->bar4_addr64, NTB_B2B_BAR_2); 1548 1549 /* Set the translation offset for B2B registers */ 1550 bar_addr = 0; 1551 if (b2b_bar_num == NTB_CONFIG_BAR) 1552 bar_addr = peer_addr->bar0_addr; 1553 else if (b2b_bar_num == NTB_B2B_BAR_1) 1554 bar_addr = peer_addr->bar2_addr64; 1555 else if (b2b_bar_num == NTB_B2B_BAR_2 && !HAS_FEATURE(NTB_SPLIT_BAR)) 1556 bar_addr = peer_addr->bar4_addr64; 1557 else if (b2b_bar_num == NTB_B2B_BAR_2) 1558 bar_addr = peer_addr->bar4_addr32; 1559 else if (b2b_bar_num == NTB_B2B_BAR_3) 1560 bar_addr = peer_addr->bar5_addr32; 1561 else 1562 KASSERT(false, ("invalid bar")); 1563 1564 /* 1565 * B2B_XLAT_OFFSET is a 64-bit register but can only be written 32 bits 1566 * at a time. 
1567 */ 1568 ntb_reg_write(4, XEON_B2B_XLAT_OFFSETL, bar_addr & 0xffffffff); 1569 ntb_reg_write(4, XEON_B2B_XLAT_OFFSETU, bar_addr >> 32); 1570 return (0); 1571} 1572 1573static inline bool 1574link_is_up(struct ntb_softc *ntb) 1575{ 1576 1577 if (ntb->type == NTB_XEON) { 1578 if (ntb->conn_type == NTB_CONN_TRANSPARENT) 1579 return (true); 1580 return ((ntb->lnk_sta & NTB_LINK_STATUS_ACTIVE) != 0); 1581 } 1582 1583 KASSERT(ntb->type == NTB_ATOM, ("ntb type")); 1584 return ((ntb->ntb_ctl & ATOM_CNTL_LINK_DOWN) == 0); 1585} 1586 1587static inline bool 1588atom_link_is_err(struct ntb_softc *ntb) 1589{ 1590 uint32_t status; 1591 1592 KASSERT(ntb->type == NTB_ATOM, ("ntb type")); 1593 1594 status = ntb_reg_read(4, ATOM_LTSSMSTATEJMP_OFFSET); 1595 if ((status & ATOM_LTSSMSTATEJMP_FORCEDETECT) != 0) 1596 return (true); 1597 1598 status = ntb_reg_read(4, ATOM_IBSTERRRCRVSTS0_OFFSET); 1599 return ((status & ATOM_IBIST_ERR_OFLOW) != 0); 1600} 1601 1602/* Atom does not have link status interrupt, poll on that platform */ 1603static void 1604atom_link_hb(void *arg) 1605{ 1606 struct ntb_softc *ntb = arg; 1607 sbintime_t timo, poll_ts; 1608 1609 timo = NTB_HB_TIMEOUT * hz; 1610 poll_ts = ntb->last_ts + timo; 1611 1612 /* 1613 * Delay polling the link status if an interrupt was received, unless 1614 * the cached link status says the link is down. 1615 */ 1616 if ((sbintime_t)ticks - poll_ts < 0 && link_is_up(ntb)) { 1617 timo = poll_ts - ticks; 1618 goto out; 1619 } 1620 1621 if (ntb_poll_link(ntb)) 1622 ntb_link_event(ntb); 1623 1624 if (!link_is_up(ntb) && atom_link_is_err(ntb)) { 1625 /* Link is down with error, proceed with recovery */ 1626 callout_reset(&ntb->lr_timer, 0, recover_atom_link, ntb); 1627 return; 1628 } 1629 1630out: 1631 callout_reset(&ntb->heartbeat_timer, timo, atom_link_hb, ntb); 1632} 1633 1634static void 1635atom_perform_link_restart(struct ntb_softc *ntb) 1636{ 1637 uint32_t status; 1638 1639 /* Driver resets the NTB ModPhy lanes - magic! 
*/ 1640 ntb_reg_write(1, ATOM_MODPHY_PCSREG6, 0xe0); 1641 ntb_reg_write(1, ATOM_MODPHY_PCSREG4, 0x40); 1642 ntb_reg_write(1, ATOM_MODPHY_PCSREG4, 0x60); 1643 ntb_reg_write(1, ATOM_MODPHY_PCSREG6, 0x60); 1644 1645 /* Driver waits 100ms to allow the NTB ModPhy to settle */ 1646 pause("ModPhy", hz / 10); 1647 1648 /* Clear AER Errors, write to clear */ 1649 status = ntb_reg_read(4, ATOM_ERRCORSTS_OFFSET); 1650 status &= PCIM_AER_COR_REPLAY_ROLLOVER; 1651 ntb_reg_write(4, ATOM_ERRCORSTS_OFFSET, status); 1652 1653 /* Clear unexpected electrical idle event in LTSSM, write to clear */ 1654 status = ntb_reg_read(4, ATOM_LTSSMERRSTS0_OFFSET); 1655 status |= ATOM_LTSSMERRSTS0_UNEXPECTEDEI; 1656 ntb_reg_write(4, ATOM_LTSSMERRSTS0_OFFSET, status); 1657 1658 /* Clear DeSkew Buffer error, write to clear */ 1659 status = ntb_reg_read(4, ATOM_DESKEWSTS_OFFSET); 1660 status |= ATOM_DESKEWSTS_DBERR; 1661 ntb_reg_write(4, ATOM_DESKEWSTS_OFFSET, status); 1662 1663 status = ntb_reg_read(4, ATOM_IBSTERRRCRVSTS0_OFFSET); 1664 status &= ATOM_IBIST_ERR_OFLOW; 1665 ntb_reg_write(4, ATOM_IBSTERRRCRVSTS0_OFFSET, status); 1666 1667 /* Releases the NTB state machine to allow the link to retrain */ 1668 status = ntb_reg_read(4, ATOM_LTSSMSTATEJMP_OFFSET); 1669 status &= ~ATOM_LTSSMSTATEJMP_FORCEDETECT; 1670 ntb_reg_write(4, ATOM_LTSSMSTATEJMP_OFFSET, status); 1671} 1672 1673/* 1674 * ntb_set_ctx() - associate a driver context with an ntb device 1675 * @ntb: NTB device context 1676 * @ctx: Driver context 1677 * @ctx_ops: Driver context operations 1678 * 1679 * Associate a driver context and operations with a ntb device. The context is 1680 * provided by the client driver, and the driver may associate a different 1681 * context with each ntb device. 1682 * 1683 * Return: Zero if the context is associated, otherwise an error number. 
1684 */ 1685int 1686ntb_set_ctx(struct ntb_softc *ntb, void *ctx, const struct ntb_ctx_ops *ops) 1687{ 1688 1689 if (ctx == NULL || ops == NULL) 1690 return (EINVAL); 1691 if (ntb->ctx_ops != NULL) 1692 return (EINVAL); 1693 1694 CTX_LOCK(ntb); 1695 if (ntb->ctx_ops != NULL) { 1696 CTX_UNLOCK(ntb); 1697 return (EINVAL); 1698 } 1699 ntb->ntb_ctx = ctx; 1700 ntb->ctx_ops = ops; 1701 CTX_UNLOCK(ntb); 1702 1703 return (0); 1704} 1705 1706/* 1707 * It is expected that this will only be used from contexts where the ctx_lock 1708 * is not needed to protect ntb_ctx lifetime. 1709 */ 1710void * 1711ntb_get_ctx(struct ntb_softc *ntb, const struct ntb_ctx_ops **ops) 1712{ 1713 1714 KASSERT(ntb->ntb_ctx != NULL && ntb->ctx_ops != NULL, ("bogus")); 1715 if (ops != NULL) 1716 *ops = ntb->ctx_ops; 1717 return (ntb->ntb_ctx); 1718} 1719 1720/* 1721 * ntb_clear_ctx() - disassociate any driver context from an ntb device 1722 * @ntb: NTB device context 1723 * 1724 * Clear any association that may exist between a driver context and the ntb 1725 * device. 1726 */ 1727void 1728ntb_clear_ctx(struct ntb_softc *ntb) 1729{ 1730 1731 CTX_LOCK(ntb); 1732 ntb->ntb_ctx = NULL; 1733 ntb->ctx_ops = NULL; 1734 CTX_UNLOCK(ntb); 1735} 1736 1737/* 1738 * ntb_link_event() - notify driver context of a change in link status 1739 * @ntb: NTB device context 1740 * 1741 * Notify the driver context that the link status may have changed. The driver 1742 * should call ntb_link_is_up() to get the current status. 1743 */ 1744void 1745ntb_link_event(struct ntb_softc *ntb) 1746{ 1747 1748 CTX_LOCK(ntb); 1749 if (ntb->ctx_ops != NULL && ntb->ctx_ops->link_event != NULL) 1750 ntb->ctx_ops->link_event(ntb->ntb_ctx); 1751 CTX_UNLOCK(ntb); 1752} 1753 1754/* 1755 * ntb_db_event() - notify driver context of a doorbell event 1756 * @ntb: NTB device context 1757 * @vector: Interrupt vector number 1758 * 1759 * Notify the driver context of a doorbell event. 
If hardware supports 1760 * multiple interrupt vectors for doorbells, the vector number indicates which 1761 * vector received the interrupt. The vector number is relative to the first 1762 * vector used for doorbells, starting at zero, and must be less than 1763 * ntb_db_vector_count(). The driver may call ntb_db_read() to check which 1764 * doorbell bits need service, and ntb_db_vector_mask() to determine which of 1765 * those bits are associated with the vector number. 1766 */ 1767static void 1768ntb_db_event(struct ntb_softc *ntb, uint32_t vec) 1769{ 1770 1771 CTX_LOCK(ntb); 1772 if (ntb->ctx_ops != NULL && ntb->ctx_ops->db_event != NULL) 1773 ntb->ctx_ops->db_event(ntb->ntb_ctx, vec); 1774 CTX_UNLOCK(ntb); 1775} 1776 1777/* 1778 * ntb_link_enable() - enable the link on the secondary side of the ntb 1779 * @ntb: NTB device context 1780 * @max_speed: The maximum link speed expressed as PCIe generation number[0] 1781 * @max_width: The maximum link width expressed as the number of PCIe lanes[0] 1782 * 1783 * Enable the link on the secondary side of the ntb. This can only be done 1784 * from the primary side of the ntb in primary or b2b topology. The ntb device 1785 * should train the link to its maximum speed and width, or the requested speed 1786 * and width, whichever is smaller, if supported. 1787 * 1788 * Return: Zero on success, otherwise an error number. 1789 * 1790 * [0]: Only NTB_SPEED_AUTO and NTB_WIDTH_AUTO are valid inputs; other speed 1791 * and width input will be ignored. 
1792 */ 1793int 1794ntb_link_enable(struct ntb_softc *ntb, enum ntb_speed s __unused, 1795 enum ntb_width w __unused) 1796{ 1797 uint32_t cntl; 1798 1799 if (ntb->type == NTB_ATOM) { 1800 pci_write_config(ntb->device, NTB_PPD_OFFSET, 1801 ntb->ppd | ATOM_PPD_INIT_LINK, 4); 1802 return (0); 1803 } 1804 1805 if (ntb->conn_type == NTB_CONN_TRANSPARENT) { 1806 ntb_link_event(ntb); 1807 return (0); 1808 } 1809 1810 cntl = ntb_reg_read(4, ntb->reg->ntb_ctl); 1811 cntl &= ~(NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK); 1812 cntl |= NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP; 1813 cntl |= NTB_CNTL_P2S_BAR4_SNOOP | NTB_CNTL_S2P_BAR4_SNOOP; 1814 if (HAS_FEATURE(NTB_SPLIT_BAR)) 1815 cntl |= NTB_CNTL_P2S_BAR5_SNOOP | NTB_CNTL_S2P_BAR5_SNOOP; 1816 ntb_reg_write(4, ntb->reg->ntb_ctl, cntl); 1817 return (0); 1818} 1819 1820/* 1821 * ntb_link_disable() - disable the link on the secondary side of the ntb 1822 * @ntb: NTB device context 1823 * 1824 * Disable the link on the secondary side of the ntb. This can only be done 1825 * from the primary side of the ntb in primary or b2b topology. The ntb device 1826 * should disable the link. Returning from this call must indicate that a 1827 * barrier has passed, though with no more writes may pass in either direction 1828 * across the link, except if this call returns an error number. 1829 * 1830 * Return: Zero on success, otherwise an error number. 
1831 */ 1832int 1833ntb_link_disable(struct ntb_softc *ntb) 1834{ 1835 uint32_t cntl; 1836 1837 if (ntb->conn_type == NTB_CONN_TRANSPARENT) { 1838 ntb_link_event(ntb); 1839 return (0); 1840 } 1841 1842 cntl = ntb_reg_read(4, ntb->reg->ntb_ctl); 1843 cntl &= ~(NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP); 1844 cntl &= ~(NTB_CNTL_P2S_BAR4_SNOOP | NTB_CNTL_S2P_BAR4_SNOOP); 1845 if (HAS_FEATURE(NTB_SPLIT_BAR)) 1846 cntl &= ~(NTB_CNTL_P2S_BAR5_SNOOP | NTB_CNTL_S2P_BAR5_SNOOP); 1847 cntl |= NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK; 1848 ntb_reg_write(4, ntb->reg->ntb_ctl, cntl); 1849 return (0); 1850} 1851 1852static void 1853recover_atom_link(void *arg) 1854{ 1855 struct ntb_softc *ntb = arg; 1856 unsigned speed, width, oldspeed, oldwidth; 1857 uint32_t status32; 1858 1859 atom_perform_link_restart(ntb); 1860 1861 /* 1862 * There is a potential race between the 2 NTB devices recovering at 1863 * the same time. If the times are the same, the link will not recover 1864 * and the driver will be stuck in this loop forever. Add a random 1865 * interval to the recovery time to prevent this race. 
1866 */ 1867 status32 = arc4random() % ATOM_LINK_RECOVERY_TIME; 1868 pause("Link", (ATOM_LINK_RECOVERY_TIME + status32) * hz / 1000); 1869 1870 if (atom_link_is_err(ntb)) 1871 goto retry; 1872 1873 status32 = ntb_reg_read(4, ntb->reg->ntb_ctl); 1874 if ((status32 & ATOM_CNTL_LINK_DOWN) != 0) 1875 goto out; 1876 1877 status32 = ntb_reg_read(4, ntb->reg->lnk_sta); 1878 width = NTB_LNK_STA_WIDTH(status32); 1879 speed = status32 & NTB_LINK_SPEED_MASK; 1880 1881 oldwidth = NTB_LNK_STA_WIDTH(ntb->lnk_sta); 1882 oldspeed = ntb->lnk_sta & NTB_LINK_SPEED_MASK; 1883 if (oldwidth != width || oldspeed != speed) 1884 goto retry; 1885 1886out: 1887 callout_reset(&ntb->heartbeat_timer, NTB_HB_TIMEOUT * hz, atom_link_hb, 1888 ntb); 1889 return; 1890 1891retry: 1892 callout_reset(&ntb->lr_timer, NTB_HB_TIMEOUT * hz, recover_atom_link, 1893 ntb); 1894} 1895 1896/* 1897 * Polls the HW link status register(s); returns true if something has changed. 1898 */ 1899static bool 1900ntb_poll_link(struct ntb_softc *ntb) 1901{ 1902 uint32_t ntb_cntl; 1903 uint16_t reg_val; 1904 1905 if (ntb->type == NTB_ATOM) { 1906 ntb_cntl = ntb_reg_read(4, ntb->reg->ntb_ctl); 1907 if (ntb_cntl == ntb->ntb_ctl) 1908 return (false); 1909 1910 ntb->ntb_ctl = ntb_cntl; 1911 ntb->lnk_sta = ntb_reg_read(4, ntb->reg->lnk_sta); 1912 } else { 1913 db_iowrite_raw(ntb, ntb->self_reg->db_bell, ntb->db_link_mask); 1914 1915 reg_val = pci_read_config(ntb->device, ntb->reg->lnk_sta, 2); 1916 if (reg_val == ntb->lnk_sta) 1917 return (false); 1918 1919 ntb->lnk_sta = reg_val; 1920 } 1921 return (true); 1922} 1923 1924static inline enum ntb_speed 1925ntb_link_sta_speed(struct ntb_softc *ntb) 1926{ 1927 1928 if (!link_is_up(ntb)) 1929 return (NTB_SPEED_NONE); 1930 return (ntb->lnk_sta & NTB_LINK_SPEED_MASK); 1931} 1932 1933static inline enum ntb_width 1934ntb_link_sta_width(struct ntb_softc *ntb) 1935{ 1936 1937 if (!link_is_up(ntb)) 1938 return (NTB_WIDTH_NONE); 1939 return (NTB_LNK_STA_WIDTH(ntb->lnk_sta)); 1940} 1941 
1942SYSCTL_NODE(_hw_ntb, OID_AUTO, debug_info, CTLFLAG_RW, 0, 1943 "Driver state, statistics, and HW registers"); 1944 1945#define NTB_REGSZ_MASK (3ul << 30) 1946#define NTB_REG_64 (1ul << 30) 1947#define NTB_REG_32 (2ul << 30) 1948#define NTB_REG_16 (3ul << 30) 1949#define NTB_REG_8 (0ul << 30) 1950 1951#define NTB_DB_READ (1ul << 29) 1952#define NTB_PCI_REG (1ul << 28) 1953#define NTB_REGFLAGS_MASK (NTB_REGSZ_MASK | NTB_DB_READ | NTB_PCI_REG) 1954 1955static void 1956ntb_sysctl_init(struct ntb_softc *ntb) 1957{ 1958 struct sysctl_oid_list *tree_par, *regpar, *statpar, *errpar; 1959 struct sysctl_ctx_list *ctx; 1960 struct sysctl_oid *tree, *tmptree; 1961 1962 ctx = device_get_sysctl_ctx(ntb->device); 1963 1964 tree = SYSCTL_ADD_NODE(ctx, 1965 SYSCTL_CHILDREN(device_get_sysctl_tree(ntb->device)), OID_AUTO, 1966 "debug_info", CTLFLAG_RD, NULL, 1967 "Driver state, statistics, and HW registers"); 1968 tree_par = SYSCTL_CHILDREN(tree); 1969 1970 SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "conn_type", CTLFLAG_RD, 1971 &ntb->conn_type, 0, "0 - Transparent; 1 - B2B; 2 - Root Port"); 1972 SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "dev_type", CTLFLAG_RD, 1973 &ntb->dev_type, 0, "0 - USD; 1 - DSD"); 1974 SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "ppd", CTLFLAG_RD, 1975 &ntb->ppd, 0, "Raw PPD register (cached)"); 1976 1977 if (ntb->b2b_mw_idx != B2B_MW_DISABLED) { 1978 SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "b2b_idx", CTLFLAG_RD, 1979 &ntb->b2b_mw_idx, 0, 1980 "Index of the MW used for B2B remote register access"); 1981 SYSCTL_ADD_UQUAD(ctx, tree_par, OID_AUTO, "b2b_off", 1982 CTLFLAG_RD, &ntb->b2b_off, 1983 "If non-zero, offset of B2B register region in shared MW"); 1984 } 1985 1986 SYSCTL_ADD_PROC(ctx, tree_par, OID_AUTO, "features", 1987 CTLFLAG_RD | CTLTYPE_STRING, ntb, 0, sysctl_handle_features, "A", 1988 "Features/errata of this NTB device"); 1989 1990 SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "ntb_ctl", CTLFLAG_RD, 1991 __DEVOLATILE(uint32_t *, &ntb->ntb_ctl), 0, 1992 "NTB 
CTL register (cached)"); 1993 SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "lnk_sta", CTLFLAG_RD, 1994 __DEVOLATILE(uint32_t *, &ntb->lnk_sta), 0, 1995 "LNK STA register (cached)"); 1996 1997 SYSCTL_ADD_PROC(ctx, tree_par, OID_AUTO, "link_status", 1998 CTLFLAG_RD | CTLTYPE_STRING, ntb, 0, sysctl_handle_link_status, 1999 "A", "Link status"); 2000 2001 SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "mw_count", CTLFLAG_RD, 2002 &ntb->mw_count, 0, "MW count (excl. non-shared B2B register BAR)"); 2003 SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "spad_count", CTLFLAG_RD, 2004 &ntb->spad_count, 0, "Scratchpad count"); 2005 SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "db_count", CTLFLAG_RD, 2006 &ntb->db_count, 0, "Doorbell count"); 2007 SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "db_vec_count", CTLFLAG_RD, 2008 &ntb->db_vec_count, 0, "Doorbell vector count"); 2009 SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "db_vec_shift", CTLFLAG_RD, 2010 &ntb->db_vec_shift, 0, "Doorbell vector shift"); 2011 2012 SYSCTL_ADD_UQUAD(ctx, tree_par, OID_AUTO, "db_valid_mask", CTLFLAG_RD, 2013 &ntb->db_valid_mask, "Doorbell valid mask"); 2014 SYSCTL_ADD_UQUAD(ctx, tree_par, OID_AUTO, "db_link_mask", CTLFLAG_RD, 2015 &ntb->db_link_mask, "Doorbell link mask"); 2016 SYSCTL_ADD_UQUAD(ctx, tree_par, OID_AUTO, "db_mask", CTLFLAG_RD, 2017 &ntb->db_mask, "Doorbell mask (cached)"); 2018 2019 tmptree = SYSCTL_ADD_NODE(ctx, tree_par, OID_AUTO, "registers", 2020 CTLFLAG_RD, NULL, "Raw HW registers (big-endian)"); 2021 regpar = SYSCTL_CHILDREN(tmptree); 2022 2023 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "ntbcntl", 2024 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 | 2025 ntb->reg->ntb_ctl, sysctl_handle_register, "IU", 2026 "NTB Control register"); 2027 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "lnkcap", 2028 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 | 2029 0x19c, sysctl_handle_register, "IU", 2030 "NTB Link Capabilities"); 2031 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "lnkcon", 2032 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 | 2033 0x1a0, 
sysctl_handle_register, "IU", 2034 "NTB Link Control register"); 2035 2036 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "db_mask", 2037 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2038 NTB_REG_64 | NTB_DB_READ | ntb->self_reg->db_mask, 2039 sysctl_handle_register, "QU", "Doorbell mask register"); 2040 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "db_bell", 2041 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2042 NTB_REG_64 | NTB_DB_READ | ntb->self_reg->db_bell, 2043 sysctl_handle_register, "QU", "Doorbell register"); 2044 2045 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat23", 2046 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2047 NTB_REG_64 | ntb->xlat_reg->bar2_xlat, 2048 sysctl_handle_register, "QU", "Incoming XLAT23 register"); 2049 if (HAS_FEATURE(NTB_SPLIT_BAR)) { 2050 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat4", 2051 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2052 NTB_REG_32 | ntb->xlat_reg->bar4_xlat, 2053 sysctl_handle_register, "IU", "Incoming XLAT4 register"); 2054 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat5", 2055 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2056 NTB_REG_32 | ntb->xlat_reg->bar5_xlat, 2057 sysctl_handle_register, "IU", "Incoming XLAT5 register"); 2058 } else { 2059 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat45", 2060 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2061 NTB_REG_64 | ntb->xlat_reg->bar4_xlat, 2062 sysctl_handle_register, "QU", "Incoming XLAT45 register"); 2063 } 2064 2065 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt23", 2066 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2067 NTB_REG_64 | ntb->xlat_reg->bar2_limit, 2068 sysctl_handle_register, "QU", "Incoming LMT23 register"); 2069 if (HAS_FEATURE(NTB_SPLIT_BAR)) { 2070 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt4", 2071 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2072 NTB_REG_32 | ntb->xlat_reg->bar4_limit, 2073 sysctl_handle_register, "IU", "Incoming LMT4 register"); 2074 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt5", 2075 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2076 NTB_REG_32 | ntb->xlat_reg->bar5_limit, 2077 
sysctl_handle_register, "IU", "Incoming LMT5 register"); 2078 } else { 2079 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt45", 2080 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2081 NTB_REG_64 | ntb->xlat_reg->bar4_limit, 2082 sysctl_handle_register, "QU", "Incoming LMT45 register"); 2083 } 2084 2085 if (ntb->type == NTB_ATOM) 2086 return; 2087 2088 tmptree = SYSCTL_ADD_NODE(ctx, regpar, OID_AUTO, "xeon_stats", 2089 CTLFLAG_RD, NULL, "Xeon HW statistics"); 2090 statpar = SYSCTL_CHILDREN(tmptree); 2091 SYSCTL_ADD_PROC(ctx, statpar, OID_AUTO, "upstream_mem_miss", 2092 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2093 NTB_REG_16 | XEON_USMEMMISS_OFFSET, 2094 sysctl_handle_register, "SU", "Upstream Memory Miss"); 2095 2096 tmptree = SYSCTL_ADD_NODE(ctx, regpar, OID_AUTO, "xeon_hw_err", 2097 CTLFLAG_RD, NULL, "Xeon HW errors"); 2098 errpar = SYSCTL_CHILDREN(tmptree); 2099 2100 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "ppd", 2101 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2102 NTB_REG_8 | NTB_PCI_REG | NTB_PPD_OFFSET, 2103 sysctl_handle_register, "CU", "PPD"); 2104 2105 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "pbar23_sz", 2106 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2107 NTB_REG_8 | NTB_PCI_REG | XEON_PBAR23SZ_OFFSET, 2108 sysctl_handle_register, "CU", "PBAR23 SZ (log2)"); 2109 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "pbar4_sz", 2110 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2111 NTB_REG_8 | NTB_PCI_REG | XEON_PBAR4SZ_OFFSET, 2112 sysctl_handle_register, "CU", "PBAR4 SZ (log2)"); 2113 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "pbar5_sz", 2114 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2115 NTB_REG_8 | NTB_PCI_REG | XEON_PBAR5SZ_OFFSET, 2116 sysctl_handle_register, "CU", "PBAR5 SZ (log2)"); 2117 2118 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar23_sz", 2119 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2120 NTB_REG_8 | NTB_PCI_REG | XEON_SBAR23SZ_OFFSET, 2121 sysctl_handle_register, "CU", "SBAR23 SZ (log2)"); 2122 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar4_sz", 2123 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2124 NTB_REG_8 | NTB_PCI_REG | 
XEON_SBAR4SZ_OFFSET, 2125 sysctl_handle_register, "CU", "SBAR4 SZ (log2)"); 2126 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar5_sz", 2127 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2128 NTB_REG_8 | NTB_PCI_REG | XEON_SBAR5SZ_OFFSET, 2129 sysctl_handle_register, "CU", "SBAR5 SZ (log2)"); 2130 2131 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "devsts", 2132 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2133 NTB_REG_16 | NTB_PCI_REG | XEON_DEVSTS_OFFSET, 2134 sysctl_handle_register, "SU", "DEVSTS"); 2135 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "lnksts", 2136 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2137 NTB_REG_16 | NTB_PCI_REG | XEON_LINK_STATUS_OFFSET, 2138 sysctl_handle_register, "SU", "LNKSTS"); 2139 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "slnksts", 2140 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2141 NTB_REG_16 | NTB_PCI_REG | XEON_SLINK_STATUS_OFFSET, 2142 sysctl_handle_register, "SU", "SLNKSTS"); 2143 2144 SYSCTL_ADD_PROC(ctx, errpar, OID_AUTO, "uncerrsts", 2145 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2146 NTB_REG_32 | NTB_PCI_REG | XEON_UNCERRSTS_OFFSET, 2147 sysctl_handle_register, "IU", "UNCERRSTS"); 2148 SYSCTL_ADD_PROC(ctx, errpar, OID_AUTO, "corerrsts", 2149 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2150 NTB_REG_32 | NTB_PCI_REG | XEON_CORERRSTS_OFFSET, 2151 sysctl_handle_register, "IU", "CORERRSTS"); 2152 2153 if (ntb->conn_type != NTB_CONN_B2B) 2154 return; 2155 2156 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat23", 2157 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2158 NTB_REG_64 | ntb->bar_info[NTB_B2B_BAR_1].pbarxlat_off, 2159 sysctl_handle_register, "QU", "Outgoing XLAT23 register"); 2160 if (HAS_FEATURE(NTB_SPLIT_BAR)) { 2161 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat4", 2162 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2163 NTB_REG_32 | ntb->bar_info[NTB_B2B_BAR_2].pbarxlat_off, 2164 sysctl_handle_register, "IU", "Outgoing XLAT4 register"); 2165 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat5", 2166 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2167 NTB_REG_32 | ntb->bar_info[NTB_B2B_BAR_3].pbarxlat_off, 2168 
sysctl_handle_register, "IU", "Outgoing XLAT5 register"); 2169 } else { 2170 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat45", 2171 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2172 NTB_REG_64 | ntb->bar_info[NTB_B2B_BAR_2].pbarxlat_off, 2173 sysctl_handle_register, "QU", "Outgoing XLAT45 register"); 2174 } 2175 2176 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_lmt23", 2177 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2178 NTB_REG_64 | XEON_PBAR2LMT_OFFSET, 2179 sysctl_handle_register, "QU", "Outgoing LMT23 register"); 2180 if (HAS_FEATURE(NTB_SPLIT_BAR)) { 2181 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_lmt4", 2182 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2183 NTB_REG_32 | XEON_PBAR4LMT_OFFSET, 2184 sysctl_handle_register, "IU", "Outgoing LMT4 register"); 2185 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_lmt5", 2186 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2187 NTB_REG_32 | XEON_PBAR5LMT_OFFSET, 2188 sysctl_handle_register, "IU", "Outgoing LMT5 register"); 2189 } else { 2190 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_lmt45", 2191 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2192 NTB_REG_64 | XEON_PBAR4LMT_OFFSET, 2193 sysctl_handle_register, "QU", "Outgoing LMT45 register"); 2194 } 2195 2196 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar01_base", 2197 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2198 NTB_REG_64 | ntb->xlat_reg->bar0_base, 2199 sysctl_handle_register, "QU", "Secondary BAR01 base register"); 2200 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar23_base", 2201 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2202 NTB_REG_64 | ntb->xlat_reg->bar2_base, 2203 sysctl_handle_register, "QU", "Secondary BAR23 base register"); 2204 if (HAS_FEATURE(NTB_SPLIT_BAR)) { 2205 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar4_base", 2206 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2207 NTB_REG_32 | ntb->xlat_reg->bar4_base, 2208 sysctl_handle_register, "IU", 2209 "Secondary BAR4 base register"); 2210 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar5_base", 2211 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2212 NTB_REG_32 | 
ntb->xlat_reg->bar5_base,
		    sysctl_handle_register, "IU",
		    "Secondary BAR5 base register");
	} else {
		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar45_base",
		    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
		    NTB_REG_64 | ntb->xlat_reg->bar4_base,
		    sysctl_handle_register, "QU",
		    "Secondary BAR45 base register");
	}
}

/*
 * Sysctl handler that renders the device's feature/errata flag word as a
 * human-readable "%b"-formatted string (bit names come from
 * NTB_FEATURES_STR).  The node is read-only; writes return EINVAL.
 */
static int
sysctl_handle_features(SYSCTL_HANDLER_ARGS)
{
	struct ntb_softc *ntb;
	struct sbuf sb;
	int error;

	error = 0;
	ntb = arg1;

	sbuf_new_for_sysctl(&sb, NULL, 256, req);

	sbuf_printf(&sb, "%b", ntb->features, NTB_FEATURES_STR);
	error = sbuf_finish(&sb);
	sbuf_delete(&sb);

	/* Read-only node: reject any attempt to store a new value. */
	if (error || !req->newptr)
		return (error);
	return (EINVAL);
}

/*
 * Sysctl handler that reports the current link state as a string, e.g.
 * "up / PCIe Gen 3 / Width x8" or "down".  Read-only; writes return EINVAL.
 */
static int
sysctl_handle_link_status(SYSCTL_HANDLER_ARGS)
{
	struct ntb_softc *ntb;
	struct sbuf sb;
	enum ntb_speed speed;
	enum ntb_width width;
	int error;

	error = 0;
	ntb = arg1;

	sbuf_new_for_sysctl(&sb, NULL, 32, req);

	if (ntb_link_is_up(ntb, &speed, &width))
		sbuf_printf(&sb, "up / PCIe Gen %u / Width x%u",
		    (unsigned)speed, (unsigned)width);
	else
		sbuf_printf(&sb, "down");

	error = sbuf_finish(&sb);
	sbuf_delete(&sb);

	/* Read-only node: reject any attempt to store a new value. */
	if (error || !req->newptr)
		return (error);
	return (EINVAL);
}

/*
 * Generic register-dump sysctl handler.  arg2 packs a register offset
 * together with flag bits describing how to read it: the access width
 * (NTB_REGSZ_MASK), whether to read via the doorbell accessors
 * (NTB_DB_READ), and whether the offset is in PCI config space rather
 * than MMIO (NTB_PCI_REG).  The value is exported big-endian so that
 * "sysctl -x" output is legible.
 */
static int
sysctl_handle_register(SYSCTL_HANDLER_ARGS)
{
	struct ntb_softc *ntb;
	const void *outp;
	uintptr_t sz;
	uint64_t umv;
	char be[sizeof(umv)];
	size_t outsz;
	uint32_t reg;
	bool db, pci;
	int error;

	ntb = arg1;
	reg = arg2 & ~NTB_REGFLAGS_MASK;
	sz = arg2 & NTB_REGSZ_MASK;
	db = (arg2 & NTB_DB_READ) != 0;
	pci = (arg2 & NTB_PCI_REG) != 0;

	/* The doorbell and PCI-config flags are mutually exclusive. */
	KASSERT(!(db && pci), ("bogus"));

	if (db) {
		/* Doorbell registers are only ever exported as 64-bit. */
		KASSERT(sz == NTB_REG_64, ("bogus"));
		umv = db_ioread(ntb, reg);
		outsz = sizeof(uint64_t);
	} else {
		switch (sz) {
		case NTB_REG_64:
			if (pci)
				umv = pci_read_config(ntb->device, reg, 8);
			else
				umv = ntb_reg_read(8, reg);
			outsz = sizeof(uint64_t);
			break;
		case NTB_REG_32:
			if (pci)
				umv = pci_read_config(ntb->device, reg, 4);
			else
				umv = ntb_reg_read(4, reg);
			outsz = sizeof(uint32_t);
			break;
		case NTB_REG_16:
			if (pci)
				umv = pci_read_config(ntb->device, reg, 2);
			else
				umv = ntb_reg_read(2, reg);
			outsz = sizeof(uint16_t);
			break;
		case NTB_REG_8:
			if (pci)
				umv = pci_read_config(ntb->device, reg, 1);
			else
				umv = ntb_reg_read(1, reg);
			outsz = sizeof(uint8_t);
			break;
		default:
			panic("bogus");
			break;
		}
	}

	/* Encode bigendian so that sysctl -x is legible. */
	be64enc(be, umv);
	/* Skip the leading zero bytes of the full 64-bit encoding. */
	outp = ((char *)be) + sizeof(umv) - outsz;

	error = SYSCTL_OUT(req, outp, outsz);
	/* Read-only node: reject any attempt to store a new value. */
	if (error || !req->newptr)
		return (error);
	return (EINVAL);
}

/*
 * Public API to the rest of the OS
 */

/**
 * ntb_get_max_spads() - get the total scratch regs usable
 * @ntb: pointer to ntb_softc instance
 *
 * This function returns the max 32bit scratchpad registers usable by the
 * upper layer.
 *
 * RETURNS: total number of scratch pad registers available
 */
uint8_t
ntb_get_max_spads(struct ntb_softc *ntb)
{

	return (ntb->spad_count);
}

/* Number of memory windows available to the upper layers. */
uint8_t
ntb_mw_count(struct ntb_softc *ntb)
{

	return (ntb->mw_count);
}

/**
 * ntb_spad_write() - write to a local scratchpad register
 * @ntb: pointer to ntb_softc instance
 * @idx: index to the scratchpad register, 0 based
 * @val: the data value to put into the register
 *
 * This function allows writing of a 32bit value to the indexed scratchpad
 * register.  The register resides on the primary (internal) side; the
 * implementation writes through ntb->self_reg.
2379 * 2380 * RETURNS: An appropriate ERRNO error value on error, or zero for success. 2381 */ 2382int 2383ntb_spad_write(struct ntb_softc *ntb, unsigned int idx, uint32_t val) 2384{ 2385 2386 if (idx >= ntb->spad_count) 2387 return (EINVAL); 2388 2389 ntb_reg_write(4, ntb->self_reg->spad + idx * 4, val); 2390 2391 return (0); 2392} 2393 2394/** 2395 * ntb_spad_read() - read from the primary scratchpad register 2396 * @ntb: pointer to ntb_softc instance 2397 * @idx: index to scratchpad register, 0 based 2398 * @val: pointer to 32bit integer for storing the register value 2399 * 2400 * This function allows reading of the 32bit scratchpad register on 2401 * the primary (internal) side. 2402 * 2403 * RETURNS: An appropriate ERRNO error value on error, or zero for success. 2404 */ 2405int 2406ntb_spad_read(struct ntb_softc *ntb, unsigned int idx, uint32_t *val) 2407{ 2408 2409 if (idx >= ntb->spad_count) 2410 return (EINVAL); 2411 2412 *val = ntb_reg_read(4, ntb->self_reg->spad + idx * 4); 2413 2414 return (0); 2415} 2416 2417/** 2418 * ntb_peer_spad_write() - write to the secondary scratchpad register 2419 * @ntb: pointer to ntb_softc instance 2420 * @idx: index to the scratchpad register, 0 based 2421 * @val: the data value to put into the register 2422 * 2423 * This function allows writing of a 32bit value to the indexed scratchpad 2424 * register. The register resides on the secondary (external) side. 2425 * 2426 * RETURNS: An appropriate ERRNO error value on error, or zero for success. 
2427 */ 2428int 2429ntb_peer_spad_write(struct ntb_softc *ntb, unsigned int idx, uint32_t val) 2430{ 2431 2432 if (idx >= ntb->spad_count) 2433 return (EINVAL); 2434 2435 if (HAS_FEATURE(NTB_SDOORBELL_LOCKUP)) 2436 ntb_mw_write(4, XEON_SPAD_OFFSET + idx * 4, val); 2437 else 2438 ntb_reg_write(4, ntb->peer_reg->spad + idx * 4, val); 2439 2440 return (0); 2441} 2442 2443/** 2444 * ntb_peer_spad_read() - read from the primary scratchpad register 2445 * @ntb: pointer to ntb_softc instance 2446 * @idx: index to scratchpad register, 0 based 2447 * @val: pointer to 32bit integer for storing the register value 2448 * 2449 * This function allows reading of the 32bit scratchpad register on 2450 * the primary (internal) side. 2451 * 2452 * RETURNS: An appropriate ERRNO error value on error, or zero for success. 2453 */ 2454int 2455ntb_peer_spad_read(struct ntb_softc *ntb, unsigned int idx, uint32_t *val) 2456{ 2457 2458 if (idx >= ntb->spad_count) 2459 return (EINVAL); 2460 2461 if (HAS_FEATURE(NTB_SDOORBELL_LOCKUP)) 2462 *val = ntb_mw_read(4, XEON_SPAD_OFFSET + idx * 4); 2463 else 2464 *val = ntb_reg_read(4, ntb->peer_reg->spad + idx * 4); 2465 2466 return (0); 2467} 2468 2469/* 2470 * ntb_mw_get_range() - get the range of a memory window 2471 * @ntb: NTB device context 2472 * @idx: Memory window number 2473 * @base: OUT - the base address for mapping the memory window 2474 * @size: OUT - the size for mapping the memory window 2475 * @align: OUT - the base alignment for translating the memory window 2476 * @align_size: OUT - the size alignment for translating the memory window 2477 * 2478 * Get the range of a memory window. NULL may be given for any output 2479 * parameter if the value is not needed. The base and size may be used for 2480 * mapping the memory window, to access the peer memory. The alignment and 2481 * size may be used for translating the memory window, for the peer to access 2482 * memory on the local system. 
2483 * 2484 * Return: Zero on success, otherwise an error number. 2485 */ 2486int 2487ntb_mw_get_range(struct ntb_softc *ntb, unsigned mw_idx, vm_paddr_t *base, 2488 caddr_t *vbase, size_t *size, size_t *align, size_t *align_size) 2489{ 2490 struct ntb_pci_bar_info *bar; 2491 size_t bar_b2b_off; 2492 2493 if (mw_idx >= ntb_mw_count(ntb)) 2494 return (EINVAL); 2495 2496 bar = &ntb->bar_info[ntb_mw_to_bar(ntb, mw_idx)]; 2497 bar_b2b_off = 0; 2498 if (mw_idx == ntb->b2b_mw_idx) { 2499 KASSERT(ntb->b2b_off != 0, 2500 ("user shouldn't get non-shared b2b mw")); 2501 bar_b2b_off = ntb->b2b_off; 2502 } 2503 2504 if (base != NULL) 2505 *base = bar->pbase + bar_b2b_off; 2506 if (vbase != NULL) 2507 *vbase = bar->vbase + bar_b2b_off; 2508 if (size != NULL) 2509 *size = bar->size - bar_b2b_off; 2510 if (align != NULL) 2511 *align = bar->size; 2512 if (align_size != NULL) 2513 *align_size = 1; 2514 return (0); 2515} 2516 2517/* 2518 * ntb_mw_set_trans() - set the translation of a memory window 2519 * @ntb: NTB device context 2520 * @idx: Memory window number 2521 * @addr: The dma address local memory to expose to the peer 2522 * @size: The size of the local memory to expose to the peer 2523 * 2524 * Set the translation of a memory window. The peer may access local memory 2525 * through the window starting at the address, up to the size. The address 2526 * must be aligned to the alignment specified by ntb_mw_get_range(). The size 2527 * must be aligned to the size alignment specified by ntb_mw_get_range(). 2528 * 2529 * Return: Zero on success, otherwise an error number. 
 */
int
ntb_mw_set_trans(struct ntb_softc *ntb, unsigned idx, bus_addr_t addr,
    size_t size)
{
	struct ntb_pci_bar_info *bar;
	uint64_t base, limit, reg_val;
	size_t bar_size, mw_size;
	uint32_t base_reg, xlat_reg, limit_reg;
	enum ntb_bar bar_num;

	if (idx >= ntb_mw_count(ntb))
		return (EINVAL);

	bar_num = ntb_mw_to_bar(ntb, idx);
	bar = &ntb->bar_info[bar_num];

	bar_size = bar->size;
	/* A window sharing its BAR with the B2B registers is smaller. */
	if (idx == ntb->b2b_mw_idx)
		mw_size = bar_size - ntb->b2b_off;
	else
		mw_size = bar_size;

	/* Hardware requires that addr is aligned to bar size */
	if ((addr & (bar_size - 1)) != 0)
		return (EINVAL);

	if (size > mw_size)
		return (EINVAL);

	bar_get_xlat_params(ntb, bar_num, &base_reg, &xlat_reg, &limit_reg);

	/* limit == 0 means "no limit": the whole window is exposed. */
	limit = 0;
	if (bar_is_64bit(ntb, bar_num)) {
		base = ntb_reg_read(8, base_reg) & BAR_HIGH_MASK;

		if (limit_reg != 0 && size != mw_size)
			limit = base + size;

		/* Set and verify translation address */
		ntb_reg_write(8, xlat_reg, addr);
		reg_val = ntb_reg_read(8, xlat_reg) & BAR_HIGH_MASK;
		if (reg_val != addr) {
			/* Write did not stick: clear the xlat and bail. */
			ntb_reg_write(8, xlat_reg, 0);
			return (EIO);
		}

		/* Set and verify the limit */
		/*
		 * NOTE(review): this write happens even when limit_reg is 0;
		 * presumably 64-bit BARs always have a limit register --
		 * confirm against bar_get_xlat_params().
		 */
		ntb_reg_write(8, limit_reg, limit);
		reg_val = ntb_reg_read(8, limit_reg) & BAR_HIGH_MASK;
		if (reg_val != limit) {
			/* Roll back both registers before reporting EIO. */
			ntb_reg_write(8, limit_reg, base);
			ntb_reg_write(8, xlat_reg, 0);
			return (EIO);
		}
	} else {
		/* Configure 32-bit (split) BAR MW */

		/* Both the address and its end must fit in 32 bits. */
		if ((addr & UINT32_MAX) != addr)
			return (EINVAL);
		if (((addr + size) & UINT32_MAX) != (addr + size))
			return (EINVAL);

		base = ntb_reg_read(4, base_reg) & BAR_HIGH_MASK;

		if (limit_reg != 0 && size != mw_size)
			limit = base + size;

		/* Set and verify translation address */
		ntb_reg_write(4, xlat_reg, addr);
		reg_val = ntb_reg_read(4, xlat_reg) & BAR_HIGH_MASK;
		if (reg_val != addr) {
			/* Write did not stick: clear the xlat and bail. */
			ntb_reg_write(4, xlat_reg, 0);
			return (EIO);
		}

		/* Set and verify the limit */
		ntb_reg_write(4, limit_reg, limit);
		reg_val = ntb_reg_read(4, limit_reg) & BAR_HIGH_MASK;
		if (reg_val != limit) {
			/* Roll back both registers before reporting EIO. */
			ntb_reg_write(4, limit_reg, base);
			ntb_reg_write(4, xlat_reg, 0);
			return (EIO);
		}
	}
	return (0);
}

/*
 * ntb_mw_clear_trans() - clear the translation of a memory window
 * @ntb: NTB device context
 * @idx: Memory window number
 *
 * Clear the translation of a memory window. The peer may no longer access
 * local memory through the window.
 *
 * Return: Zero on success, otherwise an error number.
 */
int
ntb_mw_clear_trans(struct ntb_softc *ntb, unsigned mw_idx)
{

	/* Translation address 0 with size 0 disables the window. */
	return (ntb_mw_set_trans(ntb, mw_idx, 0, 0));
}

/*
 * ntb_mw_get_wc - Get the write-combine status of a memory window
 *
 * Returns: Zero on success, setting *wc; otherwise an error number (e.g. if
 * idx is an invalid memory window).
 */
int
ntb_mw_get_wc(struct ntb_softc *ntb, unsigned idx, bool *wc)
{
	struct ntb_pci_bar_info *bar;

	if (idx >= ntb_mw_count(ntb))
		return (EINVAL);

	bar = &ntb->bar_info[ntb_mw_to_bar(ntb, idx)];
	*wc = bar->mapped_wc;
	return (0);
}

/*
 * ntb_mw_set_wc - Set the write-combine status of a memory window
 *
 * If 'wc' matches the current status, this does nothing and succeeds.
 *
 * Returns: Zero on success, setting the caching attribute on the virtual
 * mapping of the BAR; otherwise an error number (e.g. if idx is an invalid
 * memory window, or if changing the caching attribute fails).
2662 */ 2663int 2664ntb_mw_set_wc(struct ntb_softc *ntb, unsigned idx, bool wc) 2665{ 2666 struct ntb_pci_bar_info *bar; 2667 vm_memattr_t attr; 2668 int rc; 2669 2670 if (idx >= ntb_mw_count(ntb)) 2671 return (EINVAL); 2672 2673 bar = &ntb->bar_info[ntb_mw_to_bar(ntb, idx)]; 2674 if (bar->mapped_wc == wc) 2675 return (0); 2676 2677 if (wc) 2678 attr = VM_MEMATTR_WRITE_COMBINING; 2679 else 2680 attr = VM_MEMATTR_DEFAULT; 2681 2682 rc = pmap_change_attr((vm_offset_t)bar->vbase, bar->size, attr); 2683 if (rc == 0) 2684 bar->mapped_wc = wc; 2685 2686 return (rc); 2687} 2688 2689/** 2690 * ntb_peer_db_set() - Set the doorbell on the secondary/external side 2691 * @ntb: pointer to ntb_softc instance 2692 * @bit: doorbell bits to ring 2693 * 2694 * This function allows triggering of a doorbell on the secondary/external 2695 * side that will initiate an interrupt on the remote host 2696 */ 2697void 2698ntb_peer_db_set(struct ntb_softc *ntb, uint64_t bit) 2699{ 2700 2701 if (HAS_FEATURE(NTB_SDOORBELL_LOCKUP)) { 2702 ntb_mw_write(2, XEON_PDOORBELL_OFFSET, bit); 2703 return; 2704 } 2705 2706 db_iowrite(ntb, ntb->peer_reg->db_bell, bit); 2707} 2708 2709/* 2710 * ntb_get_peer_db_addr() - Return the address of the remote doorbell register, 2711 * as well as the size of the register (via *sz_out). 2712 * 2713 * This function allows a caller using I/OAT DMA to chain the remote doorbell 2714 * ring to its memory window write. 2715 * 2716 * Note that writing the peer doorbell via a memory window will *not* generate 2717 * an interrupt on the remote host; that must be done seperately. 
 */
bus_addr_t
ntb_get_peer_db_addr(struct ntb_softc *ntb, vm_size_t *sz_out)
{
	struct ntb_pci_bar_info *bar;
	uint64_t regoff;

	KASSERT(sz_out != NULL, ("must be non-NULL"));

	if (!HAS_FEATURE(NTB_SDOORBELL_LOCKUP)) {
		/* Normal case: the doorbell lives in the register BAR. */
		bar = &ntb->bar_info[NTB_CONFIG_BAR];
		regoff = ntb->peer_reg->db_bell;
	} else {
		/*
		 * SDOORBELL_LOCKUP errata: the peer doorbell must be rung
		 * through the B2B memory window instead of the register BAR.
		 */
		KASSERT((HAS_FEATURE(NTB_SPLIT_BAR) && ntb->mw_count == 2) ||
		    (!HAS_FEATURE(NTB_SPLIT_BAR) && ntb->mw_count == 1),
		    ("mw_count invalid after setup"));
		KASSERT(ntb->b2b_mw_idx != B2B_MW_DISABLED,
		    ("invalid b2b idx"));

		bar = &ntb->bar_info[ntb_mw_to_bar(ntb, ntb->b2b_mw_idx)];
		regoff = XEON_PDOORBELL_OFFSET;
	}
	KASSERT(bar->pci_bus_tag != X86_BUS_SPACE_IO, ("uh oh"));

	*sz_out = ntb->reg->db_size;
	/* HACK: Specific to current x86 bus implementation. */
	return ((uint64_t)bar->pci_bus_handle + regoff);
}

/*
 * ntb_db_valid_mask() - get a mask of doorbell bits supported by the ntb
 * @ntb: NTB device context
 *
 * Hardware may support different number or arrangement of doorbell bits.
 *
 * Return: A mask of doorbell bits supported by the ntb.
 */
uint64_t
ntb_db_valid_mask(struct ntb_softc *ntb)
{

	return (ntb->db_valid_mask);
}

/*
 * ntb_db_vector_mask() - get a mask of doorbell bits serviced by a vector
 * @ntb: NTB device context
 * @vector: Doorbell vector number
 *
 * Each interrupt vector may have a different number or arrangement of bits.
 *
 * Return: A mask of doorbell bits serviced by a vector.
2770 */ 2771uint64_t 2772ntb_db_vector_mask(struct ntb_softc *ntb, uint32_t vector) 2773{ 2774 2775 if (vector > ntb->db_vec_count) 2776 return (0); 2777 return (ntb->db_valid_mask & ntb_vec_mask(ntb, vector)); 2778} 2779 2780/** 2781 * ntb_link_is_up() - get the current ntb link state 2782 * @ntb: NTB device context 2783 * @speed: OUT - The link speed expressed as PCIe generation number 2784 * @width: OUT - The link width expressed as the number of PCIe lanes 2785 * 2786 * RETURNS: true or false based on the hardware link state 2787 */ 2788bool 2789ntb_link_is_up(struct ntb_softc *ntb, enum ntb_speed *speed, 2790 enum ntb_width *width) 2791{ 2792 2793 if (speed != NULL) 2794 *speed = ntb_link_sta_speed(ntb); 2795 if (width != NULL) 2796 *width = ntb_link_sta_width(ntb); 2797 return (link_is_up(ntb)); 2798} 2799 2800static void 2801save_bar_parameters(struct ntb_pci_bar_info *bar) 2802{ 2803 2804 bar->pci_bus_tag = rman_get_bustag(bar->pci_resource); 2805 bar->pci_bus_handle = rman_get_bushandle(bar->pci_resource); 2806 bar->pbase = rman_get_start(bar->pci_resource); 2807 bar->size = rman_get_size(bar->pci_resource); 2808 bar->vbase = rman_get_virtual(bar->pci_resource); 2809} 2810 2811device_t 2812ntb_get_device(struct ntb_softc *ntb) 2813{ 2814 2815 return (ntb->device); 2816} 2817 2818/* Export HW-specific errata information. */ 2819bool 2820ntb_has_feature(struct ntb_softc *ntb, uint32_t feature) 2821{ 2822 2823 return (HAS_FEATURE(feature)); 2824} 2825